\documentclass{article}%
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{graphicx}%
\setcounter{MaxMatrixCols}{30}
\newtheorem{theorem}{Theorem}
\newtheorem{acknowledgement}[theorem]{Acknowledgement}
\newtheorem{algorithm}[theorem]{Algorithm}
\newtheorem{axiom}[theorem]{Axiom}
\newtheorem{case}[theorem]{Case}
\newtheorem{claim}[theorem]{Claim}
\newtheorem{conclusion}[theorem]{Conclusion}
\newtheorem{condition}[theorem]{Condition}
\newtheorem{conjecture}[theorem]{Conjecture}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{criterion}[theorem]{Criterion}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{exercise}[theorem]{Exercise}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{notation}[theorem]{Notation}
\newtheorem{problem}[theorem]{Problem}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{solution}[theorem]{Solution}
\newtheorem{summary}[theorem]{Summary}
\newenvironment{proof}[1][Proof]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\begin{document}
\title{Math 413/513 Chapter 1 (from Friedberg, Insel, \& Spence)}
\author{David Glickenstein}
\maketitle
\section{Vector space definition}
\begin{definition}
A vector space $V$ over a field $F$ is a set together with two operations:%
\begin{align*}
+ & :V\times V\rightarrow V\\
\cdot & :F\times V\rightarrow V
\end{align*}
called addition and scalar multiplication, with the results denoted $x+y$ and
$ax$ respectively, that satisfy the following axioms:
\begin{enumerate}
\item[VS 1] (Commutativity of addition) For all $x,y\in V,$ we have $x+y=y+x$.
\item[VS 2] (Associativity of addition) For all $x,y,z\in V,$ we have $\left(
x+y\right) +z=x+\left( y+z\right) .$
\item[VS 3] (Additive identity) There exists an element $e$ in $V$ such that
$e+x=x$ for all $x\in V.$ We usually denote $e$ as $0$ or $\vec{0}.$
\item[VS 4] (Additive inverse) For each $x\in V$ there exists a $y\in V$ such
that $x+y=0.$ We usually denote $y$ as $-x.$
\item[VS 5] (Multiplication by 1) For each $x\in V,$ $1x=x.$
\item[VS 6] (Associativity of scalar multiplication) For each $a,b\in F$ and
$x\in V,$ we have $a\left( bx\right) =\left( ab\right) x.$
\item[VS 7] (Distributivity) For each $a\in F$ and $x,y\in V,$ we have
$a\left( x+y\right) =ax+ay.$
\item[VS 8] (Distributivity) For each $a,b\in F$ and $x\in V,$ we have
$\left( a+b\right) x=ax+bx.$
\end{enumerate}
\end{definition}
We sometimes call $x+y$ the sum and $ax$ the product. Elements of the field
are called scalars and elements of the vector space are called vectors. We
also have the difference $x-y$ defined to be $x+\left( -y\right) .$
\bigskip
\begin{proposition}
We can denote the finite sum $x_{1}+x_{2}+\cdots+x_{n}$ without confusion due
to VS 2.
\end{proposition}
\begin{proposition}
We can denote the finite product $a_{1}a_{2}\cdots a_{k}x$ without confusion
due to VS 6.
\end{proposition}
\begin{proposition}
\label{prop:multiples of 0}In any vector space $a\vec{0}=\vec{0}$ for any
$a\in F.$
\end{proposition}
\begin{proof}
We know by VS 7 that for any $a\in F,$
\[
a\vec{0}+a\vec{0}=a\left( \vec{0}+\vec{0}\right) =a\vec{0}.
\]
Adding the additive inverse $-\left( a\vec{0}\right) $ to both sides yields
that $a\vec{0}=\vec{0}.$
\end{proof}
\begin{proposition}
\label{prop:zero}In any vector space, $0x=\vec{0}$ for each $x\in V.$
\end{proposition}
\begin{proof}
We know that
\[
0x+x=\left( 0+1\right) x=1x=x
\]
by using VS 5 and VS 8. It follows that
\[
0x=0x+x+\left( -x\right) =x+\left( -x\right) =\vec{0}.
\]
\end{proof}
\begin{proposition}
In any vector space, $\left( -a\right) x=-\left( ax\right) =a\left(
-x\right) $ for each $a\in F$ and $x\in V.$ In particular, $\left(
-1\right) x=-x.$
\end{proposition}
\begin{proof}
We will simply compute:%
\[
\left( -a\right) x+ax=\left( -a+a\right) x=0x=\vec{0}%
\]
using VS 8, the properties of the field, and Proposition \ref{prop:zero}.
Similarly,
\[
a\left( -x\right) +ax=a\left( -x+x\right) =a\vec{0}=\vec{0},
\]
where we have used VS 7, the definition of $-x,$ and, for the last equality,
Proposition \ref{prop:multiples of 0}. Adding $-\left( ax\right) $ to both
sides of each equation shows that $\left( -a\right) x=-\left( ax\right)
=a\left( -x\right) .$ Taking $a=1$ and using VS 5 gives $\left( -1\right)
x=-x.$
\end{proof}
\section{Examples of vector spaces}
\subsection{n-tuples}
We are familiar with the vector space structure on $\mathbb{R}^{2},$ where
$\left( x_{1},y_{1}\right) +\left( x_{2},y_{2}\right) =\left( x_{1}%
+x_{2},y_{1}+y_{2}\right) $ and $a\left( x,y\right) =\left( ax,ay\right)
.$ One can check the axioms, and $\vec{0}=\left( 0,0\right) .$
Similarly, one can take n-tuples of the field, denoted $F^{n}.$
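As a sample verification in $F^{n}$ (the other axioms are checked the same
way), VS 1 and VS 8 follow coordinatewise from the field axioms: for
$x=\left( x_{1},\ldots,x_{n}\right) ,$ $y=\left( y_{1},\ldots,y_{n}\right)
\in F^{n}$ and $a,b\in F,$
\begin{align*}
x+y & =\left( x_{1}+y_{1},\ldots,x_{n}+y_{n}\right) =\left( y_{1}
+x_{1},\ldots,y_{n}+x_{n}\right) =y+x,\\
\left( a+b\right) x & =\left( \left( a+b\right) x_{1},\ldots,\left(
a+b\right) x_{n}\right) =\left( ax_{1}+bx_{1},\ldots,ax_{n}+bx_{n}\right)
=ax+bx.
\end{align*}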
\subsection{Matrices}
Matrices are essentially doubly indexed tuples, written $F^{n\times m},$ for
instance. The $\vec{0}$ is the matrix of all zeroes.
\subsection{Polynomials}
Polynomials of degree at most $n$: $a_{0}+a_{1}x+a_{2}x^{2}+\cdots+a_{n}
x^{n},$ where $x$ is a variable and $a_{i}\in F.$ (The polynomials of degree
exactly $n$ do not form a vector space, since the sum of two such polynomials
can have smaller degree.) One can also take polynomials in several variables.
There is a correspondence between polynomials of one variable of degree at
most $n$ and (n+1)-tuples. One could also take homogeneous polynomials.
\subsection{Functions}
Functions taking values in a field can be added together, as $\left(
f+g\right) \left( x\right) =f\left( x\right) +g\left( x\right) $ and
$\left( af\right) \left( x\right) =a\,f\left( x\right) .$ Note that
n-tuples can be considered functions $f:\left\{ 1,\ldots,n\right\}
\rightarrow F$ and the addition and scalar multiplication are described
analogously. In this way, one can consider sequences to be functions
$f:\mathbb{N}\rightarrow F,$ and this gives them a vector space structure. The
$\vec{0}$ is the function that takes every element of the domain to $0\in F.$
\section{Problems}
FIS Section 1.2, problems 9, 10, 13, 20, 21.
\section{Subspaces}
\begin{definition}
If $V$ is a vector space over $F$, a subset $W\subseteq V$ is a
\emph{subspace} if $W$ is a vector space over $F$ when using the operations of
addition and scalar multiplication from $V.$
\end{definition}
Note that $V$ and $\left\{ \vec{0}\right\} $ are subspaces. $\left\{
\vec{0}\right\} $ is called the \emph{trivial} subspace and $V$ is called an
\emph{improper} subspace, so a \emph{proper} subspace is one that is not $V$
and a \emph{nontrivial} subspace is one that is not $\left\{ \vec{0}\right\} .$
The following theorem is a useful way of checking to see if a subset is a subspace.
\begin{theorem}
\label{thm:subspaces}A subset $W\subseteq V$ is a subspace if and only if the
following are all true:
\begin{enumerate}
\item $\vec{0}\in W$
\item (closed under addition) $x,y\in W$ implies $x+y\in W$
\item (closed under scalar multiplication) $x\in W$ implies that for all $a\in
F,$ $ax\in W.$
\end{enumerate}
\end{theorem}
\begin{proof}
If $W$ is a subspace, it clearly satisfies these properties. Conversely, if
$W$ satisfies these properties, then by closure addition and scalar
multiplication restrict to operations on $W,$ and we need to check the axioms.
VS 1, VS 2, and VS 5 through VS 8 hold in $W$ because they involve only
elements of $W$ and hold in all of $V.$ VS 3 is the first assumption. Since
the additive inverse of $x$ is the same as $\left( -1\right) x,$ the third
assumption proves VS 4.
\end{proof}
\begin{proposition}
If $W$ and $W^{\prime}$ are subspaces of $V,$ then $W\cap W^{\prime}$ is a
subspace of $V.$ $W\cup W^{\prime}$ may not be.
\end{proposition}
\begin{proof}
We can use the theorem. First, $\vec{0}\in W\cap W^{\prime}$ since it lies in
both subspaces. Notice that if $x,y\in W\cap W^{\prime},$ then since $x,y\in
W,$ we have $x+y\in W,$ and similarly for $W^{\prime},$ so $x+y\in W\cap
W^{\prime}.$ The argument for scalar multiplication is similar. For the union,
notice that the subspaces $\mathbb{R}\times\left\{ 0\right\} $ and $\left\{
0\right\} \times\mathbb{R}$ of $\mathbb{R}^{2}$ contain $\left( 1,0\right)
$ and $\left( 0,1\right) $ respectively, but their union does not contain
the sum $\left( 1,1\right) ,$ so the union is not a subspace.
\end{proof}
\section{Examples of subspaces}
\subsection{Subspaces of n-tuples}
The set of all points in $\mathbb{R}^{3}$ satisfying $x+y-z=0$ is a subspace.
Note that the set of points satisfying $x+y-z=2$ is not a subspace (why?).
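For the first set, write $W=\left\{ \left( x,y,z\right) \in\mathbb{R}
^{3}:x+y-z=0\right\} $ and check the three conditions of Theorem
\ref{thm:subspaces}: $\left( 0,0,0\right) \in W$ since $0+0-0=0$; if
$x_{1}+y_{1}-z_{1}=0$ and $x_{2}+y_{2}-z_{2}=0,$ then $\left( x_{1}
+x_{2}\right) +\left( y_{1}+y_{2}\right) -\left( z_{1}+z_{2}\right)
=0+0=0$; and if $x+y-z=0,$ then $ax+ay-az=a\left( x+y-z\right) =0.$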
\subsection{Subspaces of matrices}
The set of diagonal matrices is a subspace, as are upper triangular matrices
and lower triangular matrices. The trace of an $n\times n$ matrix $M$ is
defined as
\[
\operatorname{tr}\left( M\right) =\sum_{i=1}^{n}M_{ii}.
\]
The set of matrices whose trace is zero is a subspace since the trace
satisfies%
\begin{align*}
\operatorname{tr}\left( M+M^{\prime}\right) & =\operatorname{tr}\left(
M\right) +\operatorname{tr}\left( M^{\prime}\right) \\
\operatorname{tr}\left( aM\right) & =a~\operatorname{tr}\left( M\right) .
\end{align*}
(Check this!)
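One way to do the check, directly from the entrywise definitions of matrix
addition and scalar multiplication:
\begin{align*}
\operatorname{tr}\left( M+M^{\prime}\right) & =\sum_{i=1}^{n}\left(
M+M^{\prime}\right) _{ii}=\sum_{i=1}^{n}\left( M_{ii}+M_{ii}^{\prime
}\right) =\sum_{i=1}^{n}M_{ii}+\sum_{i=1}^{n}M_{ii}^{\prime},\\
\operatorname{tr}\left( aM\right) & =\sum_{i=1}^{n}\left( aM\right)
_{ii}=\sum_{i=1}^{n}aM_{ii}=a\sum_{i=1}^{n}M_{ii}.
\end{align*}
In particular, if $\operatorname{tr}\left( M\right) =\operatorname{tr}
\left( M^{\prime}\right) =0,$ then $\operatorname{tr}\left( M+M^{\prime
}\right) =0$ and $\operatorname{tr}\left( aM\right) =0,$ and the zero
matrix has trace zero, so Theorem \ref{thm:subspaces} applies.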
The set of matrices over $\mathbb{R}$ with only positive entries is not a
subspace, nor is the set of upper triangular matrices with $1$'s on the
diagonal (unit upper triangular matrices).
\subsection{Subspaces of functions}
The set of continuous functions $\mathbb{R}\rightarrow\mathbb{R}$ is a
subspace of the set of all functions $\mathbb{R}\rightarrow\mathbb{R}$. So is
the set of functions $f:X\rightarrow F$ taking the value $0$ at a particular
point, e.g., the functions $\mathbb{R}\rightarrow\mathbb{R}$ such that
$f\left( 1\right) =0.$ Note that the functions such that $f\left( 1\right)
=1$ do not form a subspace. The set of functions satisfying a linear
homogeneous differential equation forms a subspace; for instance, the
functions satisfying $f^{\prime\prime}+f=0$ or the functions satisfying
$\frac{\partial f}{\partial x}-\frac{\partial f}{\partial y}=0$ form
subspaces. If the differential equation is inhomogeneous or nonlinear, the
solution set is generally not a subspace.
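For instance, for $f^{\prime\prime}+f=0$: if $f$ and $g$ are solutions and
$a\in\mathbb{R},$ then by linearity of the derivative
\[
\left( f+g\right) ^{\prime\prime}+\left( f+g\right) =\left(
f^{\prime\prime}+f\right) +\left( g^{\prime\prime}+g\right) =0\text{\quad
and\quad}\left( af\right) ^{\prime\prime}+af=a\left( f^{\prime\prime
}+f\right) =0,
\]
and the zero function is a solution, so Theorem \ref{thm:subspaces} applies.
By contrast, for the inhomogeneous equation $f^{\prime\prime}+f=1$ the zero
function is not a solution, so the solution set cannot be a subspace.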
\section{Problems}
FIS Section 1.3, problems 13, 17, 18, 19, 20, 23, 28, 30.
Comprehensive/Graduate Option: FIS Section 1.3 problem 31.
\section{Linear combinations and spans}
Notice that given a vector space $V$ and a vector $v\in V,$ the subset
$\left\{ tv:t\in F\right\} $ is a subspace. In fact, this is the smallest
subspace containing $v.$ Similarly, if $w\in V$ also, then we can consider the
set $\left\{ tv+sw:t,s\in F\right\} ,$ and this is also a subspace. This
subspace is called the \emph{span} of $\left\{ v,w\right\} ,$ denoted
$\operatorname{span}\left( \left\{ v,w\right\} \right) $, or the subspace
\emph{generated by }$v$ and $w.$ For given $t,s\in F$ and $v,w\in V,$ we say
that $tv+sw$ is a \emph{linear combination} of $v$ and $w.$ We can generalize
this in the following way.
\begin{definition}
Let $V$ be a vector space over a field $F$ and let $S\subseteq V$ be nonempty
(we usually consider this set to be small, even finite). A vector $v\in V$ is
a \emph{linear combination} of vectors of $S$ if there exist vectors
$u_{1},\ldots,u_{n}\in S$ and scalars $a_{1},\ldots,a_{n}\in F$ such that
\[
v=a_{1}u_{1}+\cdots+a_{n}u_{n}.
\]
We say that $v$ is a linear combination of the vectors $u_{1},\ldots,u_{n}$
and $a_{1},\ldots,a_{n}$ are the coefficients of the linear combination.
\end{definition}
Note that a linear combination is always a finite sum, even if $S$ is not
finite, since infinite sums do not make sense in a general vector space.

How would one check to see if a vector is a linear combination of a collection
of others? Is $\left( 1,2,3\right) $ a linear combination of the vectors
$\left( 1,0,1\right) $ and $\left( 0,1,1\right) $? The problem boils down
to whether we can find appropriate coefficients:%
\[
a_{1}\left(
\begin{array}
[c]{c}%
1\\
0\\
1
\end{array}
\right) +a_{2}\left(
\begin{array}
[c]{c}%
0\\
1\\
1
\end{array}
\right) =\left(
\begin{array}
[c]{c}%
1\\
2\\
3
\end{array}
\right) ,
\]
or, written as a matrix problem
\[
\left(
\begin{array}
[c]{cc}%
1 & 0\\
0 & 1\\
1 & 1
\end{array}
\right) \left(
\begin{array}
[c]{c}%
a_{1}\\
a_{2}%
\end{array}
\right) =\left(
\begin{array}
[c]{c}%
1\\
2\\
3
\end{array}
\right) .
\]
Can this equation be solved? Yes: the first two rows force $a_{1}=1$ and
$a_{2}=2,$ and these values also satisfy the third row since $1+2=3.$ So
$\left( 1,2,3\right) $ is a linear combination of $\left( 1,0,1\right) $
and $\left( 0,1,1\right) ,$ namely $\left( 1,2,3\right) =\left(
1,0,1\right) +2\left( 0,1,1\right) .$ Solving the system is exactly finding
the appropriate coefficients.
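By contrast, consider the variant vector $\left( 1,2,4\right) $: the first
two rows again force $a_{1}=1$ and $a_{2}=2,$ but the third row requires
$a_{1}+a_{2}=4,$ which fails since $1+2=3\neq4.$ That system is inconsistent,
so $\left( 1,2,4\right) $ is not a linear combination of $\left(
1,0,1\right) $ and $\left( 0,1,1\right) .$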
\begin{definition}
Given a nonempty subset $S\subseteq V,$ the span of $S,$ denoted
$\operatorname{span}\left( S\right) ,$ is the set of all linear combinations
of vectors in $S.$ We also define $\operatorname{span}\left( \varnothing
\right) =\left\{ \vec{0}\right\} .$
\end{definition}
As in the example above, we see that the span gives a subspace.
\begin{theorem}
Given any nonempty subset $S\subseteq V,$ the span of $S$ is a subspace of
$V.$ Moreover, any subspace of $V$ that contains $S$ must contain
$\operatorname{span}\left( S\right) .$
\end{theorem}
\begin{proof}
Straightforward by showing that it contains $\vec{0},$ is closed under
addition (the hardest part: the sum of two linear combinations of vectors in
$S$ is again a linear combination of vectors in $S$ -- think about it), and is
closed under scalar multiplication. For the second part, we need to know that
linear combinations of vectors in $S$ must be in the subspace, but this
follows by induction and the fact that linear combinations of two vectors must
be in the subspace (from Theorem \ref{thm:subspaces}).
\end{proof}
Describing vector spaces as spans is quite easy and natural. However, some
vector spaces are described in a different way: as the solution to some
equations. The process of going from describing a vector space as a set of
equations to describing it as a span is usually called \textquotedblleft
finding the general solution.\textquotedblright\ For instance, the set of
functions satisfying
\[
f^{\prime\prime}+f=0
\]
can be shown to be a vector space. But that vector space is generated by $\sin
t$ and $\cos t$ and so the vector space can be described as
\[
\left\{ a_{1}\sin t+a_{2}\cos t:a_{1},a_{2}\in\mathbb{R}\right\} .
\]
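Indeed, one can check directly that all of these functions solve the equation:
for $f\left( t\right) =a_{1}\sin t+a_{2}\cos t,$
\[
f^{\prime\prime}\left( t\right) =-a_{1}\sin t-a_{2}\cos t=-f\left(
t\right) ,
\]
so $f^{\prime\prime}+f=0.$ (That every solution has this form is a fact from
differential equations.)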
\section{Problems}
FIS Section 1.4, problems 7, 11, 12, 13, 14, 15, 16.
\section{Linear dependence and independence}
\begin{definition}
A subset $S\subseteq V$ of a vector space $V$ over $F$ is called
\emph{linearly dependent} if there exists a finite number of distinct vectors
$u_{1},\ldots,u_{n}\in S$ and scalars $a_{1},\ldots,a_{n}\in F$ not all zero
such that
\[
a_{1}u_{1}+a_{2}u_{2}+\cdots+a_{n}u_{n}=0.
\]
We may also say the vectors in $S$ are linearly dependent. If $S$ is not
linearly dependent, it is said to be \emph{linearly independent}, or also that
its vectors are linearly independent.\bigskip
\end{definition}
The importance and elegance of this definition cannot be overstated. Note that
if a finite set of vectors is linearly dependent, that means that some vector
can be written as a linear combination of the others. But we do not have to
specify which one, and it is often the case that not every vector can be
written as a linear combination of others. For instance, consider the vectors
\[
\left\{ \left(
\begin{array}
[c]{c}%
1\\
0\\
0
\end{array}
\right) ,\left(
\begin{array}
[c]{c}%
0\\
1\\
0
\end{array}
\right) ,\left(
\begin{array}
[c]{c}%
0\\
0\\
1
\end{array}
\right) ,\left(
\begin{array}
[c]{c}%
1\\
1\\
0
\end{array}
\right) \right\} .
\]
Notice that the vector $\left(
\begin{array}
[c]{c}%
0\\
0\\
1
\end{array}
\right) $ cannot be written as a linear combination of the others, but
$\left(
\begin{array}
[c]{c}%
1\\
0\\
0
\end{array}
\right) $ can. We can see that the set is linearly dependent, however, by
observing that
\[
\left(
\begin{array}
[c]{c}%
1\\
0\\
0
\end{array}
\right) +\left(
\begin{array}
[c]{c}%
0\\
1\\
0
\end{array}
\right) +0\left(
\begin{array}
[c]{c}%
0\\
0\\
1
\end{array}
\right) -\left(
\begin{array}
[c]{c}%
1\\
1\\
0
\end{array}
\right) =\left(
\begin{array}
[c]{c}%
0\\
0\\
0
\end{array}
\right) .
\]
How does one check to see if vectors are linearly dependent? Try to solve
\[
a_{1}v_{1}+\cdots+a_{n}v_{n}=0
\]
for coefficients $a_{1},\ldots,a_{n}$ that are not all zero (notice we can
always take all the $a_{i}$'s to be zero; that is called the trivial
solution). For vectors in $F^{n},$ this can be translated into a matrix
equation
\[
\left(
\begin{array}
[c]{cccc}%
| & | & & |\\
v_{1} & v_{2} & \cdots & v_{n}\\
| & | & & |
\end{array}
\right) \left(
\begin{array}
[c]{c}%
a_{1}\\
\vdots\\
a_{n}%
\end{array}
\right) =\left(
\begin{array}
[c]{c}%
0\\
\vdots\\
0
\end{array}
\right)
\]
where you are trying to find the coefficients. This is the kind of linear
algebra you learned in Math 215 or Math 313 (for instance, row reduce to row
echelon form).
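For instance, for the four vectors of the previous example, the matrix
equation is
\[
\left(
\begin{array}
[c]{cccc}%
1 & 0 & 0 & 1\\
0 & 1 & 0 & 1\\
0 & 0 & 1 & 0
\end{array}
\right) \left(
\begin{array}
[c]{c}%
a_{1}\\
a_{2}\\
a_{3}\\
a_{4}%
\end{array}
\right) =\left(
\begin{array}
[c]{c}%
0\\
0\\
0
\end{array}
\right) .
\]
The matrix is already in row echelon form, and $a_{4}$ is a free variable:
taking $a_{4}=-1$ gives $a_{1}=1,$ $a_{2}=1,$ $a_{3}=0,$ which is exactly the
nontrivial dependence displayed above.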
\begin{theorem}
Let $V$ be a vector space, and let $S_{1}\subseteq S_{2}\subseteq V.$ If
$S_{1}$ is linearly dependent then $S_{2}$ is linearly dependent. If $S_{2}$
is linearly independent then $S_{1}$ is linearly independent.
\end{theorem}
\begin{theorem}
Let $S$ be a linearly independent subset of $V$ and let $v\in V\setminus S.$
Then $S\cup\left\{ v\right\} $ is linearly dependent if and only if
$v\in\operatorname{span}\left( S\right) .$
\end{theorem}
\begin{proof}
Suppose $S\cup\left\{ v\right\} $ is linearly dependent. Then we know that
there exist distinct $u_{1},\ldots,u_{n}\in S$ and $a_{1},\ldots,a_{n},b\in
F,$ not all $0$, such that
\[
a_{1}u_{1}+\cdots+a_{n}u_{n}+bv=\vec{0}.
\]
Since $S$ is linearly independent, $b\neq0.$ Thus
\[
v=-b^{-1}a_{1}u_{1}-\cdots-b^{-1}a_{n}u_{n}%
\]
and so $v$ is a linear combination of vectors in $S$ and hence in the span.
Conversely, suppose $v\in\operatorname{span}\left( S\right) .$ Then we can
find scalars and vectors in $S$ such that
\[
v=a_{1}u_{1}+\cdots+a_{k}u_{k}%
\]
so
\[
a_{1}u_{1}+\cdots+a_{k}u_{k}-v=\vec{0}.
\]
Since the coefficient of $v$ in this relation is $-1\neq0,$ the set
$S\cup\left\{ v\right\} $ is linearly dependent.
\end{proof}
\section{Problems}
FIS Section 1.5, problems 4, 6, 9, 10, 12, 14.
\section{Basis and dimension}
\begin{definition}
A \emph{basis} $\beta$ for a vector space V is a linearly independent subset
of $V$ that generates $V,$ i.e., $V=\operatorname{span}\left( \beta\right)
.$ If $\beta$ is a basis for $V,$ we also say that the vectors of $\beta$ form
a basis for $V.$
\end{definition}
We often have a natural basis that is called the standard basis:
\begin{itemize}
\item The standard basis for $F^{n}$ is $e_{1}=\left( 1,0,\ldots,0\right)
,e_{2}=\left( 0,1,0,\ldots,0\right) ,\ldots,e_{n}=\left( 0,0,\ldots
,0,1\right) $ (verified just after this list).

\item The standard basis for the polynomials of degree at most $n$ with
coefficients in $F,$ denoted $P_{n}\left( F\right) ,$ is $\left\{
1,x,x^{2},\ldots,x^{n}\right\} .$ The standard basis for all polynomials
$P\left( F\right) $ is $\left\{ 1,x,x^{2},x^{3},\ldots\right\} .$
\end{itemize}
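To verify that $\left\{ e_{1},\ldots,e_{n}\right\} $ is a basis for $F^{n},$
note that every $v=\left( v_{1},\ldots,v_{n}\right) $ satisfies
\[
v=v_{1}e_{1}+v_{2}e_{2}+\cdots+v_{n}e_{n},
\]
so the set generates $F^{n};$ and if $a_{1}e_{1}+\cdots+a_{n}e_{n}=\left(
a_{1},\ldots,a_{n}\right) =\vec{0},$ then every $a_{i}=0,$ so the set is
linearly independent.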
Note that a basis need not be finite. It is a curious fact that every vector
space has a basis (the proof is hard and uses Zorn's Lemma), though not always
a useful basis.
\begin{theorem}
Let $V$ be a vector space and $\beta=\left\{ u_{1},u_{2},\ldots
,u_{n}\right\} $ be a subset of $V.$ The set $\beta$ is a basis for $V$ if
and only if each $v\in V$ can be uniquely expressed as a linear combination of
vectors of $\beta,$ i.e., for each $v\in V$ there exist unique scalars
$a_{1},\ldots,a_{n}$ such that
\[
v=a_{1}u_{1}+\cdots+a_{n}u_{n}.
\]
\end{theorem}
\begin{proof}
First suppose $\beta$ is a basis. Since $\beta$ spans, there must be scalars
such that $v=a_{1}u_{1}+\cdots+a_{n}u_{n}.$ We now need to show that this
representation of $v$ is unique. Suppose $v=b_{1}u_{1}+\cdots+b_{n}u_{n}$ as
well. Then we find
\begin{align*}
\vec{0} & =v-v=a_{1}u_{1}+\cdots+a_{n}u_{n}-\left( b_{1}u_{1}+\cdots
+b_{n}u_{n}\right) \\
& =\left( a_{1}-b_{1}\right) u_{1}+\cdots+\left( a_{n}-b_{n}\right)
u_{n}.
\end{align*}
Since $\beta$ is linearly independent, it follows that $a_{i}-b_{i}=0$ for all
$i,$ or $a_{i}=b_{i},$ and hence the coefficients are unique.

Conversely, suppose each $v\in V$ has a unique such expression. Then certainly
$\beta$ spans $V.$ Moreover, $\vec{0}$ has a unique representation as a linear
combination, and since that must be $a_{1}=a_{2}=\cdots=a_{n}=0,$ $\beta$ must
be linearly independent.
\end{proof}
\begin{theorem}
\label{th:subset}If a vector space $V$ is generated by a finite set $S,$ then
some subset of $S$ is a basis for $V,$ and hence $V$ has a finite basis.
\end{theorem}
\begin{proof}
Suppose $V$ is generated by a finite set $S.$ Let $S^{\prime}=\left\{
u_{1}^{\prime},\ldots,u_{k}^{\prime}\right\} $ be a subset of $S$ of largest
cardinality such that $S^{\prime}$ is linearly independent. (If $S$ contains a
nonzero vector, then the set containing just that vector is linearly
independent; if not, $V=\left\{ \vec{0}\right\} $ and the empty set is a
basis.) It follows that for any $w\in S\setminus S^{\prime},$ we can write $w$
as a linear combination of vectors in $S^{\prime}$: the equation
$aw+a_{1}u_{1}^{\prime}+\cdots+a_{k}u_{k}^{\prime}=\vec{0}$ must have a
nontrivial solution, since otherwise $S^{\prime}\cup\left\{ w\right\} $
would be a larger linearly independent subset, and in any nontrivial solution
$a\neq0$ because $S^{\prime}$ is linearly independent. So that means that
$\operatorname{span}S=\operatorname{span}S^{\prime}.$ Since
$\operatorname{span}S=V,$ the result follows.
\end{proof}
\begin{theorem}
[Replacement Theorem]Let $V$ be a vector space that is generated by a set $G$
containing exactly $n$ vectors and let $L$ be a linearly independent subset of
$V$ containing exactly $m$ vectors. Then $m\leq n$ and there exists a set
$H\subseteq G$ containing exactly $n-m$ vectors such that $L\cup H$ generates
$V.$
\end{theorem}
\begin{proof}
See worksheet.
\end{proof}
\begin{corollary}
Let $V$ be a vector space having a finite basis. Then any basis for $V$
contains the same number of vectors.
\end{corollary}
\begin{proof}
Let $\beta$ and $\beta^{\prime}$ be bases with $n$ and $m$ elements,
respectively. Using the replacement theorem with $G=\beta$ and $L=\beta
^{\prime}$ yields $m\leq n,$ while using it with $G=\beta^{\prime}$ and
$L=\beta$ yields $n\leq m,$ hence $n=m.$
\end{proof}
\begin{definition}
A vector space is called \emph{finite-dimensional} if it has a basis
consisting of a finite number of vectors. The unique number of vectors in each
basis for $V$ is called the \emph{dimension} of $V$ and is denoted $\dim V.$ A
vector space that is not finite-dimensional is called
\emph{infinite-dimensional}.
\end{definition}
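Reading off the sizes of the standard bases above gives $\dim F^{n}=n$ and
$\dim P_{n}\left( F\right) =n+1$ (note the constant term), while $P\left(
F\right) $ is infinite-dimensional. Similarly, $\dim F^{n\times m}=nm,$ using
as a basis the matrices with a single entry equal to $1$ and all other entries
equal to $0.$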
\begin{corollary}
Let $V$ be a vector space of dimension $n.$
\begin{enumerate}
\item Any finite generating set for $V$ contains at least $n$ vectors, and a
generating set for $V$ that contains exactly $n$ vectors is a basis for $V.$
\item Any linearly independent subset of $V$ that contains exactly $n$ vectors
is a basis for $V.$ Hence any linearly independent set has at most $n$ vectors.
\item Every linearly independent subset of $V$ can be extended to a basis for
$V.$
\end{enumerate}
\end{corollary}
\begin{proof}
1. Given any generating set, it has a subset that is a basis by Theorem
\ref{th:subset}. That basis must have $n$ elements and so the set must have at
least $n$ elements. If the generating set has exactly $n$ elements, then the
subset must be the set itself.

2. Apply the Replacement Theorem with $G$ a basis ($n$ vectors) and $L$ the
linearly independent set; then $m=n,$ so $H$ contains $n-n=0$ vectors and
$L=L\cup H$ generates $V,$ hence is a basis by part 1. The Replacement Theorem
also gives $m\leq n$ directly, so any linearly independent set has at most $n$
vectors.

3. Apply the Replacement Theorem with $G$ a basis. Given a linearly
independent set $L$ with $m$ vectors, we can extend it to a generating set
$L\cup H$ with at most $n$ elements by taking $H\subseteq G$ as in the
theorem. Since every generating set has at least $n$ elements by part 1,
$L\cup H$ has exactly $n$ elements and is therefore a basis.
\end{proof}
\begin{theorem}
Let $W$ be a subspace of a finite-dimensional vector space $V.$ Then $W$ is
finite-dimensional and $\dim W\leq\dim V.$ Moreover, if $\dim W=\dim V,$ then
$W=V.$
\end{theorem}
\begin{proof}
If $W=\left\{ \vec{0}\right\} ,$ then $\dim W=0\leq\dim V.$ If
$W\neq\left\{ \vec{0}\right\} ,$ then $W$ contains a nonzero vector $x_{1}.$
The set $\left\{ x_{1}\right\} $ is a linearly independent set. We can
continue to choose vectors $x_{2},\ldots,x_{k}\in W$ such that $\left\{
x_{1},\ldots,x_{k}\right\} $ is linearly independent until we cannot do this
any more. We know that there can be no linearly independent set in $W$ with
more than $\dim V$ elements, so $k\leq\dim V.$ This set must generate $W,$
otherwise we could extend it, so $\left\{ x_{1},\ldots,x_{k}\right\} $ forms
a basis. If $k=\dim V$, then by the corollary above we must have that the
basis for $W$ spans $V,$ and hence $V=W.$
\end{proof}
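For instance, the subspaces of $\mathbb{R}^{3}$ are exactly $\left\{ \vec
{0}\right\} $ (dimension $0$), lines through the origin (dimension $1$),
planes through the origin (dimension $2$), and $\mathbb{R}^{3}$ itself
(dimension $3$): the theorem rules out dimensions larger than $3,$ and $\dim
W=3$ forces $W=\mathbb{R}^{3}.$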
\begin{corollary}
If $W$ is a subspace of a finite-dimensional vector space $V,$ then any basis
for $W$ can be extended to a basis for $V.$
\end{corollary}
\begin{proof}
We can produce the basis for $V$ by starting with a basis for $W$ and adding
in vectors one by one as in the proof of the previous theorem. Since any
linearly independent set has at most $\dim V$ elements, this process will
eventually produce a basis for $V$ that extends the basis for $W.$ One could
also use the Replacement Theorem.
\end{proof}
\section{Problems}
FIS Section 1.6, problems 4, 11, 12, 13, 16, 17, 20, 23, 33, 34.
Comprehensive/Graduate Option: FIS Section 1.6 problem 35.
\end{document}