\section{Algorithm description}
\label{sec:algorithm}

\subsection{Basic structure}
\label{sec:algorithm:basic-structure}
The basic structure of the algorithm is fairly simple. For any graph $G=(V,E)$, we pick a vertex $v\in V$. Either that vertex is in an m.i.s. (and none of its neighbours), or it is not. This gives the recurrence: \refeq[basic-structure]{\ms(G) = \max(1 + \ms(G\ex \iN(v)), \ms(G\ex v)).}

Every recursive algorithm needs a base case.

\begin{thmproof}[base-case]{lemma}{For a graph $G$ with $|G|\leq1$, we have $\ms(G)=|G|$.}
    In both cases, $|G|=0$ and $|G|=1$, the entire vertex set is independent, so it is an m.i.s. of size $|G|$.
\end{thmproof}
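
To make the recurrence and its base case concrete, here is a minimal sketch in Python. The dictionary-based graph representation and the helper names (\texttt{remove}, \texttt{closed\_neighbourhood}, \texttt{ms\_naive}) are chosen only for illustration and are not part of Robson's presentation \cite{robson}.

\begin{verbatim}
# Minimal sketch of the basic recurrence. A graph is represented as a dict
# mapping each vertex to the set of its neighbours (an assumption of this
# sketch, not notation from the text).

def remove(graph, vertices):
    """Return the subgraph induced by dropping `vertices`."""
    vertices = set(vertices)
    return {u: nbs - vertices for u, nbs in graph.items() if u not in vertices}

def closed_neighbourhood(graph, v):
    """The inclusive neighbourhood of v: the vertex v plus its neighbours."""
    return graph[v] | {v}

def ms_naive(graph):
    """Size of a maximum independent set, via the basic recurrence."""
    if len(graph) <= 1:            # base case: ms(G) = |G| when |G| <= 1
        return len(graph)
    v = next(iter(graph))          # any vertex will do in the naive version
    with_v = 1 + ms_naive(remove(graph, closed_neighbourhood(graph, v)))
    without_v = ms_naive(remove(graph, {v}))
    return max(with_v, without_v)
\end{verbatim}

On the path with three vertices, for example, this returns $2$, as expected.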

The rest of this section is devoted to optimising this basic structure by cleverly choosing $v$ and pruning the search tree.

\begin{thmproof}[one-neighbour]{lemma}{For a graph $G=(V,E)$ and a vertex $v\in V$, if $v$ is in no m.i.s., then there is a neighbour $w\in N(v)$ that is in an m.i.s.}
    By contradiction. Suppose there were an m.i.s. $ms_1$ containing neither $v$ nor any of its neighbours. Then $ms_2=ms_1\with v$ is still an independent set, and $|ms_2| = |ms_1|+1 > |ms_1|$. Therefore, $ms_1$ was not maximal.
\end{thmproof}

\begin{thmproof}[two-neighbours]{lemma}{For a graph $G=(V,E)$ and vertex $v\in V$, there is an m.i.s. with $v$, or there is an m.i.s. with two distinct elements $w_1,w_2\in N(v)$.}
    If $v$ is in an m.i.s., we are done. Otherwise, by \autoref{lem:one-neighbour} there is an m.i.s. $ms$ containing some neighbour $w_1\in N(v)$. Suppose there were no second neighbour $w_2\in N(v)$, $w_2\neq w_1$, with $w_2\in ms$. Then $ms\ex w_1\with v$ is an independent set of the same size, hence an m.i.s., and it contains $v$.
\end{thmproof}

At this point, following Robson \cite{robson}, we define the function $\ms^n(G,S)$ as $\ms(G)$ under the additional requirement that the m.i.s. contains at least $n$ elements of $S$.

Based on \autoref{lem:one-neighbour} and \ref{lem:two-neighbours} we may then rewrite \autoref{eq:basic-structure} to \refeq[with-two-neighbours]{\ms(G) = \max(1+\ms(G\ex\iN(v)), \ms^2(G\ex v, N(v))).}
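
A direct implementation of $\ms^2$ as used here simply tries every pair from $S$; the sketch below, which reuses \texttt{remove} and \texttt{closed\_neighbourhood} from the sketch in \autoref{sec:algorithm:basic-structure} and takes the $\ms$ implementation as a parameter, is again only an illustration.

\begin{verbatim}
from itertools import combinations

def ms2_bruteforce(graph, S, ms):
    """Naive ms^2: the best solution over all independent pairs from S.
    `ms` is whichever implementation of the ms function is available."""
    best = 0
    for s1, s2 in combinations(S, 2):
        if s2 not in graph[s1]:        # the chosen pair must be independent
            dropped = (closed_neighbourhood(graph, s1)
                       | closed_neighbourhood(graph, s2))
            best = max(best, 2 + ms(remove(graph, dropped)))
    return best
\end{verbatim}

Its loop over all pairs of $S$ is the quadratic behaviour discussed in \autoref{sec:algorithm:good-v}; \autoref{sec:algorithm:helper-function} is devoted to doing better.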

\subsection{Picking a good $v$}
\label{sec:algorithm:good-v}
Until now, we have assumed that we have some vertex $v$. Let us now discuss how to pick a `good' $v$, i.e. one that allows for the most efficient use of \autoref{eq:basic-structure} and \ref{eq:with-two-neighbours}.

\begin{thmproof}[basic-structure-high-degree]{lemma}{It is most efficient to use \autoref{eq:basic-structure} with a $v$ with large $d(v)$.}
    The first argument of the $\max$, $1+\ms(G\ex\iN(v))$, recurses on a smaller graph when $d(v)$ is large. For the second argument, $\ms(G\ex v)$, the choice of $v$ makes no difference, since only $v$ itself is removed.
\end{thmproof}

\begin{thmproof}[two-neighbours-low-degree]{lemma}{It is most efficient to use \autoref{eq:with-two-neighbours} with a $v$ with small $d(v)$.}
    A straightforward implementation of $\ms^2(G\ex v,N(v))$ will consider all pairs of vertices in $N(v)$, which is quadratic in $d(v)$. The gain in the first argument, $1+\ms(G\ex\iN(v))$, is only linear in $d(v)$. Therefore, we should prefer a small $d(v)$.
\end{thmproof}

In general, we would like to use \autoref{eq:with-two-neighbours}, because the second argument of the $\max$ is more restricted than in \autoref{eq:basic-structure}. However, for a $v$ with large $d(v)$, \autoref{eq:basic-structure} may be more efficient.

So, we should try to pick a $v$ with small $d(v)$. We apply \autoref{eq:with-two-neighbours} if $d(v)$ is small enough. If $d(v)$ is too large to ensure efficient usage of \autoref{eq:with-two-neighbours}, we apply \autoref{eq:basic-structure}.

But this latter recurrence is clearly most efficient for \emph{large} $d(v)$. Therefore, if $d(v)$ is too large to use $v$ in \autoref{eq:with-two-neighbours}, we take the neighbour $w\in N(v)$ with the largest $d(w)$ and apply \autoref{eq:basic-structure} to that vertex. In the branch $G\ex w$, the degree of $v$ has decreased by one, because $w$ has been removed; in the branch $G\ex\iN(w)$, and especially in a graph with many three-cycles, the degrees of the remaining vertices may decrease considerably faster. So, after at most a few applications of \autoref{eq:basic-structure} we may use the more efficient \autoref{eq:with-two-neighbours} again.
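
As a sketch, this selection step could look as follows, in the same ad-hoc representation as before; the threshold for `small enough' is left as a parameter, which in \autoref{alg:with-robsons-optimisations} effectively becomes $3$.

\begin{verbatim}
def pick_branch_vertex(graph, threshold=3):
    """Pick v of minimal degree. If d(v) is small enough, branch on v with
    the refined recurrence; otherwise branch on its highest-degree
    neighbour w with the basic recurrence."""
    v = min(graph, key=lambda u: len(graph[u]))
    if len(graph[v]) <= threshold:
        return v, 'with-two-neighbours'
    w = max(graph[v], key=lambda u: len(graph[u]))
    return w, 'basic-structure'
\end{verbatim}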

\subsubsection{Dominance}
\label{sec:algorithm:good-v:dominance}
\begin{thmproof}[dominance]{lemma}{If for a graph $G=(V,E)$ with vertices $v,w\in V$ we know $v>w$, we have $\ms(G)=\ms(G\ex w)$.}
    If there is an m.i.s. $ms$ with $w\in ms$, then $ms\ex w\with v$ is an independent set of the same size, hence an m.i.s. as well. No m.i.s. is therefore lost by removing $w$.
\end{thmproof}

When we are about to apply \autoref{eq:basic-structure} to $w$ instead of $v$, it may be the case that $v>w$. In this case, we may bypass \autoref{eq:basic-structure} entirely by applying the recurrence $\ms(G)=\ms(G\ex w)$ of \autoref{lem:dominance}. After this, since the maximum degree of any vertex in our graph is $4$, we will be able to apply \autoref{eq:with-two-neighbours} instead: $d(v)$ will have decreased by one, to at most $3$.
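
The dominance test itself is cheap. The sketch below assumes, as the proof of \autoref{lem:dominance} suggests, that $v>w$ stands for $\iN(v)\subseteq\iN(w)$; if the notation is defined the other way around, the comparison should be flipped.

\begin{verbatim}
def dominance_applies(graph, v, w):
    """Check whether the dominance lemma lets us discard w, reading v > w as:
    the inclusive neighbourhood of v is contained in that of w (assumption)."""
    return closed_neighbourhood(graph, v) <= closed_neighbourhood(graph, w)

# If it applies, we may simply recurse on the graph without w:
#     ms(remove(graph, {w}))
\end{verbatim}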

This gives us \autoref{alg:with-best-vertex}, which combines \autoref{eq:basic-structure} and \ref{eq:with-two-neighbours}.

\begin{algorithm}[h]
    \caption{Finding the size of the m.i.s. of a graph}
    \label{alg:with-best-vertex}
    \begin{algorithmic}[1]
        \REQUIRE $G=(V,E)$
        \IF{$|G|\le1$}
            \RETURN $|G|$ \COMMENT{\autoref{lem:base-case}}
        \ELSE
            \STATE $v\gets v\in V$ with minimal $d(v)$
            \STATE $w\gets w\in N(v)$ with maximal $d(w)$
            \IF{$d(v)$ is small enough}
                \RETURN $\max(1+\ms(G\ex\iN(v)), \ms^2(G\ex v,N(v)))$ \COMMENT{\autoref{eq:with-two-neighbours}}
            \ELSIF{$v>w$}
                \RETURN $\ms(G\ex w)$ \COMMENT{\autoref{lem:dominance}}
            \ELSE
                \RETURN $\max(1+\ms(G\ex\iN(w)), \ms(G\ex w))$ \COMMENT{\autoref{eq:basic-structure}}
            \ENDIF
        \ENDIF
    \end{algorithmic}
\end{algorithm}

\begin{thmproof}[first-ms-correct]{theorem}{\autoref{alg:with-best-vertex} is a correct implementation of the $\ms$ function.}
    This follows from \autoref{lem:base-case} and \ref{lem:dominance}, together with \autoref{eq:basic-structure} and \ref{eq:with-two-neighbours}.
\end{thmproof}

\subsection{Strongly connected components}
\label{sec:algorithm:components}
\begin{thmproof}[components]{lemma}{The size of the m.i.s. of a graph is equal to the sum of the sizes of the m.i.s. of all its strongly connected components.}
    Since there are no edges between different strongly connected components, the union of m.i.s. of the components is an independent set of the whole graph. Hence $\ms(G)$ is at least the sum of the sizes of the components' m.i.s.

    Conversely, the restriction of any independent set of the graph to a component $C$ is an independent set of $C$, and therefore contains at most $\ms(C)$ vertices. Summing over all components shows that $\ms(G)$ is at most that same sum.
\end{thmproof}

It is not difficult to see that, when a graph has multiple strongly connected components, solving the components separately in this way is more efficient than applying \autoref{alg:with-best-vertex} to the whole graph.
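
A sketch of this decomposition, again in the representation used earlier (for the undirected graphs considered here, the strongly connected components are simply the connected components):

\begin{verbatim}
def components(graph):
    """Split the graph into its connected components, one dict each."""
    seen, comps = set(), []
    for start in graph:
        if start in seen:
            continue
        comp, stack = set(), [start]
        while stack:                   # depth-first search from `start`
            u = stack.pop()
            if u in comp:
                continue
            comp.add(u)
            stack.extend(graph[u] - comp)
        seen |= comp
        comps.append({u: graph[u] for u in comp})
    return comps

def ms_by_components(graph, ms):
    """ms(G) as the sum over the components; `ms` is any ms implementation."""
    return sum(ms(component) for component in components(graph))
\end{verbatim}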

\subsection{Further optimisations}
\label{sec:algorithm:further-optimisations}
Although I have referred to Robson \cite{robson} and used his notation already, this is how far I got without his help. Robson then introduces a number of further optimisations. In this section, we use $G$ for the graph at hand, $v$ for a vertex of minimal degree in $G$, and $w$ for the neighbour of $v$ with the highest degree.

\begin{thmproof}[robson-ms-d1]{lemma}{If $d(v)=1$, we have $\ms(G)=1+\ms(G\ex\iN(v))$.}
    If $v$ is in some m.i.s., we are done. Otherwise, by \autoref{lem:one-neighbour}, its only neighbour $w$ is in an m.i.s., say $ms$. But then $ms\ex w\with v$ is also independent and has the same size, so there is an m.i.s. containing $v$ after all. In both cases some m.i.s. contains $v$ and excludes its neighbour, which gives the recurrence.
\end{thmproof}

For the case that $d(v)=2$ we write $w,w'$ for the neighbours of $v$.

\begin{thmproof}[robson-ms-d2-edge]{lemma}{If $d(v)=2$ and $e(w,w')$, we have $\ms(G)=1+\ms(G\ex\iN(v))$.}
    By \autoref{lem:two-neighbours} we know that an m.i.s. will either contain $v$ or both $w$ and $w'$. But since $e(w,w')$, the latter cannot happen. Therefore, it must contain $v$ and neither of its neighbours.
\end{thmproof}

\begin{thmproof}[robson-ms-d2-noedge]{lemma}{If $d(v)=2$ and $\lnot e(w,w')$, we know $$\ms(G) = \max(2+\ms(G\ex\iN(w)\ex\iN(w')), 1 + \ms^2(G\ex\iN(v), N^2(v))).$$}
    By \autoref{lem:two-neighbours}, some m.i.s. contains either $v$ or both $w$ and $w'$. In the second case, we remove $w$, $w'$ and their neighbourhoods from the graph (the first argument of the $\max$). In the first case, the m.i.s. cannot contain $w$ or $w'$, but it must contain at least two of their neighbours other than $v$, i.e. two vertices of $N^2(v)$: if some m.i.s. $ms$ contained $v$ and at most one such vertex, say $u$, then $ms\ex v\ex u\with w\with w'$ would also be independent and at least as large. This gives the second argument.
\end{thmproof}

\autoref{alg:with-best-vertex} combined with \autoref{lem:components}, \ref{lem:robson-ms-d1}, \ref{lem:robson-ms-d2-edge} and \ref{lem:robson-ms-d2-noedge} gives us \autoref{alg:with-robsons-optimisations}.

\begin{algorithm}[h]
    \caption{Finding the size of the m.i.s. of a graph}
    \label{alg:with-robsons-optimisations} \label{alg:ms}
    \begin{algorithmic}[1]
        \REQUIRE $G=(V,E)$
        \IF{$G$ has multiple strongly connected components}
            \STATE $C\gets$ some strongly connected component
            \RETURN $\ms(C) + \ms(G\ex C)$ \COMMENT{\autoref{lem:components}}\label{alg:ms:case-component}
        \ELSIF{$|G|\le1$}
            \RETURN $|G|$ \COMMENT{\autoref{lem:base-case}}\label{alg:ms:case-g1}
        \ELSE
            \STATE $v\gets v\in V$ with minimal $d(v)$
            \STATE $w\gets w\in N(v)$ with maximal $d(w)$
            \IF{$d(v)=1$}
                \RETURN $1+\ms(G\ex\iN(v))$ \COMMENT{\autoref{lem:robson-ms-d1}}\label{alg:ms:case-d1}
            \ELSIF{$d(v)=2$}
                \STATE $\{w'\}\gets N(v)\ex w$
                \IF{$e(w,w')$}
                    \RETURN $1+\ms(G\ex\iN(v))$ \COMMENT{\autoref{lem:robson-ms-d2-edge}}\label{alg:ms:case-d2-edge}
                \ELSE
                    \RETURN $\max(2+\ms(G\ex\iN(w)\ex\iN(w')), 1+\ms^2(G\ex\iN(v),N^2(v)))$ \COMMENT{\autoref{lem:robson-ms-d2-noedge}}\label{alg:ms:case-d2-noedge}
                \ENDIF
            \ELSIF{$d(v)=3$}
                \RETURN $\max(\ms^2(G\ex v,N(v)), 1+\ms(G\ex\iN(v)))$ \COMMENT{\autoref{eq:with-two-neighbours}}\label{alg:ms:case-d3}
            \ELSIF{$v>w$}
                \RETURN $\ms(G\ex w)$ \COMMENT{\autoref{lem:dominance}}\label{alg:ms:case-dominance}
            \ELSE
                \RETURN $\max(\ms(G\ex w), 1+\ms(G\ex\iN(w)))$ \COMMENT{\autoref{eq:basic-structure}}\label{alg:ms:case-otherwise}
            \ENDIF
        \ENDIF
    \end{algorithmic}
\end{algorithm}

\begin{thmproof}[ms-correct]{theorem}{\autoref{alg:ms} is a correct implementation of the $\ms$ function.}
    This follows from \autoref{lem:base-case}, \ref{lem:dominance}, \ref{lem:components}, \ref{lem:robson-ms-d1}, \ref{lem:robson-ms-d2-edge} and \ref{lem:robson-ms-d2-noedge}, together with \autoref{eq:basic-structure} and \ref{eq:with-two-neighbours}.
\end{thmproof}

\begin{thmproof}[ms-efficiency]{theorem}{\autoref{alg:ms} is more efficient than \autoref{alg:with-best-vertex}, assuming that conditions are evaluated efficiently.}
    The algorithm follows the same basic structure. In every case where \autoref{alg:ms} differs, it considers fewer cases or smaller graphs (and it is still correct by \autoref{thm:ms-correct}). Therefore, it can only be more efficient.
\end{thmproof}

\begin{thmproof}[ms-terminates]{theorem}{\autoref{alg:ms} terminates on every input, provided that $\ms^2$ terminates on smaller inputs.}
    Line \ref{alg:ms:case-g1} provides a non-recursive base case that handles the two smallest possible orders, $|G|=0$ and $|G|=1$. In every other case, the algorithm calls itself or $\ms^2$ on a strictly smaller input, so by induction it terminates on every input.
\end{thmproof}

\subsection{The helper function $\ms^n$}
\label{sec:algorithm:helper-function}
So far we have been using $\ms^n(G,S)$ for the size of the maximum independent set of $G$ given that it contains at least $n$ vertices of $S$. This section is devoted to deriving an efficient implementation of this helper function. In the above, we have only used $\ms^2$, so we do not need to go into detail about the case $n\neq2$.

We will write $s_1,s_2,\dots$ for the elements of $S$.

\begin{thmproof}[helper-general]{lemma}{In our algorithm, it always holds that $\ms^n(G,S)=\ms(G)$.}
    We only call $\ms^n$ when we know that some m.i.s. of $G$ contains at least $n$ elements of $S$. The restriction then does not exclude any optimal solution.
\end{thmproof}

\autoref{lem:helper-general} may be used as a default case, when no clever optimisation can be found.

\begin{thmproof}[helper-1]{lemma}{If $|S|<n$, we have $\ms^n(G,S)=0$.}
    It is impossible to pick $n$ vertices from a set of fewer than $n$ vertices.
\end{thmproof}

\begin{thmproof}[helper-2-edge]{lemma}{If $|S|=2$ and $e(s_1,s_2)$, then $\ms^2(G,S)=0$.}
    The only way to choose two vertices from $S$ is to choose $s_1$ and $s_2$, which does not give an independent set.
\end{thmproof}

\begin{thmproof}[helper-2-noedge]{lemma}{If $|S|=2$ and $\lnot e(s_1,s_2)$, then $\ms^2(G,S)=2+\ms(G\ex\iN(s_1)\ex\iN(s_2))$.}
    We only have to consider one possibility: choosing both $s_1$ and $s_2$. We may then remove their inclusive neighbourhoods from the graph.
\end{thmproof}

\begin{thmproof}[helper-3-naive]{lemma}{If $|S|=3$, we have $\ms^2(G,S)=\max(\ms^2(G,S\ex s_1), \ms^2(G,S\ex s_2), \ms^2(G,S\ex s_3)).$}
    An m.i.s. containing at least two of $s_1,s_2,s_3$ leaves out at least one of them, so the three arguments of the $\max$, each of which leaves out one element of $S$, cover all possibilities.
\end{thmproof}
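
A direct transcription of \autoref{lem:helper-1} through \ref{lem:helper-3-naive}, with \autoref{lem:helper-general} as the default case, could look as follows. This is a sketch only, reusing \texttt{ms\_naive}, \texttt{remove} and \texttt{closed\_neighbourhood} from the sketch in \autoref{sec:algorithm:basic-structure}.

\begin{verbatim}
def ms2_sketch(graph, S):
    """ms^2 for the small cases of the lemmas above; falls back to ms
    when no special case applies."""
    S = list(S)
    if len(S) < 2:                                    # Lemma helper-1
        return 0
    if len(S) == 2:
        s1, s2 = S
        if s2 in graph[s1]:                           # Lemma helper-2-edge
            return 0
        dropped = (closed_neighbourhood(graph, s1)
                   | closed_neighbourhood(graph, s2))
        return 2 + ms_naive(remove(graph, dropped))   # Lemma helper-2-noedge
    if len(S) == 3:                                   # Lemma helper-3-naive
        return max(ms2_sketch(graph, set(S) - {s}) for s in S)
    return ms_naive(graph)                            # Lemma helper-general
\end{verbatim}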

\autoref{lem:helper-general} through \ref{lem:helper-3-naive} were the results I achieved. Robson \cite{robson} adds some optimisations for the cases $|S|=3$ and $|S|=4$. As they are fairly straightforward, I will not discuss them here. They are included in \autoref{alg:ms2}, which combines them with our own results into one algorithm. Robson uses slightly different notation at some points, since he assumes $d(s_1)<d(s_2)$.

\begin{algorithm}[h!]
    \caption{Finding the size of the m.i.s. of a graph, given that it should contain two nodes from a certain subgraph}
    \label{alg:ms2}
    \begin{algorithmic}[1]
        \REQUIRE $G=(V,E), S\subseteq V$
        \IF{$|S|\le1$}
        \RETURN $0$ \COMMENT{\autoref{lem:helper-1}}\label{alg:ms2:case-s1}
        \ELSIF{$|S|=2$}
            \IF{$e(s_1,s_2)$}
                \RETURN $0$ \COMMENT{\autoref{lem:helper-2-edge}}\label{alg:ms2:case-s2-edge}
            \ELSE
                \RETURN $2+\ms(G\ex\iN(s_1)\ex\iN(s_2))$ \COMMENT{\autoref{lem:helper-2-noedge}}\label{alg:ms2:case-s2-noedge}
            \ENDIF
        \ELSIF{$|S|=3$}
            \IF{$\exists_i[d(s_i)=0]$}
                \RETURN $1+\ms^1(G\ex s_i,S\ex s_i)$ \COMMENT{Special case of \autoref{lem:components}}\label{alg:ms2:case-s3-d0}
            \ELSIF{$s_1,s_2,s_3$ form a three-cycle}
                \RETURN $0$ \COMMENT{Impossible to pick two vertices from a three-cycle}\label{alg:ms2:case-s3-cycle}
            \ELSIF{$\exists_{i,j,k}[j\neq k \land e(s_i,s_j) \land e(s_i,s_k)]$}
                \RETURN $2 + \ms(G\ex\iN(s_j)\ex\iN(s_k))$ \COMMENT{Impossible to choose $s_i$ and another vertex, so we have to choose $s_j$ and $s_k$}\label{alg:ms2:case-s3-path}
            \ELSIF{$\exists_{i,j}[e(s_i,s_j)]$}
                \STATE $\{s_k\}\gets S\ex s_i\ex s_j$
                \RETURN $1 + \ms^1(G\ex\iN(s_k), S\ex s_k)$ \COMMENT{Impossible to choose $s_i$ and $s_j$, so we choose $s_k$ and one of $s_i,s_j$}\label{alg:ms2:case-s3-edge}
            \ELSIF{$\exists_{i,j}[i\neq j\land N(s_i)\cap N(s_j)\neq\emptyset]$}
                \RETURN $\ms^2(G\ex(N(s_i)\cap N(s_j)), S)$ \COMMENT{m.i.s. contains either $s_i$ or $s_j$, so not their common neighbours}\label{alg:ms2:case-s3-intersection}
            \ELSIF{$\exists_i[d(s_i)=1]$}
                \RETURN $1+\ms^1(G\ex\iN(s_i),S\ex s_i)$ \COMMENT{Special case of \autoref{lem:one-neighbour}}\label{alg:ms2:case-s3-d1}
            \ELSE
                \RETURN $\max(1+\ms^1(G\ex\iN(s_1),S\ex s_1), 2 + \ms^2(G\ex\iN(s_2)\ex\iN(s_3)\ex s_1, N(s_1)))$ \COMMENT{Either the m.i.s. does contain $s_1$, and one of $s_2,s_3$, or it doesn't. In the latter case, it must contain $s_2,s_3$, and at least two of $s_1$'s neighbours, using an argument similar to \autoref{lem:two-neighbours}. Robson misses the `$2+$' for this second case, but from the writing it is clear that it should be there.}\label{alg:ms2:case-s3-otherwise}
            \ENDIF
        \ELSIF{$|S|=4$}
            \IF{$\lnot\exists_i[d(s_i)\leq3]$}
                \RETURN $\max(1+\ms(G\ex\iN(s_1)),\ms^2(G\ex s_1,S\ex s_1))$ \COMMENT{m.i.s. either does or doesn't contain $s_1$}\label{alg:ms2:case-s4-no-dle3}
            \ELSE
                \RETURN $\ms(G)$ \COMMENT{More efficient than the $\max$ in this case; \autoref{lem:helper-general}}\label{alg:ms2:case-s4-dle3}
            \ENDIF
        \ELSE
            \RETURN $\ms(G)$ \COMMENT{No clever optimisation found; \autoref{lem:helper-general}}\label{alg:ms2:case-otherwise}
        \ENDIF
    \end{algorithmic}
\end{algorithm}

\begin{thmproof}[ms2-correct]{theorem}{\autoref{alg:ms2} is a correct implementation of the $\ms^2$ function.}
    This follows from \autoref{lem:one-neighbour}, \ref{lem:components} and \ref{lem:helper-general} through \ref{lem:helper-2-noedge}. We have only made small efficiency enhancements that are clearly correct and are argued in the comments of the algorithm.
\end{thmproof}

\begin{thmproof}[ms2-terminates]{theorem}{\autoref{alg:ms2} terminates for any input, provided that $\ms$ and $\ms^1$ terminate on smaller inputs.}
    The smallest values of $|S|$ are handled by base cases that terminate directly. Every other case either terminates directly, or reduces to $\ms$ or $\ms^1$ on a smaller graph, or to $\ms^2$ on a smaller graph or a smaller $S$. By induction, the algorithm terminates with any input.
\end{thmproof}

\bigskip

\clearpage
As the observant reader may have noticed, we use $\ms^1$ in the definition of $\ms^2$. We therefore also need an efficient algorithm for this function. Of course, \autoref{lem:helper-general} and \ref{lem:helper-1} apply to $\ms^1$ as well. Note that we always call $\ms^1(\cdots,S)$ with $|S|=2$. We will not give a complete algorithm for $\ms^1$, but one that works in the necessary cases.

\begin{thmproof}[helper-intersection]{lemma}{If $N(s_1)\cap N(s_2)\neq\emptyset$, we have $\ms^1(G,S)=\ms^1(G\ex(N(s_1)\cap N(s_2)), S)$.}
    Something similar was already used in \autoref{alg:ms2}. The m.i.s. will contain $s_1$ or $s_2$, and therefore cannot contain any of their common neighbours. Removing these vertices allows for early pruning.
\end{thmproof}
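
In the sketch notation used earlier, this pruning step is a one-liner (reusing the \texttt{remove} helper):

\begin{verbatim}
def prune_common_neighbours(graph, s1, s2):
    """Drop the common neighbours of s1 and s2; ms^1 may then be called
    again on the smaller graph."""
    return remove(graph, graph[s1] & graph[s2])
\end{verbatim}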

Robson adds further optimisations to this.

\begin{thmproof}[helper-ms1-2-2]{lemma}{If $d(s_1)=d(s_2)=2$ and $\lnot e(s_1,s_2)$, there is an m.i.s. containing one of $s_1,s_2$ that contains either $s_1$ or all of $N(s_1)\with\{s_2\}$.}
    This is the exchange argument of \autoref{lem:two-neighbours} once more. Take an m.i.s. $ms$ containing one of $s_1,s_2$. If $s_1\in ms$, we are done. Otherwise $s_2\in ms$. If $ms$ contains at most one neighbour of $s_1$, then removing that neighbour (if any) and adding $s_1$ yields an m.i.s. containing $s_1$; it still contains $s_2$, because $\lnot e(s_1,s_2)$. In the remaining case, $ms$ contains both neighbours of $s_1$ as well as $s_2$.
\end{thmproof}

If the conditions of \autoref{lem:helper-ms1-2-2} are met, write $w_1,w_2$ for the two neighbours of $s_1$.

\begin{thmproof}[helper-ms1-2-2-edge]{lemma}{If the conditions of \autoref{lem:helper-ms1-2-2} are met and $e(w_1,w_2)$, we have $\ms^1(G,S)=1+\ms(G\ex\iN(s_1))$.}
    Since $e(w_1,w_2)$, no independent set contains both elements of $N(s_1)$, so \autoref{lem:helper-ms1-2-2} leaves choosing $s_1$ as the only option; we may then remove its inclusive neighbourhood.
\end{thmproof}

\begin{thmproof}[helper-ms1-2-2-dominance]{lemma}{If the conditions of \autoref{lem:helper-ms1-2-2} are met and $N^2(s_1)\subset N(s_2)$, we have $\ms^1(G,S)=3+\ms(G\ex\iN(s_1)\ex\iN(s_2))$.}
    An independent set of the subgraph induced by $\iN(s_1)\cup\iN(s_2)$ that contains $s_1$ or $s_2$ cannot be larger than three. Furthermore, $\{w_1,w_2,s_2\}$ is an independent set of that subgraph of size three, and it dominates every other independent set of the same size.
\end{thmproof}

This leads us to a slightly adapted version of $\ms^2$ that we can use for $\ms^1$. Pseudocode is given in \autoref{alg:ms1}.

\begin{algorithm}[h!]
    \caption{Finding the size of the m.i.s. of a graph, given that it should contain one node from a certain subgraph with two nodes}
    \label{alg:ms1}
    \begin{algorithmic}[1]
        \REQUIRE $G=(V,E), S\subseteq V, S=\{s_1,s_2\}, d(s_1)\leq d(s_2)$
        \IF{$d(s_1)\leq1$}
            \RETURN $\ms(G)$ \COMMENT{\autoref{lem:helper-general}} \label{alg:ms1:case-d1}
        \ELSIF{$e(s_1,s_2)$}
            \IF{$d(s_1)>3$}
                \RETURN $1 + \max(\ms(G\ex\iN(s_1)), \ms(G\ex\iN(s_2)))$ \COMMENT{An m.i.s. contains either $s_1$ or $s_2$, but not both} \label{alg:ms1:case-edge-d4}
            \ELSE
                \RETURN $\ms(G)$ \COMMENT{More efficient than the $\max$ in this case; \autoref{lem:helper-general}}\label{alg:ms1:case-edge-d3}
            \ENDIF
        \ELSIF{$N(s_1)\cap N(s_2)\neq\emptyset$}
            \RETURN $\ms^1(G\ex(N(s_1)\cap N(s_2)), S)$ \COMMENT{m.i.s. contains $s_1$ or $s_2$, so not their common neighbours}\label{alg:ms1:intersection}
        \ELSIF{$d(s_1)=d(s_2)=2$}
            \STATE $\{w_1,w_2\} \gets N(s_1)$
            \IF{$e(w_1,w_2)$}
                \RETURN $1+\ms(G\ex\iN(s_1))$ \COMMENT{\autoref{lem:helper-ms1-2-2-edge}}\label{alg:ms1:case-2-2-edge}
            \ELSIF{$N^2(s_1)\subset N(s_2)$}
                \RETURN $3 + \ms(G\ex\iN(s_1)\ex\iN(s_2))$ \COMMENT{\autoref{lem:helper-ms1-2-2-dominance}}\label{alg:ms1:case-2-2-dominance}
            \ELSE
                \RETURN $\max(1+\ms(G\ex\iN(s_1)), 3+\ms(G\ex\iN(w_1)\ex\iN(w_2)\ex\iN(s_2)))$ \COMMENT{\autoref{lem:helper-ms1-2-2}}\label{alg:ms1:case-2-2-else}
            \ENDIF
        \ELSE
            \RETURN $1+\max(\ms(G\ex\iN(s_2)), \ms^2(G\ex\iN(s_1)\ex s_2, N(s_2)))$ \COMMENT{An m.i.s. contains either $s_2$ or $s_1$ and two of $s_2$'s neighbours. If an m.i.s. $ms$ would contain $s_1$ and at most one of $s_2$'s neighbours, say $w$, then there is another independent set $ms\ex w\with s_2$, which is at least as large.}\label{alg:ms1:case-general}
        \ENDIF
    \end{algorithmic}
\end{algorithm}

\begin{thmproof}[ms1-correct]{theorem}{\autoref{alg:ms1} is a correct implementation of the $\ms^1$ function if the second argument has size $2$.}
    This follows from \autoref{lem:helper-general} and \autoref{lem:helper-intersection} through \ref{lem:helper-ms1-2-2-dominance}. We have only made enhancements that are clearly correct and are furthermore argued in the comments of the algorithm.
\end{thmproof}

\begin{thmproof}[ms1-terminates]{theorem}{\autoref{alg:ms1} terminates with any input if the second argument has size $2$.}
    Inductively by \autoref{thm:ms-terminates} and \ref{thm:ms2-terminates}; the only recursive call to $\ms^1$ itself, on line \ref{alg:ms1:intersection}, is on a strictly smaller graph.
\end{thmproof}

\begin{thmproof}[everything-correct]{theorem}{\autoref{alg:ms}, \ref{alg:ms2} and \ref{alg:ms1} form a correct and total algorithm for the maximum independent set problem.}
    The algorithm is total by \autoref{thm:ms-terminates}, \ref{thm:ms2-terminates} and \ref{thm:ms1-terminates}. It is correct by \autoref{thm:ms-correct}, \ref{thm:ms2-correct} and \ref{thm:ms1-correct}.
\end{thmproof}