% NOTE(review): removed stray extraction artifact ("279 lines / 9.7 KiB / TeX")
% that preceded the preamble and would break compilation.

\input{../settings/settings}
\begin{document}
\klausur{Semantic Information Processing}{Prof. C. Schlieder}{Wintersemester 15/16}{90}{}
\section*{Problem 1}
\begin{enumerate}[label=\alph*)]
\item Explain how \textit{uninformed} and \textit{informed} search strategies differ. (3 P)
\item Describe the \textit{iterative deepening} search strategy. (3 P)
\item What property characterizes an \textit{admissible} heuristic? Specify an admissible heuristic for the 8-puzzle. (3 P)
\item Places \textit{A, B, C, D, E} and \textit{F} are connected by a transport network. The distances between places are specified in the graph below. Use the \textit{A* algorithm} to find the shortest path from \textit{A} to \textit{F}. The heuristic function \textit{h(n)} is specified in the table. You may assume that cycles are identified and avoided. (6 P)
\end{enumerate}
\begin{center}
\begin{tikzpicture}
% Knoten
\node (Start) at (-1,1) {Start};
\node (A) at (0,1) [circle, draw] {A};
\node (B) at (3,4) [circle, draw] {B};
\node (C) at (2,0) [circle, draw] {C};
\node (D) at (3,2) [circle, draw] {D};
\node (E) at (5,0) [circle, draw] {E};
\node (F) at (7,2) [circle, draw] {F};
\node (Ziel) at (8,2) {Ziel};
% Kanten
\draw (A) -- (B) node [midway, fill=white] {30};
\draw (A) -- (C) node [midway, fill=white] {10};
\draw (B) -- (D) node [midway, fill=white] {5};
\draw (B) -- (F) node [midway, fill=white] {45};
\draw (C) -- (D) node [midway, fill=white] {10};
\draw (C) -- (E) node [midway, fill=white] {25};
\draw (D) -- (E) node [midway, fill=white] {20};
\draw (E) -- (F) node [midway, fill=white] {25};
\end{tikzpicture}
\end{center}
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|}
\hline
& A & B & C & D & E & F \\
\hline
h(n) & 45 & 40 & 40 & 30 & 20 & 0 \\
\hline
\end{tabular}
\end{center}
\section*{Problem 2}
\begin{enumerate}[label=\alph*)]
\item Consider the following situation, in which a robot navigates to a charging station using the \textit{simulated annealing} search strategy. The black field is inaccessible. Search starts with the robot position as indicated. Determine the probability with which positions $s_1$, $s_2$, and $s_3$ are explored next. Use the Manhattan distance to the charging station as error measure \textit{E} and the parameter value $T = 1$. Specify the probability by appropriate formulas -- You do \textbf{not} need to numerically compute them! (3 P)
\begin{figure}[H]
\centering
\includegraphics[scale=0.4]{ws1516-SemInf-problem2a}
\label{fig:ws1516-seminf-problem2a}
\end{figure}
\item Consider the state of the TicTacToe game depicted below. MAX is about to move and places an $X$ token. Determine for all subsequent moves the MIN- and MAX-values of the game. Draw the MINMAX tree. Is it possible for MIN to win the game if both players play optimally? (8 P)
\begin{figure}[H]
\centering
\includegraphics[scale=0.3]{ws1516-SemInf-problem2c}
\label{fig:ws1516-seminf-problem2c}
\end{figure}
\item By applying \textit{alpha-beta pruning} and assuming that subtrees are searched from left to right, the colored node is not going to be evaluated. Explain why. (4 P)\\
\end{enumerate}
\begin{center}
\begin{tikzpicture}[->]
% Knoten
\node (Zug3) at (-2,0) {Zug 3: MIN};
\node (A) at (1,0) [circle, minimum size=1cm, draw] {5};
\node (B) at (3,0) [circle, minimum size=1cm, draw, fill] { };
\node (Zug2) at (-2,2) {Zug 2: MAX};
\node (C) at (0,2) [circle, minimum size=1cm, draw] {-2};
\node (D) at (2,2) [circle, minimum size=1cm, draw] { };
\node (Zug1) at (-2,4) {Zug 1: MIN};
\node (E) at (1,4) [circle, minimum size=1cm, draw] { };
% Kanten
\draw[-{>[scale=4,
length=2,
width=3]},line width=0.4pt] (A) to (D);
\draw[-{>[scale=4,
length=2,
width=3]},line width=0.4pt] (B) edge (D);
\draw[-{>[scale=4,
length=2,
width=3]},line width=0.4pt] (C) edge (E);
\draw[-{>[scale=4,
length=2,
width=3]},line width=0.4pt] (D) edge (E);
\end{tikzpicture}
\end{center}
\section*{Problem 3}
\begin{enumerate}[label=\alph*)]
\item The crossword shown below has to be filled with the words \textbf{IS}, \textbf{IN}, \textbf{SO} and \textbf{NOT}. Each word may be used once only. Represent the task as a \textit{constraint satisfaction problem}. Describe the variables, their domains, and the constraints. Draw the constraint graph. (8 P)
\begin{figure}[H]
\centering
\includegraphics[scale=0.3]{ws1516-SemInf-problem3a}
\label{fig:ws1516-seminf-problem3a}
\end{figure}
\item Apply the \textit{arc consistency algorithm} to the constraint satisfaction problem. Describe the intermediate steps of the computation in form of a table (variables and domains). Does the algorithm solve the puzzle? (7 P)
\end{enumerate}
\section*{Problem 4}
\begin{enumerate}[label=\alph*)]
\item Formalize sentences (1), (2), and (3) in \textit{predicate logic}. Use a predicate $K(x, y)$, which expresses that person $x$ knows person $y$ and a predicate $B(x, z)$, which expresses that person $x$ attends course $z$. Avoid using further predicates. (3 P)
\begin{enumerate}[label=(\arabic*)]
\item If x and y attend the same course, then person x knows person y.
\item Alice attends the course \glqq cryptography\grqq\ and Bob attends the course \glqq cryptography\grqq .
\item Alice knows Bob and Bob knows Alice.
\end{enumerate}
\item Specify the \textit{clause forms}, which are needed to show by resolution that formula (3) can be proved from formulas (1) and (2). (3 P)
\item Provide the \textit{resolution proof}. Specify all resolution steps and the term substitutions they involve. (9 P)
\end{enumerate}
\section*{Problem 5}
\begin{enumerate}[label=\alph*)]
\item Explain the difference between \textit{terminological} and \textit{assertional} knowledge. Give an example for each. (4 P)
\item Model the following definitions using a \textit{description logic} (e.g. SHIQ): (4 P)
\begin{enumerate}[label=(\arabic*)]
\item A Student is a person that is enrolled in a university.
\item An interested student is a student who is interested in all lectures.
\end{enumerate}
\item Consider the following \textit{Bayesian network} with nodes \textit{A, B, C, D, E,} and probabilities as specified in the tables. What is the probability of E, if it is known that $\neg B$ and $C$? (4 P)
\vspace{1cm}
\begin{multicols}{2}
\begin{center}
\begin{tikzpicture}[thick, scale=2, every node/.style={scale=2}]
% Knoten
\node (A) at (0,2) [circle, draw] {A};
\node (B) at (2,2) [circle, draw] {B};
\node (C) at (1,1) [circle, draw] {C};
\node (D) at (0,0) [circle, draw] {D};
\node (E) at (2,0) [circle, draw] {E};
% Kanten
\draw[-{>[scale=4,
length=2,
width=3]},line width=0.4pt] (A) to (C);
\draw[-{>[scale=4,
length=2,
width=3]},line width=0.4pt] (B) edge (C);
\draw[-{>[scale=4,
length=2,
width=3]},line width=0.4pt] (C) edge (E);
\draw[-{>[scale=4,
length=2,
width=3]},line width=0.4pt] (C) edge (D);
\end{tikzpicture}
\end{center}
\begin{multicols}{2}
\begin{tabular}{|l|}
\hline
P(A) \\
\hline
0,4 \\
\hline
\end{tabular}
\begin{tabular}{|l|}
\hline
P(B) \\
\hline
0,5 \\
\hline
\end{tabular}
\end{multicols}
\begin{tabular}{|l|l|l|}
\hline
A & B & P(C) \\
\hline
1 & 1 & 0,9 \\
\hline
1 & 0 & 0,3 \\
\hline
0 & 1 & 0,6 \\
\hline
0 & 0 & 0,2 \\
\hline
\end{tabular}
\begin{multicols}{2}
\begin{tabular}{|l|l|}
\hline
C & P(D) \\
\hline
1 & 0,8 \\
\hline
0 & 0,1 \\
\hline
\end{tabular}
\begin{tabular}{|l|l|}
\hline
% NOTE(review): header corrected from P(D) to P(E) -- the second CPT duplicated
% the first; the network has edges C->D and C->E, and Problem 5c asks about E.
C & P(E) \\
\hline
1 & 0,8 \\
\hline
0 & 0,1 \\
\hline
\end{tabular}
\end{multicols}
\end{multicols}
\item Explain what it means that the event $X$ is \textit{conditionally independent} of event $Y$ given event $Z$. Use your definition to check in the Bayesian network above, whether or not $D$ is conditionally independent of $E$ given $C$. (3 P)
\end{enumerate}
\section*{Problem 6}
Users of an online dating platform are shown profile pictures from other users. Based on the picture, the user decides on whether or not to signal interest. To improve the service, prior classification results of the user are evaluated with the goal of identifying pictures that are likely to raise interest.
For user Alice the interaction history is listed below:
\begin{center}
\begin{tabular}{|l|l|l|c|}
\hline
\textbf{Alter} & \textbf{Figur} & \textbf{Haarfarbe} & \\
\hline
alt & sportlich & blond & $\heartsuit$ \\
\hline
jung & unsportlich & brünett & $\times$ \\
\hline
alt & sportlich & brünett & $\heartsuit$ \\
\hline
jung & sportlich & brünett & $\times$ \\
\hline
jung & unsportlich & blond & $\times$ \\
\hline
alt & sportlich & brünett & $\heartsuit$ \\
\hline
jung & sportlich & blond & $\times$ \\
\hline
jung & unsportlich & brünett & $\times$ \\
\hline
alt & unsportlich & brünett & $\heartsuit$ \\
\hline
alt & unsportlich & blond & $\heartsuit$ \\
\hline
\end{tabular}
\end{center}
\begin{enumerate}[label=\alph*)]
\item Apply the \textit{version space algorithm} to the first \textbf{four} examples and determine the set of most general consistent hypotheses and the set of most specific consistent hypotheses. (8 P)
\item What is \textit{information gain} used for in decision tree learning? Make an educated guess about which of the three attributes produces the highest information gain and which the lowest. Underpin your argument with a formula -- You do \textbf{not} need to numerically evaluate the formula. (5 P)
\item Contrast \textit{feed-forward} and \textit{recurrent} neural networks. (2 P)
\end{enumerate}
\end{document}