From f329cf18ed3dbf086ec6876205173e4a563f6efd Mon Sep 17 00:00:00 2001 From: Jan Vales Date: Sun, 17 Jan 2016 06:26:32 +0100 Subject: [PATCH] Ex2 report done. --- report/content/exercise2.tex | 145 ++++++++++++++++++++++++++--------- report/content/exercise3.tex | 3 + 2 files changed, 111 insertions(+), 37 deletions(-) diff --git a/report/content/exercise2.tex b/report/content/exercise2.tex index f77de57..0b81c61 100644 --- a/report/content/exercise2.tex +++ b/report/content/exercise2.tex @@ -3,7 +3,7 @@ \subsection{Rep:2.a} In order to use scapy we need to convert out pcap-ng dump to pcap. \begin{verbatim} -$ editcap -F libpcap team15_ex21.pcapng team15_ex21.pcap +$ tshark -r team15_ex21.pcapng -w team15_ex21.pcap -F pcap \end{verbatim} We are only interested in flows with more (or equal) than 400 packets, each exported as a separate pcap file. @@ -51,6 +51,7 @@ We looked into it with Rapidminer and found a suspiciously alternating srcport j \includegraphics[width=0.6\columnwidth,keepaspectratio]{content/e21_flow_114_176_157_191_221_72_61_209_srcport.png} + \subsection{Rep:2.b} The message is \emph{\textbf{Data acquired. Key for message (len=42 \& pkts>200): nSa123 (Scott)}} @@ -86,60 +87,130 @@ if __name__ == "__main__": \section{Exercise 2 - Task 2} \subsection{Rep:2.c} +The message will be \emph{\textbf{rc4}} encrypted using the key \emph{\textbf{nSa123}}. Decrypt with: +\begin{verbatim} +$ openssl enc -d -rc4 -nosalt -k nSa123 -in stream.enc -out stream.txt +\end{verbatim} -Then we converted the file {\tt team15\_ex22.pcap} from the pcap-ng format to the pcap format to be able to use it with {\tt scapy}. - -\begin{lstlisting} -$ editcap -F libpcap team15_ex22.pcap ex22.pcap -\end{lstlisting} - -Then we filtered out the large flows with more than 200 packets and a frame length of 42 as mentioned in the solution from task 1. 
-% TODO \begin{didntwork} -./task2/readflows.py -% {('53.151.211.106', '217.115.203.44'): (213, 0)} +\subsection{Rep:2.d} +Redo initial conversion like in the previous task, but also apply a filter to \emph{\textbf{only retain packets of length 42}}. -filter into file -ip.addr == 53.151.211.106 and ip.addr == 217.115.203.44 and eth.len == 42 +\begin{verbatim} +$ tshark -r team15_ex22.pcapng -w team15_ex22.pcap -F pcap 'frame.len == 42' +\end{verbatim} -generate graph: -large\_flow.png +After conversion apply a modified version of ./somefilter.py as it is pretty time consuming to manually create csv files. Especially if we keep changing parameters or fields. -save bytes (ipid) from stream to file -./decode\_ipid.py +\begin{verbatim} +$ ./somefilter.py | sh +\end{verbatim} -try decoding with password from previous task -openssl enc -d -rc4 -nosalt -k nSa123 -in stream\_encrypted -out stream\_decrypted +./somefilter.py +\begin{redframe}\begin{scriptsize}\begin{verbatim} +#!/usr/bin/env python + +from scapy.all import * + +def somefilter(pcapfile): + flows = dict() + for p in PcapReader(pcapfile): + if IP in p: + src = p[IP].src + dst = p[IP].dst + + if (src,dst) in flows: + flows[(src,dst)] +=1 + else: + flows[(src,dst)] = 1 + + for flow,cnt in flows.items(): + if cnt >= 200: + print 'tshark -r '+pcapfile+' -w "flow_'+flow[0]+'_'+flow[1]+'.pcap" -F pcap ' \ + + '\'ip.src == '+flow[0]+' and ip.dst == '+flow[1]+'\'' + print 'tshark -n -r "flow_'+flow[0]+'_'+flow[1]+'.pcap" -Eheader=y -Eseparator=, -Equote=d -Tfields '\ + + '-e frame.number -e _ws.col.Time -e ip.src -e ip.dst -e _ws.col.Protocol -e frame.len '\ + + '-e _ws.col.Info -e _ws.col.dscp -e _ws.col.ipid -e _ws.col.cs -e _ws.col.srcport '\ + + '-e _ws.col.dstport -e _ws.col.proto > flow_'+flow[0]+'_'+flow[1]+'.csv' + + +if __name__ == "__main__": + somefilter("team15_ex22.pcap") +\end{verbatim}\end{scriptsize}\end{redframe} --> didn't work +We examined the dumps and suspected the covert channel to be in the IPID 
fields.\\ +After exporting the bytes into separate files we were stuck at decrypting the decoded bytes. -tried reversing the bytes (lower byte first, upper byte next) +\begin{verbatim} +$ ./decode\_ipid.py +$ openssl enc -d -rc4 -nosalt -k nSa123 -in stream\_encrypted -out stream\_decrypted +\end{verbatim} --> didn't work +After the hint ``Use the toooools!'' we adapted the autocorr.py tool to not require the output parameter and wrote a shell wrapper to loop through all csv files for a given field name. -trying to decode the second-largest flow: -ip.addr == 96.55.191.225 and ip.addr == 217.115.203.44 and eth.len == 42 +\begin{verbatim} +$ ./autocorr_all.sh _ws.col.ipid +\end{verbatim} --> didn't work -% TODO \end{didntwork} +./autocorr\_all.sh +\begin{redframe}\begin{scriptsize}\begin{verbatim} +#!/bin/bash + +for f in *.csv ; do + sed -e 's/,"0x[a-fA-F0-9]\{4\} (\([0-9]\+\))",/,"\1",/' -e 's/,"UDP"$/,"17"/' \ + -e 's/,"ICMP"$/,"1"/' $f > ${f}.dehexed + echo "$f, $1: $(../../autocorr.py --input ${f}.dehexed --field ${1})" +done +\end{verbatim}\end{scriptsize}\end{redframe} -Finally we found out, that scapy removes the frame length when parsing packets with the {\tt PcapReader}. +After autocorr\_all'ing all available fields ip.proto produced an exception on one csv file. The investigation led to the addition of two new sed replace rules above.\\ -This does not happen with the {\tt PcapRawReader}, so we rewrote the script a bit. +After trying to extract and decode the Proto-field we finally came across the solution. 
-% TODO continue writing here +\begin{verbatim} +$ ./somedecode.py | openssl enc -d -rc4 -nosalt -k nSa123 +\end{verbatim} -filter into files +./somedecode.py +\begin{redframe}\begin{scriptsize}\begin{verbatim} +#!/usr/bin/env python + +import csv +import binascii + +def somedecode(filename): + with open(filename, 'rb') as csvfile: + spamreader = csv.reader(csvfile, delimiter=',', quotechar='"') + header = None + bits = "" + + for row in spamreader: + if header is None: + header = row + continue + + if row[12] == 'ICMP': + bits += "0" + if row[12] == 'UDP': + bits += "1" + +# print bits + #bits = bits[:-(len(bits)%8)] + print binascii.unhexlify('%x' % int(bits, 2)) + +if __name__ == "__main__": + somedecode("flow_204.10.110.7_187.10.25.137.csv") +\end{verbatim}\end{scriptsize}\end{redframe} -./readflows.py +It turned out that scapy is not really suited to handle our decoding task as it would discard the frame.len information. \url{http://stackoverflow.com/questions/21752576/whole-packet-length-scapy}\\ +This does not happen with the {\tt PcapRawReader}, so we rewrote the script.\\ -manually create csv files +Another failure was our fixation on ip.id. We tried to decode that field without looking at other possibilities. -for i in large\_flow\_*.csv ; do ./../../only\_decimal.sh \$i > \${i\%.csv}.dehexed.csv ; done -./autocorrelate.sh | grep -v "All values are identical" | sort -k2 +\subsection{Rep:2.e} +The message is \emph{\textbf{Agent South was captured! Aborting operation. (Agent Scott)}}. --> ./parse\_stream\_data.py -\subsection{Rep:2.c} -Agent South was captured! Aborting operation. (Agent Scott) +\section{Lessons learned} +Basically we have two semi-independent solutions for every task as both of us had slightly different ways of doing things, but both wanted to learn how things work. 
diff --git a/report/content/exercise3.tex b/report/content/exercise3.tex index a76edf9..60f1d3c 100644 --- a/report/content/exercise3.tex +++ b/report/content/exercise3.tex @@ -1,5 +1,8 @@ \section{Exercise 3} \subsection{Rep:3.a} +Astra did this one. + + scan the host -- 2.43.0