1 {
2  "cells": [
3   {
4    "cell_type": "markdown",
5    "metadata": {},
6    "source": [
7     "# Exercise 5 - Spark in Scala _[4 points]_\n",
8     "\n",
9     "In this exercise you have to solve the tasks given below. \n"
10    ]
11   },
12   {
13    "cell_type": "markdown",
14    "metadata": {},
15    "source": [
16     "## a) Elementary RDD functions \n",
17     "\n",
18     "(Brushing up on the basics of functional programming in Scala)\n",
19     "\n",
20     "\n"
21    ]
22   },
23   {
24    "cell_type": "markdown",
25    "metadata": {},
26    "source": [
27     "####  You are given a list of the first 20 Fibonacci numbers."
28    ]
29   },
30   {
31    "cell_type": "code",
32    "execution_count": null,
33    "metadata": {},
34    "outputs": [
35     {
36      "data": {
37       "text/plain": [
38        "Intitializing Scala interpreter ..."
39       ]
40      },
41      "metadata": {},
42      "output_type": "display_data"
43     }
44    ],
45    "source": [
46     "val fibs20 = sc.parallelize(List( 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584, 4181))  "
47    ]
48   },
49   {
50    "cell_type": "markdown",
51    "metadata": {},
52    "source": [
53     "####  Produce a list that contains all even numbers from the list 'fibs20':"
54    ]
55   },
56   {
57    "cell_type": "code",
58    "execution_count": null,
59    "metadata": {},
60    "outputs": [
61     {
62      "data": {
63       "text/plain": [
64        "Intitializing Scala interpreter ..."
65       ]
66      },
67      "metadata": {},
68      "output_type": "display_data"
69     }
70    ],
71    "source": [
72     "val evenFibs20 = fibs20.filter(x => (x % 2 == 0)).collect()"
73    ]
74   },
75   {
76    "cell_type": "markdown",
77    "metadata": {},
78    "source": [
79     "####  Compute the average value of the list 'fibs20':"
80    ]
81   },
82   {
83    "cell_type": "code",
84    "execution_count": null,
85    "metadata": {},
86    "outputs": [
87     {
88      "data": {
89       "text/plain": [
90        "Intitializing Scala interpreter ..."
91       ]
92      },
93      "metadata": {},
94      "output_type": "display_data"
95     }
96    ],
97    "source": [
98     "val avg1map = fibs20.map(x => (x, 1))\n",
99     "val avg1fold = avg1map.fold((0, 0))((x, y) => (x._1 + y._1, x._2 + y._2))\n",
100     "val avg1 = avg1fold._1 / avg1fold._2.toFloat"
101    ]
102   },
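  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As an optional cross-check (not required by the task), the same average can also be computed with Spark's built-in numeric RDD helpers:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "// Optional sanity check: mean() sums and counts in a single pass; the result should equal avg1\n",
    "val avgCheck = fibs20.mean()"
   ]
  },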
103   {
104    "cell_type": "markdown",
105    "metadata": {},
106    "source": [
107     "####  Produce a list that shows for each element of the list 'fibs20' its absolute difference from the average:"
108    ]
109   },
110   {
111    "cell_type": "code",
112    "execution_count": null,
113    "metadata": {},
114    "outputs": [],
115    "source": [
116     "val avgDiff = fibs20.map(x => (x - avg1).abs).collect()"
117    ]
118   },
119   {
120    "cell_type": "markdown",
121    "metadata": {},
122    "source": [
123     "####  You are given a random list of words"
124    ]
125   },
126   {
127    "cell_type": "code",
128    "execution_count": null,
129    "metadata": {},
130    "outputs": [],
131    "source": [
132     "val words = sc.parallelize(List(\"automaton\", \"language\", \"logic\",\"closure\"))"
133    ]
134   },
135   {
136    "cell_type": "markdown",
137    "metadata": {},
138    "source": [
139     "####  Furthermore, we define a function that maps a word to its list of permutations"
140    ]
141   },
142   {
143    "cell_type": "code",
144    "execution_count": null,
145    "metadata": {},
146    "outputs": [],
147    "source": [
148     "def permutate(word: String) = word.permutations.toList"
149    ]
150   },
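  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick, optional sanity check of the 'permutate' helper on a short word:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "// Expected result: List(ab, ba)\n",
    "permutate(\"ab\")"
   ]
  },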
151   {
152    "cell_type": "markdown",
153    "metadata": {},
154    "source": [
155     "####  Produce a single list containing all permutations of elements from the list 'words':"
156    ]
157   },
158   {
159    "cell_type": "code",
160    "execution_count": null,
161    "metadata": {},
162    "outputs": [],
163    "source": [
164     "val wordlist = words.flatMap(permutate).collect()"
165    ]
166   },
167   {
168    "cell_type": "markdown",
169    "metadata": {},
170    "source": [
171     "## b) From SQL to Dataframe (and back again)\n",
172     "\n",
173     "#### For each of the given Spark SQL queries, find an equivalent query that only uses the Dataframe API (or vice versa)\n"
174    ]
175   },
176   {
177    "cell_type": "code",
178    "execution_count": null,
179    "metadata": {},
180    "outputs": [],
181    "source": [
182     "val dataPath = \"file:///home/adbs/2019S/shared/diamonds.csv\"\n",
183     "val diamonds = spark.read.format(\"csv\")\n",
184     "  .option(\"header\",\"true\")\n",
185     "  .option(\"inferSchema\", \"true\")\n",
186     "  .load(dataPath)\n",
187     "diamonds.createOrReplaceTempView(\"diamonds\")\n",
188     "\n",
189     "val articlesDF = spark.read.format(\"json\").load(\"file:///home/adbs/2019S/shared/spark/nytarticles\")\n",
190     "val commentsDF = spark.read.json(\"file:///home/adbs/2019S/shared/spark/nytcomments\")\n",
191     "articlesDF.createOrReplaceTempView(\"articles\")\n",
192     "commentsDF.createOrReplaceTempView(\"comments\")\n",
193     "// Create RDD view into dataset\n",
194     "val articlesRDD = articlesDF.rdd\n",
195     "val commentsRDD = commentsDF.rdd"
196    ]
197   },
198   {
199    "cell_type": "markdown",
200    "metadata": {},
201    "source": [
202     "#### Query 1: Transform the given Spark SQL query into the Dataframe API"
203    ]
204   },
205   {
206    "cell_type": "code",
207    "execution_count": null,
208    "metadata": {},
209    "outputs": [],
210    "source": [
211     "val query1 = spark.sql(\"SELECT COUNT(*) FROM articles WHERE sectionName='Politics'\")\n",
212     "query1.show()\n",
213     "query1.explain()"
214    ]
215   },
216   {
217    "cell_type": "code",
218    "execution_count": null,
219    "metadata": {},
220    "outputs": [],
221    "source": [
222     "//val query1df = query1.toDF()\n",
223     "//articlesDF.where(\"sectionName='Politics'\").count()\n",
224     "articlesDF.where(articlesDF.col(\"sectionName\") === \"Politics\").count()"
225    ]
226   },
227   {
228    "cell_type": "markdown",
229    "metadata": {},
230    "source": [
231     "#### Query 2: Transform the given Dataframe API query into Spark SQL"
232    ]
233   },
234   {
235    "cell_type": "code",
236    "execution_count": null,
237    "metadata": {},
238    "outputs": [
239     {
240      "data": {
241       "text/plain": [
242        "Intitializing Scala interpreter ..."
243       ]
244      },
245      "metadata": {},
246      "output_type": "display_data"
247     }
248    ],
249    "source": [
250     "val query2 = articlesDF.groupBy(\"sectionName\").count()\n",
251     "query2.show(false)\n",
252     "query2.explain()"
253    ]
254   },
255   {
256    "cell_type": "code",
257    "execution_count": null,
258    "metadata": {},
259    "outputs": [],
260    "source": [
261     "val query2sql = spark.sql(\"SELECT sectionName,COUNT(*) FROM articles GROUP BY sectionName\")\n",
262     "query2sql.show(false)\n",
263     "query2sql.explain()"
264    ]
265   },
266   {
267    "cell_type": "markdown",
268    "metadata": {},
269    "source": [
270     "#### Query 3: Transform the given Spark SQL query into the Dataframe API"
271    ]
272   },
273   {
274    "cell_type": "code",
275    "execution_count": null,
276    "metadata": {},
277    "outputs": [
278     {
279      "data": {
280       "text/plain": [
281        "Intitializing Scala interpreter ..."
282       ]
283      },
284      "metadata": {},
285      "output_type": "display_data"
286     }
287    ],
288    "source": [
289     "val query3  = spark.sql(\n",
290     "    \"SELECT a.headline, COUNT(c.commentID) AS numComments FROM articles a, comments c WHERE a.articleID = c.articleID GROUP BY a.headline\" )\n",
291     "query3.show(false) // 'false' turns off truncation of row entries\n",
292     "query3.explain()"
293    ]
294   },
295   {
296    "cell_type": "code",
297    "execution_count": null,
298    "metadata": {},
299    "outputs": [],
300    "source": [
301     "val query3df = articlesDF.join(commentsDF, articlesDF.col(\"articleID\") === commentsDF.col(\"articleID\")).groupBy(articlesDF.col(\"headline\")).agg(count(commentsDF.col(\"commentID\")).as(\"numComments\"))"
302    ]
303   },
304   {
305    "cell_type": "markdown",
306    "metadata": {},
307    "source": [
308     "#### Query 4: Transform the given Spark SQL query into the Dataframe API"
309    ]
310   },
311   {
312    "cell_type": "code",
313    "execution_count": null,
314    "metadata": {},
315    "outputs": [
316     {
317      "data": {
318       "text/plain": [
319        "Intitializing Scala interpreter ..."
320       ]
321      },
322      "metadata": {},
323      "output_type": "display_data"
324     }
325    ],
326    "source": [
327     "val query4 = spark.sql(\" SELECT headline, byline, pubDate FROM articles WHERE headline RLIKE \\\"2016\\\" \")\n",
328     "query4.show(false)\n",
329     "query4.explain()"
330    ]
331   },
332   {
333    "cell_type": "code",
334    "execution_count": null,
335    "metadata": {},
336    "outputs": [],
337    "source": [
338     "val query4df = articlesDF.filter(articlesDF.col(\"headline\").rlike(\"2016\")).select(articlesDF.col(\"headline\"), articlesDF.col(\"byline\"), articlesDF.col(\"pubDate\"))"
339    ]
340   },
341   {
342    "cell_type": "markdown",
343    "metadata": {},
344    "source": [
345     "#### Query 5: Transform the given Dataframe API query into Spark SQL"
346    ]
347   },
348   {
349    "cell_type": "code",
350    "execution_count": null,
351    "metadata": {},
352    "outputs": [
353     {
354      "data": {
355       "text/plain": [
356        "Intitializing Scala interpreter ..."
357       ]
358      },
359      "metadata": {},
360      "output_type": "display_data"
361     }
362    ],
363    "source": [
364     "val query5 = articlesDF\n",
365     "      .join(commentsDF, articlesDF(\"articleID\") === commentsDF(\"articleID\"))\n",
366     "      .select(explode(articlesDF(\"keywords\")).as(\"singleKeyWords\"))\n",
367     "      .groupBy(\"singleKeyWords\")\n",
368     "      .agg(count(\"singleKeyWords\").as(\"number\"))\n",
369     "      .orderBy(desc(\"number\"))\n",
370     "query5.show(false)\n",
371     "query5.explain()"
372    ]
373   },
374   {
375    "cell_type": "markdown",
376    "metadata": {},
377    "source": [
378     "Note here that \"explode\" is a Spark SQL function that turns a tuple with column that contains a collection of objects into multiple tuples each with a single value from this collection. More information here: https://spark.apache.org/docs/2.3.0/api/sql/index.html#explode"
379     "Note here that \"explode\" is a Spark SQL function that takes a row whose column contains a collection of objects and turns it into multiple rows, each holding a single value from that collection. More information here: https://spark.apache.org/docs/2.3.0/api/sql/index.html#explode"
380   },
381   {
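  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The next cell is a minimal sketch of what 'explode' does on a toy Dataframe; the column names 'id' and 'tags' and the values are made up for illustration and are not part of the articles dataset:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "// Toy example with hypothetical data: each input row carries a list of tags,\n",
    "// explode() turns it into one output row per (id, tag) pair\n",
    "import org.apache.spark.sql.functions.explode\n",
    "import spark.implicits._\n",
    "val toyDF = Seq((\"a1\", Seq(\"politics\", \"economy\")), (\"a2\", Seq(\"politics\"))).toDF(\"id\", \"tags\")\n",
    "toyDF.select(toyDF.col(\"id\"), explode(toyDF.col(\"tags\")).as(\"tag\")).show()"
   ]
  },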
382    "cell_type": "code",
383    "execution_count": null,
384    "metadata": {},
385    "outputs": [],
386    "source": [
387     "// explode() has to run before the aggregation, so it is applied in a subquery\n",
388     "val query5sql = spark.sql(\"SELECT singleKeyWords, COUNT(singleKeyWords) AS number FROM (SELECT explode(a.keywords) AS singleKeyWords FROM articles a JOIN comments c ON a.articleID = c.articleID) exploded GROUP BY singleKeyWords ORDER BY number DESC\")"
389    ]
390   },
391   {
392    "cell_type": "markdown",
393    "metadata": {},
394    "source": [
395     "### For All Queries Above: \n",
396     "#### Analyze the plans (using .explain()) and compare performance (using the Spark Web UI). Try to reason about any major differences in the logical plans (if there are any)."
397    ]
398   },
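  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A small sketch of how the plan analysis can be started, using 'query2' and 'query2sql' from above (any of the other queries works the same way). In the physical plan, an Exchange operator marks a shuffle, i.e. a wide dependency; operators between two Exchanges are pipelined as narrow dependencies within a single stage:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "// extended explain prints the parsed, analyzed and optimized logical plans plus the physical plan;\n",
    "// look for Exchange hashpartitioning(...) nodes - these are the shuffle (wide) boundaries\n",
    "query2.explain(true)\n",
    "query2sql.explain(true)"
   ]
  },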
399   {
400    "cell_type": "markdown",
401    "metadata": {},
402    "source": [
403     "## c) Wide and Narrow Dependencies\n",
404     "\n",
405     "#### Look at the Dataframe queries given as part of b), or those for which you wrote the Dataframe version.\n",
406     "\n",
407     "#### Use the Spark Internal Web UI to analyse the dependencies and stages of the queries, and try to determine which commands on which Dataframes are executed as wide dependencies and which as narrow dependencies. \n"
408    ]
409   }
410  ],
411  "metadata": {
412   "kernelspec": {
413    "display_name": "Scala (spylon-kernel)",
414    "language": "scala",
415    "name": "spylon-kernel"
416   },
417   "language_info": {
418    "codemirror_mode": "text/x-scala",
419    "file_extension": ".scala",
420    "help_links": [
421     {
422      "text": "MetaKernel Magics",
423      "url": "https://github.com/calysto/metakernel/blob/master/metakernel/magics/README.md"
424     }
425    ],
426    "mimetype": "text/x-scala",
427    "name": "scala",
428    "pygments_lexer": "scala",
429    "version": "0.4.1"
430   }
431  },
432  "nbformat": 4,
433  "nbformat_minor": 2
434 }