>>> lines = sc.textFile("file:///usr/local/spark/mycode/rdd/word.txt")
>>> lines.foreach(print)
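A note on foreach(print): it executes on the executors, so the lines only appear in this console because the shell is running in local mode; on a cluster the output would go to the executors' logs instead. A driver-side alternative for small files is to collect first:

>>> for line in lines.collect():    # pull all elements back to the driver
...     print(line)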
>>> words = lines.flatMap(lambda line:line.split())
>>> words.foreach(print)
>>> wordslower = words.map(lambda word:word.lower())
>>> wordslower.foreach(print)
>>> words1 = wordslower.filter(lambda word:len(word)>2)
>>> words1.foreach(print)
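The three transformations above can equally be chained into a single statement, producing the same RDD of lowercase words longer than two characters:

>>> words1 = lines.flatMap(lambda line: line.split()) \
...     .map(lambda word: word.lower()) \
...     .filter(lambda word: len(word) > 2)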
>>> with open("/usr/local/spark/mycode/rdd/stopwords.txt") as f:
...     stops = f.read().split()
>>> words1 = words1.filter(lambda word:word not in stops)
>>> words1.collect()
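Here stops is a plain Python list captured in the lambda's closure, so it is serialized and shipped with every task. For a large stop-word list, a sketch of a more efficient variant (the name bstops is made up here) broadcasts it once per executor and uses a set for O(1) membership tests:

>>> bstops = sc.broadcast(set(stops))    # shipped to each executor once
>>> words1 = words1.filter(lambda word: word not in bstops.value)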
>>> words1 = words1.map(lambda word:(word,1))
>>> words1.collect()
>>> words1.reduceByKey(lambda a,b:a+b).collect()
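collect() returns the pairs in arbitrary order. A possible follow-up (not in the original session) sorts by count to show the most frequent words:

>>> words1.reduceByKey(lambda a,b:a+b).sortBy(lambda kv: kv[1], ascending=False).take(5)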
>>> lines1 = sc.textFile("file:///usr/local/spark/mycode/rdd/chapter4-data01.txt")
>>> group1 = lines1.map(lambda line:line.split(',')).map(lambda line:(line[1],1)).groupByKey()
>>> group1.foreach(print)
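groupByKey() produces pairs of (key, ResultIterable), so foreach(print) shows each key with an iterable object rather than a count. To turn each group into a count, map over the values:

>>> group1.mapValues(lambda values: len(list(values))).foreach(print)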
>>> groupNum = lines1.map(lambda line:line.split(',')).map(lambda line:(line[1],1)).reduceByKey(lambda a,b:a+b)
>>> groupNum.foreach(print)
>>> groupNum1 = lines1.map(lambda line:line.split(',')).map(lambda line:(line[0],1)).reduceByKey(lambda a,b:a+b)
>>> groupNum1.foreach(print)
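Assuming each line of chapter4-data01.txt has the form name,course,score (inferred from the indexing above, not stated in the source), groupNum counts how many students took each course and groupNum1 counts how many courses each student took; only the key changes. Under the same assumption, a hypothetical extension computes the average score per course:

>>> avg = lines1.map(lambda line: line.split(',')) \
...     .map(lambda x: (x[1], (float(x[2]), 1))) \
...     .reduceByKey(lambda a, b: (a[0] + b[0], a[1] + b[1])) \
...     .mapValues(lambda t: t[0] / t[1])    # (sum, count) -> average
>>> avg.foreach(print)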
Source: https://www.cnblogs.com/86xiang/p/14619664.html