Code for Chapter 12 of the textbook 《大数据基础编程、实验和案例教程(第2版)》 by 林子雨


The commands and code in the textbook 《大数据基础编程、实验和案例教程(第2版)》 by 林子雨 (see the textbook's official website) do not reproduce well in the printed edition, which may make them hard to read. To help readers understand the commands and code correctly, or copy them directly for hands-on lab work, all of the commands and code that accompany the book are provided here.

Chapter 12  Installing and Using Data Collection Tools

Textbook page 187


cd ~/下载
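#Unpack the downloaded Kafka package into /usr/local, rename the unpacked directory to kafka, and make the hadoop user its owner (~/下载 is the Downloads folder on a Chinese-locale Ubuntu system)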
sudo tar -zxvf kafka_2.11-0.10.2.0.tgz -C /usr/local
cd /usr/local
sudo mv kafka_2.11-0.10.2.0/ ./kafka
sudo chown -R hadoop ./kafka
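#Start the ZooKeeper service; the command runs in the foreground, so keep this terminal open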
cd /usr/local/kafka
./bin/zookeeper-server-start.sh config/zookeeper.properties
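#Open a new terminal and start the Kafka server; it also runs in the foreground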
cd /usr/local/kafka
./bin/kafka-server-start.sh config/server.properties
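#In a third terminal, create a topic named dblab, then list all topics to check that it was created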
cd /usr/local/kafka
./bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic dblab
cd /usr/local/kafka
./bin/kafka-topics.sh --list --zookeeper localhost:2181
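#Start a console producer; whatever you type in this terminal is sent to the dblab topic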
cd /usr/local/kafka
./bin/kafka-console-producer.sh --broker-list localhost:9092 --topic dblab

Textbook page 188

cd /usr/local/kafka
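#In another terminal, start a console consumer that reads the messages in the dblab topic from the beginning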
./bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic dblab --from-beginning
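#Start ZooKeeper and the Kafka server again, each command in its own terminal, since both run in the foreground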
cd /usr/local/kafka
./bin/zookeeper-server-start.sh config/zookeeper.properties
cd /usr/local/kafka
./bin/kafka-server-start.sh config/server.properties

Textbook page 189

cd /usr/local/kafka
./bin/kafka-server-start.sh config/server.properties &
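#The trailing & starts the Kafka server in the background so this terminal can still be used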
cd /usr/local/kafka
./bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic wordsendertest
#This topic is named wordsendertest; 2181 is ZooKeeper's default port; partitions is the number of partitions inside the topic; replication-factor is the number of replicas, which is used on a Kafka cluster, so no replication is needed for this single-machine setup
#You can use list to show all the topics that have been created and check whether the topic created above exists
./bin/kafka-topics.sh --list --zookeeper localhost:2181
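#Optional check (not shown in the textbook): describe the topic to see its partition and replica assignment
./bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic wordsendertest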
./bin/kafka-console-producer.sh --broker-list localhost:9092 --topic wordsendertest
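#In a new terminal, start a consumer to display the messages sent to the wordsendertest topic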
cd /usr/local/kafka
./bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic wordsendertest --from-beginning

Textbook page 190

cd /usr/local/spark
./bin/spark-shell
scala> import org.apache.spark.streaming.kafka._
<console>:25: error: object kafka is not a member of package org.apache.spark.streaming
         import org.apache.spark.streaming.kafka._
                                           ^
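#The import fails because the Kafka integration jar is not on Spark's classpath; download spark-streaming-kafka-0-8_2.11-2.4.0.jar and copy it into a new kafka directory under /usr/local/spark/jars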
cd /usr/local/spark/jars
mkdir kafka
cd ~/Downloads
cp ./spark-streaming-kafka-0-8_2.11-2.4.0.jar /usr/local/spark/jars/kafka

Textbook page 191

cd /usr/local/kafka/libs
ls
cp ./* /usr/local/spark/jars/kafka
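#Restart spark-shell with Spark's own jars and the newly copied Kafka jars on the classpath; this time the import succeeds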
cd /usr/local/spark
./bin/spark-shell --jars /usr/local/spark/jars/*:/usr/local/spark/jars/kafka/*
scala> import org.apache.spark.streaming.kafka._
//The following message will be displayed
import org.apache.spark.streaming.kafka._
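#Create the sbt project directory structure for the application and edit the producer source file KafkaWordProducer.scala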
cd  /usr/local/spark/mycode
mkdir  kafka
cd  kafka
mkdir  -p  src/main/scala
cd  src/main/scala
vim  KafkaWordProducer.scala
package org.apache.spark.examples.streaming
import java.util.HashMap
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.spark.SparkConf
import org.apache.spark.streaming._
import org.apache.spark.streaming.kafka._
object KafkaWordProducer {
  def main(args: Array[String]) {
    if (args.length < 4) {
      System.err.println("Usage: KafkaWordCountProducer <metadataBrokerList> <topic> " +
        "<messagesPerSec> <wordsPerMessage>")
      System.exit(1)
    }
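    // Parse the command-line arguments: broker list, topic, messages per second, and words per message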
    val Array(brokers, topic, messagesPerSec, wordsPerMessage) = args
    // Kafka producer connection properties (bootstrap servers and serializers)
    val props = new HashMap[String, Object]()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers)
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
      "org.apache.kafka.common.serialization.StringSerializer")
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
      "org.apache.kafka.common.serialization.StringSerializer")
    val producer = new KafkaProducer[String, String](props)
    // Send some messages
    while (true) {
      (1 to messagesPerSec.toInt).foreach { messageNum =>
        val str = (1 to wordsPerMessage.toInt).map(x => scala.util.Random.nextInt(10).toString)
          .mkString(" ")
        print(str)
        println()
        val message = new ProducerRecord[String, String](topic, null, str)
        producer.send(message)
      }
     Thread.sleep(1000)
    }
  }
}

Textbook page 193

package org.apache.spark.examples.streaming
import org.apache.spark._
import org.apache.spark.SparkConf
import org.apache.spark.streaming._
import org.apache.spark.streaming.kafka._
import org.apache.spark.streaming.StreamingContext._
import org.apache.spark.streaming.kafka.KafkaUtils

object KafkaWordCount{
def main(args:Array[String]){
StreamingExamples.setStreamingLogLevels()
val sc = new SparkConf().setAppName("KafkaWordCount").setMaster("local[2]")
val ssc = new StreamingContext(sc,Seconds(10))
ssc.checkpoint("file:///usr/local/spark/mycode/kafka/checkpoint") //设置检查点,如果存放在HDFS上面,则写成类似ssc.checkpoint("/user/hadoop/checkpoint")这种形式,但是,要启动Hadoop
val zkQuorum = "localhost:2181" //Zookeeper服务器地址
val group = "1"  //Topic所在的group,可以设置为自己想要的名称,比如不用1,而是val group = "test-consumer-group" 
val topics = "wordsender"  //topics的名称
val numThreads = 1  //每个topic的分区数
val topicMap =topics.split(",").map((_,numThreads.toInt)).toMap
val lineMap = KafkaUtils.createStream(ssc,zkQuorum,group,topicMap)
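// createStream returns a DStream of (key, message) pairs; keep only the message text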
val lines = lineMap.map(_._2)
val words = lines.flatMap(_.split(" "))
val pair = words.map(x => (x,1))
val wordCounts = pair.reduceByKeyAndWindow(_ + _,_ - _,Minutes(2),Seconds(10),2) //the meaning of this line is explained in the window transformation operations of the next section
wordCounts.print
ssc.start
ssc.awaitTermination
}
}

Textbook page 194

package org.apache.spark.examples.streaming
import org.apache.spark.internal.Logging
import org.apache.log4j.{Level, Logger}
/** Utility functions for Spark Streaming examples. */
object StreamingExamples extends Logging {
  /** Set reasonable logging levels for streaming if the user has not configured log4j. */
  def setStreamingLogLevels() {
    val log4jInitialized = Logger.getRootLogger.getAllAppenders.hasMoreElements
    if (!log4jInitialized) {
      // We first log something to initialize Spark's default logging, then we override the
      // logging level.
      logInfo("Setting log level to [WARN] for streaming example." +
        " To override add a custom log4j.properties to the classpath.")
      Logger.getRootLogger.setLevel(Level.WARN)
    }
  }
}

Textbook page 195

cd  /usr/local/spark/mycode/kafka/
vim  simple.sbt
name := "Simple Project"
version := "1.0"
scalaVersion := "2.11.12"
libraryDependencies += "org.apache.spark" %% "spark-core" % "2.4.0"
libraryDependencies += "org.apache.spark" % "spark-streaming_2.11" % "2.4.0"
libraryDependencies += "org.apache.spark" % "spark-streaming-kafka-0-8_2.11" % "2.4.0" exclude("net.jpountz.lz4", "lz4")
cd  /usr/local/spark/mycode/kafka/
/usr/local/sbt/sbt  package
cd  /usr/local/hadoop
./sbin/start-dfs.sh
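#Optional check (not in the textbook): run jps to confirm that the NameNode and DataNode processes are running
jps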

Textbook page 196

cd  /usr/local/spark
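#Run KafkaWordProducer: connect to the broker at localhost:9092 and send to the wordsender topic, 3 messages per second with 5 words per message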
/usr/local/spark/bin/spark-submit  \
--driver-class-path /usr/local/spark/jars/*:/usr/local/spark/jars/kafka/*  \
--class "org.apache.spark.examples.streaming.KafkaWordProducer"   \
/usr/local/spark/mycode/kafka/target/scala-2.11/simple-project_2.11-1.0.jar  \
localhost:9092  wordsender  3  5
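#In another terminal, run KafkaWordCount to consume the wordsender topic and print the word counts for each batch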
cd  /usr/local/spark
/usr/local/spark/bin/spark-submit  \
--driver-class-path /usr/local/spark/jars/*:/usr/local/spark/jars/kafka/*  \
--class "org.apache.spark.examples.streaming.KafkaWordCount"  \
/usr/local/spark/mycode/kafka/target/scala-2.11/simple-project_2.11-1.0.jar