package main

import (
	"fmt"
	"io"
	"net/http"
	"strconv"
)

// httpGet fetches the page at url and returns its body as a string.
func httpGet(url string) (result string, err error) {
	resp, err1 := http.Get(url)
	if err1 != nil {
		err = err1
		return
	}
	defer resp.Body.Close()

	// Read the response body in 4 KB chunks.
	buf := make([]byte, 1024*4)
	for {
		n, err2 := resp.Body.Read(buf)
		if n > 0 {
			result += string(buf[:n])
		}
		if err2 != nil {
			if err2 != io.EOF { // EOF just means the body has been read completely
				err = err2
			}
			break
		}
	}
	return
}
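As an aside, on Go 1.16 or newer the same fetch can be written more compactly with io.ReadAll, which reads the body until EOF in a single call. A minimal sketch of that variant (the name httpGetSimple is only for illustration):

// httpGetSimple is an alternative to the chunked-read version above.
func httpGetSimple(url string) (string, error) {
	resp, err := http.Get(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	// io.ReadAll reads the whole body until EOF.
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}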
// DoWork crawls the Tieba list pages from page start to page end.
func DoWork(start, end int) {
	fmt.Printf("Crawling pages %d to %d\n", start, end)
	for i := start; i <= end; i++ {
		// The pn query parameter is the offset: page i starts at (i-1)*50.
		url := "https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=" + strconv.Itoa((i-1)*50)
		fmt.Println(url)

		// Fetch the page; on failure, skip to the next one.
		result, err := httpGet(url)
		if err != nil {
			fmt.Println("http err: ", err)
			continue
		}
		fmt.Println("content: ", result)
	}
}
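Printing the full HTML to the terminal quickly becomes unreadable. As a possible variation (not part of the original listing), each page could instead be written to its own file with os.WriteFile; this sketch assumes a simple <pageNum>.html naming scheme and requires adding "os" to the imports:

// savePage writes the fetched HTML of one page to <pageNum>.html.
func savePage(pageNum int, content string) error {
	fileName := strconv.Itoa(pageNum) + ".html"
	// 0644: owner read/write, everyone else read-only.
	return os.WriteFile(fileName, []byte(content), 0644)
}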
func main() {
	// Example URL: https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=200
	var start, end int
	fmt.Println("Enter the start page (>= 1): ")
	fmt.Scan(&start)
	fmt.Printf("Enter the end page (>= start page): ")
	fmt.Scan(&end)
	DoWork(start, end)
}
Source: https://blog.51cto.com/u_15144024/2840147