Characters
- `.` — matches any single character. e.g. `abc.` matches abcd, abcx, abc9;
- `[]` — matches any one of the characters inside the brackets. e.g. `[abc]d` matches ad, bd, cd;
- `-` — inside `[]`, denotes a range. e.g. `[A-Za-z0-9]`;
- `^` — inside `[^]`, negates the set: any character except those listed. e.g. `[^xy]a` matches aa and da, but not xa or ya (see the demo below);
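A minimal sketch checking these rules with Go's regexp package (the sample strings are invented for illustration):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// "." matches any single character.
	fmt.Println(regexp.MustCompile(`abc.`).MatchString("abc9")) // true

	// "[abc]d": one of a, b, c, followed by d.
	fmt.Println(regexp.MustCompile(`[abc]d`).MatchString("cd")) // true
	fmt.Println(regexp.MustCompile(`[abc]d`).MatchString("1d")) // false

	// "[^xy]a": any character except x or y, followed by a.
	fmt.Println(regexp.MustCompile(`[^xy]a`).MatchString("da")) // true
	fmt.Println(regexp.MustCompile(`[^xy]a`).MatchString("xa")) // false
}
```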
Quantifiers
- `?` — the preceding unit matches 0 or 1 times;
- `+` — the preceding unit matches 1 or more times;
- `*` — the preceding unit matches 0 or more times;
- `{m,n}` — lower and upper bounds on the repeat count. e.g. an IP address: `[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}` (the dots must be escaped, otherwise they match any character; see the sketch below);
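A short sketch of the quantifiers and the IP-address pattern above (sample input invented for illustration):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// "?": 0 or 1; "+": 1 or more; "*": 0 or more.
	fmt.Println(regexp.MustCompile(`ab?c`).MatchString("ac"))   // true
	fmt.Println(regexp.MustCompile(`ab+c`).MatchString("ac"))   // false
	fmt.Println(regexp.MustCompile(`ab*c`).MatchString("abbc")) // true

	// "{1,3}" bounds the repeat count; note the escaped dots.
	ip := regexp.MustCompile(`[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}`)
	fmt.Println(ip.FindString("host 192.168.1.10 is up")) // 192.168.1.10
}
```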
Other
- `\` — escape character;
- `|` — alternation (or);
- `()` — groups a unit. If the input itself contains parentheses, match them as literals, e.g. `[(] aaa. [)]` (see the example below);
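A small example of grouping, alternation, and literal parentheses (patterns and inputs are illustrative only):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// "()" groups a unit; "|" alternates within the group.
	fmt.Println(regexp.MustCompile(`gr(a|e)y`).MatchString("grey")) // true

	// Literal parentheses: escape them, or wrap them in a character class.
	lit := regexp.MustCompile(`[(](.*?)[)]`)
	fmt.Println(lit.FindStringSubmatch("before (aaa) after")[1]) // aaa
}
```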
Methods

```go
// Compile the pattern (panics if it is invalid); returns a *Regexp.
str := regexp.MustCompile(pattern)
// Search data; a count of -1 means find all matches (global). Returns a
// slice whose elements hold the matched string plus its submatches.
var result [][]string = str.FindAllStringSubmatch(data, -1)
```
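A self-contained sketch of the return shape (the sample HTML is made up, mirroring the crawler below); element 0 of each match is the full match, and the capture groups follow:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`阅读[(]([0-9]+)[)]`)
	data := `<span>阅读(12)</span> <span>阅读(34)</span>`
	for _, m := range re.FindAllStringSubmatch(data, -1) {
		fmt.Println(m[0], "->", m[1])
	}
	// Output:
	// 阅读(12) -> 12
	// 阅读(34) -> 34
}
```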
Crawler
Crawl the read, comment, and recommendation (digg) counts of every post on a cnblogs (博客园) blog.
```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"regexp"
	"strconv"
	"sync/atomic"
)

// The totals are updated from multiple goroutines, so use atomic adds.
var readCount int64
var commentCount int64
var diggCount int64

// HttpGet fetches the page at url and returns its body as a string.
func HttpGet(url string) (result string, err error) {
	resp, err1 := http.Get(url)
	if err1 != nil {
		err = err1
		return
	}
	defer resp.Body.Close()

	// Read the whole body in 4096-byte chunks.
	buf := make([]byte, 4096)
	for {
		n, err2 := resp.Body.Read(buf)
		if n == 0 {
			break
		}
		if err2 != nil && err2 != io.EOF {
			err = err2
			return
		}
		result += string(buf[:n])
	}
	return result, err
}

// SpiderPageDB scrapes one listing page, accumulates the three counts,
// and signals completion on the page channel.
func SpiderPageDB(index int, page chan int) {
	url := "https://www.cnblogs.com/littleperilla/default.html?page=" + strconv.Itoa(index)

	result, err := HttpGet(url)
	if err != nil {
		fmt.Println("HttpGet err:", err)
		page <- index // still signal, so working() does not block forever
		return
	}

	str := regexp.MustCompile("post-view-count\">阅读[(](?s:(.*?))[)]</span>")
	alls := str.FindAllStringSubmatch(result, -1)
	for _, j := range alls {
		temp, err := strconv.Atoi(j[1])
		if err != nil {
			fmt.Println("string2int err:", err)
			continue
		}
		atomic.AddInt64(&readCount, int64(temp))
	}

	str = regexp.MustCompile("post-comment-count\">评论[(](?s:(.*?))[)]</span>")
	alls = str.FindAllStringSubmatch(result, -1)
	for _, j := range alls {
		temp, err := strconv.Atoi(j[1])
		if err != nil {
			fmt.Println("string2int err:", err)
			continue
		}
		atomic.AddInt64(&commentCount, int64(temp))
	}

	str = regexp.MustCompile("post-digg-count\">推荐[(](?s:(.*?))[)]</span>")
	alls = str.FindAllStringSubmatch(result, -1)
	for _, j := range alls {
		temp, err := strconv.Atoi(j[1])
		if err != nil {
			fmt.Println("string2int err:", err)
			continue
		}
		atomic.AddInt64(&diggCount, int64(temp))
	}

	page <- index
}

// working launches one goroutine per page and waits for all of them.
func working(start, end int) {
	fmt.Printf("Crawling pages %d to %d...\n", start, end)

	// The channel tells the main goroutine when every page is done.
	page := make(chan int)

	// Fetch all pages concurrently.
	for i := start; i <= end; i++ {
		go SpiderPageDB(i, page)
	}
	for i := start; i <= end; i++ {
		fmt.Printf("fetched page %d\n", <-page)
	}
}

func main() {
	// Read the page range to crawl.
	var start, end int
	fmt.Print("startPos:")
	fmt.Scan(&start)
	fmt.Print("endPos:")
	fmt.Scan(&end)

	working(start, end)

	fmt.Println("reads:", readCount)
	fmt.Println("comments:", commentCount)
	fmt.Println("diggs:", diggCount)
}
```
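The done-channel above works, but sync.WaitGroup is the more idiomatic way to wait for a batch of goroutines; a minimal sketch (the scraping body is elided):

```go
package main

import (
	"fmt"
	"sync"
)

func working(start, end int) {
	var wg sync.WaitGroup
	for i := start; i <= end; i++ {
		wg.Add(1)
		go func(index int) {
			defer wg.Done()
			fmt.Printf("fetched page %d\n", index) // SpiderPageDB(index) would go here
		}(i)
	}
	wg.Wait() // blocks until every goroutine has called Done
}

func main() {
	working(1, 5)
}
```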
(Output screenshot omitted.)
Other examples
A classic Go crawler case: scraping Douban's Top 250. The example is worth borrowing from, and shows how much of the work the regular expressions can carry.
```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"regexp"
	"strconv"
)

// savToFile writes one page's film names and scores to a text file.
func savToFile(index int, filmName, filmScore [][]string) {
	f, err := os.Create("page" + strconv.Itoa(index) + ".txt")
	if err != nil {
		fmt.Println("os create err", err)
		return
	}
	defer f.Close()

	// Guard against the two result slices having different lengths.
	n := len(filmName)
	if len(filmScore) < n {
		n = len(filmScore)
	}

	// Header row: name and score.
	f.WriteString("film name" + "\t\t\t" + "score" + "\n")
	for i := 0; i < n; i++ {
		f.WriteString(filmName[i][1] + "\t\t\t" + filmScore[i][1] + "\n")
	}
}

func main() {
	var start, end int
	fmt.Print("start page: ")
	fmt.Scan(&start)
	fmt.Print("end page: ")
	fmt.Scan(&end)
	working(start, end)
}

func working(start int, end int) {
	fmt.Printf("Crawling pages %d to %d\n", start, end)
	for i := start; i <= end; i++ {
		SpiderPage(i)
	}
}

// SpiderPage scrapes one Douban Top 250 page and saves it to a file.
func SpiderPage(index int) {
	// Each page lists 25 films, offset via the start parameter.
	url := "https://movie.douban.com/top250?start=" + strconv.Itoa((index-1)*25) + "&filter="

	result, err := HttpGet(url)
	if err != nil {
		fmt.Println("httpget err", err)
		return
	}

	// Extract the film names.
	ret := regexp.MustCompile(`<img width="100" alt="(?s:(.*?))"`)
	filmName := ret.FindAllStringSubmatch(result, -1)
	for _, name := range filmName {
		fmt.Println("name", name[1])
	}

	// Extract the scores.
	ret2 := regexp.MustCompile(`<span class="rating_num" property="v:average">(?s:(.*?))<`)
	filmScore := ret2.FindAllStringSubmatch(result, -1)
	for _, score := range filmScore {
		fmt.Println("score", score[1])
	}

	savToFile(index, filmName, filmScore)
}

// HttpGet fetches the page at url and returns its body as a string.
func HttpGet(url string) (result string, err error) {
	req, err1 := http.NewRequest("GET", url, nil)
	if err1 != nil {
		err = err1
		return
	}
	// Spoof a browser User-Agent: plain http.Get is flagged as a bot by
	// Douban and answered with status 418, so the header is required.
	req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36 OPR/66.0.3515.115")
	resp, err2 := (&http.Client{}).Do(req)
	if err2 != nil {
		err = err2
		return
	}
	defer resp.Body.Close()

	// Read the whole body in 4096-byte chunks.
	buf := make([]byte, 4096)
	for {
		n, err3 := resp.Body.Read(buf)
		if n == 0 {
			break
		}
		if err3 != nil && err3 != io.EOF {
			err = err3
			return
		}
		result += string(buf[:n])
	}
	return
}
```
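Since Go 1.16, io.ReadAll makes the chunked read loop unnecessary; a sketch of a drop-in HttpGet under that assumption (User-Agent string abbreviated here):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

// HttpGet rewritten with io.ReadAll (Go 1.16+); behavior matches the
// chunked read loop above, including the spoofed User-Agent.
func HttpGet(url string) (string, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("User-Agent", "Mozilla/5.0") // abbreviated UA string
	resp, err := (&http.Client{}).Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	return string(body), err
}

func main() {
	body, err := HttpGet("https://example.com/")
	fmt.Println(len(body), err)
}
```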