Regular Expression

Scrapy 4

The core of web scraping is to find the internal relationships between the target items.

import urllib.request
import os

def url_open(url):
    """Fetch *url* and return the raw response body as bytes.

    A desktop-browser User-Agent header is attached so the target site
    does not reject the request as coming from a script.
    """
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36')
    # Bug fix: the original called urlopen(url) with the bare URL, so the
    # Request object -- and therefore the User-Agent header -- was never used.
    response = urllib.request.urlopen(req)
    return response.read()


def get_page(url):
    """Return the current comment-page number (as a string) scraped from *url*."""
    page_html = url_open(url).decode('utf-8')
    # The number sits 23 characters past the 'current-comment-page'
    # marker and runs up to the next closing bracket.
    start = page_html.find('current-comment-page') + 23
    end = page_html.find(']', start)
    return page_html[start:end]


def find_imgs(url):
    """Collect every image address ending in '.jpg' found in the page at *url*."""
    page = url_open(url).decode('utf-8')
    addresses = []

    pos = page.find('img src=')
    while pos != -1:
        # Look for a '.jpg' suffix within 255 characters of the marker.
        end = page.find('.jpg', pos, pos + 255)
        if end != -1:
            # Skip the 9 characters of 'img src="' and keep the '.jpg'.
            addresses.append(page[pos + 9:end + 4])
        else:
            # No match nearby: resume scanning just past this marker.
            end = pos + 9
        pos = page.find('img src=', end)

    return addresses

def save_imgs(img_addrs):
    """Download every address in *img_addrs* into the current directory."""
    for addr in img_addrs:
        # Use the last path component of the URL as the local file name.
        name = addr.split('/')[-1]
        with open(name, 'wb') as out:
            out.write(url_open(addr))

def download_mm(folder='picture', pages=10):
    """Download images from the newest *pages* comment pages into *folder*.

    Fixes relative to the original:
    - ``save_imgs`` takes a single argument; the original call also passed
      ``folder``, which raised ``TypeError``.
    - ``page_num -= i`` decremented cumulatively (0, 1, 3, 6, ...); each
      iteration should fetch page ``page_num - i`` instead.
    - ``os.mkdir`` raised ``FileExistsError`` on a second run; use
      ``os.makedirs(..., exist_ok=True)``.
    """
    os.makedirs(folder, exist_ok=True)
    os.chdir(folder)

    url = "http://jandan.net/ooxx/"
    page_num = int(get_page(url))  # newest (current) page number

    for i in range(pages):
        page_url = url + 'page-' + str(page_num - i) + '#comments'
        img_addrs = find_imgs(page_url)
        save_imgs(img_addrs)


if __name__ == '__main__':
    download_mm()

However, this code still needs to be improved to work through a proxy.

Search for IP automatically

Regular Expression

The shell wildcards are * and ?.
In regular expressions the wildcard is . (it matches any single character).
A backslash \ can be used to strip a metacharacter of its special meaning.

>>> import re
>>> re.search(r'Aaron', 'I am Aaron.')
<_sre.SRE_Match object; span=(5, 10), match='Aaron'>
>>> re.search(r'.', 'I am Aaron.')
<_sre.SRE_Match object; span=(0, 1), match='I'>
>>> re.search(r'a.', 'I am Aaron.')
<_sre.SRE_Match object; span=(2, 4), match='am'>
>>> re.search(r'\.', 'I am Aaron.')
<_sre.SRE_Match object; span=(10, 11), match='.'>
>>> re.search(r'\d', 'I am Aaron123.')
<_sre.SRE_Match object; span=(10, 11), match='1'>
>>> re.search(r'\d\d\d\.\d\d\d\.\d\d\d.\d\d\d', '192.168.117.132')
<_sre.SRE_Match object; span=(0, 15), match='192.168.117.132'>
>>> #字符类的创建
>>> re.search(r'\d\d\d\.\d\d\d\.\d\d\d.\d\d\d', '192.168.1.1')
>>> re.search(r'[aeiou]', 'I am Aaron.')
<_sre.SRE_Match object; span=(2, 3), match='a'>
>>> # re支持大小写敏感模式
>>> re.search(r'[aeiouAEIOU]', 'I am Aaron.')
<_sre.SRE_Match object; span=(0, 1), match='I'>
>>> re.search(r'[a-z]', 'I am Aaron.')
<_sre.SRE_Match object; span=(2, 3), match='a'>
>>> re.search(r'[0-9]', 'I am Aaron123.')
<_sre.SRE_Match object; span=(10, 11), match='1'>
>>> re.search(r'[2-9]', 'I am Aaron123.')
<_sre.SRE_Match object; span=(11, 12), match='2'>
>>> re.search(r'ab[3]c', 'abbbc')
>>> re.search(r'ab{3}c', 'abbbc')
<_sre.SRE_Match object; span=(0, 5), match='abbbc'>
>>> re.search(r'ab{3,10}c', 'abbbbbc')
<_sre.SRE_Match object; span=(0, 7), match='abbbbbc'>
>>> # 正则表达式匹配的是字符串,数字只支持0-9
>>> re.search(r'[0-2][0-5][0-5]', '220')
<_sre.SRE_Match object; span=(0, 3), match='220'>
>>> re.search(r'[0-2][0-5][0-5]', '198')
>>> re.search(r'[01]\d\d|2[0-4]\d|25[0-5]', '198')
<_sre.SRE_Match object; span=(0, 3), match='198'>
>>> re.search(r'(([01]\d\d|2[0-4]\d|25[0-5])\.){3}([01]\d\d|2[0-4]\d|25[0-5])', '192.168.1.1')
>>> re.search(r'(([01]{0,1}\d{0,1}\d|2[0-4]\d|25[0-5])\.){3}([01]{0,1}\d{0,1}\d|2[0-4]\d|25[0-5])', '192.168.1.1')
<_sre.SRE_Match object; span=(0, 11), match='192.168.1.1'>

Regular Expression is an important tool for us. We need to learn to use it more proficiently.


本博客所有文章除特别声明外,均采用 CC BY-SA 4.0 协议 ,转载请注明出处!