2020-04-19

Importing requests and BeautifulSoup

"""Fetch a web page with requests and explore it with BeautifulSoup.

Notebook-style scraping snippet: download the page, pretty-print the
parsed HTML, then pull the text of the third <p> element.
"""
import requests
from bs4 import BeautifulSoup

url = 'https://xxxxxxxxx'  # placeholder URL from the original post
page = requests.get(url)

# Parse the response body with the stdlib html.parser backend.
soup = BeautifulSoup(page.text, 'html.parser')
print(soup.prettify())

# All <p> elements on the page; index 2 is the third paragraph.
# NOTE(review): the original source was truncated at "get_tex" —
# completed here as get_text(). Raises IndexError if the page has
# fewer than three <p> elements.
paragraphs = soup.find_all('p')
paragraphs[2].get_text()
2020-04-18

Python crawler

# Selenium + BeautifulSoup snippet: launch Chrome via a local driver path,
# load a (placeholder) URL, and hand the rendered page source to
# BeautifulSoup with the 'lxml' backend for CSS-selector lookup.
# NOTE(review): all newlines were stripped by the extraction, and the line
# is TRUNCATED mid-statement at select_one('# — the CSS selector and any
# following code are missing from this chunk; do not run as-is.
from selenium import webdriverdriver=webdriver.Chrome("xxx location")driver.get("http://xxxxxxxx") driver.page_sourcefrom bs4 import BeautifulSoupsoup = BeautifulSoup(driver.page_source,  'lxml')soup.select_one('#