# Scrape a Tencent News article (news.qq.com)
import re
import json
import requests
from bs4 import BeautifulSoup
from datetime import datetime

url = 'https://news.qq.com/a/20180520/008334.htm'
fuck = {}
res = requests.get(url)
res.encoding = 'utf-8'          # set the encoding before touching res.text, otherwise the Chinese text comes out garbled
soup = BeautifulSoup(res.text, 'html.parser')
fuck['title'] = soup.select('.hd h1')[0].text
fuck['editor'] = soup.select('#QQeditor')[0].text
fuck['origin'] = soup.select('.a_source')[0].text
fuck['time'] = soup.select('.a_time')[0].text
# timesource = soup.select('.time-source')[0].contents[0].strip()
# time1 = soup.select('.a_time')[0].contents[0].strip()
# fuck['dt'] = datetime.strptime(time1, '%Y年%m月%d日%H:%M')
fuck['article'] = '@'.join([p.text.strip() for p in soup.select('#Cnt-Main-Article-QQ p')])
fuck  # how far did we get? display the result dict in the notebook
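Continuing from the script above, a minimal sketch of checking the result: json is imported but otherwise unused, so dumping the dict is one way to eyeball the scraped fields, and the commented-out strptime step could be re-enabled as shown, assuming the .a_time text really follows the '2018年05月20日10:27' pattern that the format string expects.

# quick check of the scraped fields (datetime values are not JSON-serializable, so dump before adding 'dt')
print(json.dumps(fuck, ensure_ascii=False, indent=2))

# assumption: the .a_time text is of the form '2018年05月20日10:27'
fuck['dt'] = datetime.strptime(fuck['time'].strip(), '%Y年%m月%d日%H:%M')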
# An earlier example from 2016: scrape a Sina News article plus its comment count
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import json

commenturl = 'http://comment5.news.sina.com.cn/page/info?version=1&format=js&channel=gn&newsid=comos-{}&\
group=&compress=0&ie=utf-8&oe=utf-8&page=1&\
page_size=20'

def getCommentCounts(newsurl):
    m = re.search('doc-i(.*).shtml', newsurl)
    newsid = m.group(1)
    comments = requests.get(commenturl.format(newsid))
    print(commenturl.format(newsid))
    # the API returns a JS wrapper 'var data={...}'; strip it off so json.loads can parse the payload
    jd = json.loads(comments.text.strip('var data='))
    return jd['result']['count']['total']

def getNewsDetail(newsurl):
    result = {}
    res = requests.get(newsurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    result['title'] = soup.select('#artibodyTitle')[0].text
    result['newssource'] = soup.select('.time-source span a')[0].text
    timesource = soup.select('.time-source')[0].contents[0].strip()
    result['dt'] = datetime.strptime(timesource, '%Y年%m月%d日%H:%M')
    result['article'] = '@'.join([p.text.strip() for p in soup.select('#artibody p')[:-1]])
    result['editor'] = soup.select('.article-editor')[0].text.strip('责任编辑:')
    result['comments'] = getCommentCounts(newsurl)
    return result

newsurl = 'http://news.sina.com.cn/c/nd/2016-12-18/doc-ifxytqax6457791.shtml'  # just assign the article URL you want to newsurl
print(getNewsDetail(newsurl))
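As a standalone check of the first step, the sketch below shows what the doc-i regex pulls out of the sample URL; that id is what gets substituted into the {} placeholder of commenturl.

import re

newsurl = 'http://news.sina.com.cn/c/nd/2016-12-18/doc-ifxytqax6457791.shtml'
m = re.search('doc-i(.*).shtml', newsurl)
print(m.group(1))  # -> 'fxytqax6457791'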
posted on 2018-05-20 17:47