
A Python Example of Scraping Tmall Product Details and Transaction Records

Published: 2021-02-19 09:45:04 · Source: 亿速云 · Author: 小新 · Category: Development

This article walks through an example of scraping Tmall product details and transaction records with Python. The editor finds it quite practical and shares it here for reference; hopefully you will get something out of it by the end.

1. Setting Up the Python Environment

This post uses Python 2.7.
Modules involved: spynner, scrapy, bs4, pymssql
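Before running the crawler, it is worth confirming that all four packages are importable. The following is only a minimal sanity-check sketch (not part of the original article); module roles are noted in comments, and the version attribute is read defensively since not every package exposes one.

# Quick import check for the required packages (Python 2.7).
# Any ImportError here means the package still needs to be installed.
import spynner    # Qt/WebKit-based browser used to load and click pages
import scrapy     # only scrapy.selector.Selector is used, for XPath extraction
import bs4        # BeautifulSoup, used to parse the deal-record table
import pymssql    # SQL Server driver used to store the results

for mod in (spynner, scrapy, bs4, pymssql):
    # Not every package defines __version__, so fall back to 'unknown'
    print mod.__name__, getattr(mod, '__version__', 'unknown')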

2. The Tmall Data to Collect

For each product, the script collects the detail-page fields (title, list price, promotional price, postage, stock, monthly sales, collect count and rating count), the product property list from the description tab, and the transaction records (buyer, style, quantity, and deal date/time).

3. Data Scraping Workflow

The product links to crawl are read from a ProductURLs table in SQL Server. Each page is loaded in a spynner browser, the detail fields are extracted with XPath selectors, the description tab is clicked to collect the property list, and the deal-record tab is then paged through until no "next page" link remains, with everything written back to the database along the way.

4. Source Code

#coding:utf-8
import spynner
from scrapy.selector import Selector
from bs4 import BeautifulSoup
import random
import pymssql

#------------------------ Connect to the database ------------------------#
server = "localhost"
user = "sa"
password = "123456"
conn = pymssql.connect(server, user, password, "TmallData")
# pymssql.connect raises an exception on failure, so reaching this point means the connection succeeded
if conn:
    print "Database connected successfully!"
else:
    print "Database connection error!"
cursor = conn.cursor()

#---------------------- Browser helper functions ----------------------#
def py_click_element(browser, pos):
    # Click an element on the page
    # pos example: 'a[href="#description" rel="external nofollow" rel="external nofollow" ]'
    browser.click(pos)
    browser.wait(random.randint(3, 10))
    return browser

def py_click_xpath(browser, xpath):
    # Resolve the link's href via XPath, then click the matching anchor
    xpath = xpath + '/@href'
    inner_href = Selector(text=browser.html).xpath(xpath).extract()
    pos = 'a[href="' + str(inner_href[0]) + '" rel="external nofollow" ]'
    browser = py_click_element(browser, pos)
    return browser

def py_webpage_load(browser, url):
    browser.load(url, load_timeout=60)
    browser.wait(10)
    return browser

def py_check_element(browser, xpath):
    # Return True if the XPath matches an element on the current page, otherwise False
    if Selector(text=browser.html).xpath(xpath).extract() != []:
        return True
    else:
        return False

def py_extract_xpath(browser, xpath):
    if py_check_element(browser, xpath):
        return Selector(text=browser.html).xpath(xpath).extract()[0]
    else:
        return "none"

def py_extract_xpaths(browser, xpaths):
    # Extract several XPaths from the current page in one pass
    length = len(xpaths)
    results = [0] * length
    for i in range(length):
        results[i] = py_extract_xpath(browser, xpaths[i])
    return results

#---------------------- Database helper functions ----------------------#
#---------------------- Data extraction functions ----------------------#
def py_getDealReord(doc):
    # Parse the deal-record table: buyer, style, quantity, deal date and deal time per row
    soup = BeautifulSoup(doc, 'lxml')
    tr = soup.find_all('tr')
    total_dealRecord = [[0] * 5 for i in range(len(tr))]
    i = -1
    for this_tr in tr:
        i = i + 1
        td_user = this_tr.find_all('td', attrs={'class': "cell-align-l buyer"})
        for this_td in td_user:
            total_dealRecord[i][0] = this_td.getText().strip(' ')
        td_style = this_tr.find_all('td', attrs={'class': "cell-align-l style"})
        for this_td in td_style:
            total_dealRecord[i][1] = this_td.getText(',').strip(' ')
        td_quantity = this_tr.find_all('td', attrs={'class': "quantity"})
        for this_td in td_quantity:
            total_dealRecord[i][2] = this_td.getText().strip(' ')
        td_dealtime = this_tr.find_all('td', attrs={'class': "dealtime"})
        for this_td in td_dealtime:
            total_dealRecord[i][3] = this_td.find('p', attrs={'class': "date"}).getText()
            total_dealRecord[i][4] = this_td.find('p', attrs={'class': "time"}).getText()
    return total_dealRecord

#-------------------- Fetch all product links to crawl --------------------#
cursor.execute("""
select * from ProductURLs where BrandName='NB'
""")
# Open the error log in append mode (the original opened it read-only, which breaks file.write below)
file = open("H:\\Eclipse\\TmallCrawling\\HTMLParse\\errLog.txt", 'a')
InProductInfo = cursor.fetchall()
browser = spynner.Browser()
for temp_InProductInfo in InProductInfo:
    url = 'https:' + temp_InProductInfo[2]
    BrandName = temp_InProductInfo[0]
    ProductType = temp_InProductInfo[1]
    print BrandName, '\t', ProductType, '\t', url
    # url = 'https://detail.tmall.com/item.htm?id=524425656711&rn=77636d6db8dea5e30060976fdaf9768d&abbucket=19'
    try:
        browser = py_webpage_load(browser, url)
    except:
        # Log the failing URL and move on to the next product
        print "Loading webpage failed."
        file.write(url)
        file.write('\n')
        continue

    # XPaths for the detail fields: promo price, list price, title, postage, stock,
    # collect count, rating count, monthly sales
    xpaths = ['//*[@id="J_PromoPrice"]/dd/div/span/text()',
              '//*[@id="J_StrPriceModBox"]/dd/span/text()',
              '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/div[1]/h2/text()',
              '//*[@id="J_PostageToggleCont"]/p/span/text()',
              '//*[@id="J_EmStock"]/text()',
              '//*[@id="J_CollectCount"]/text()',
              '//*[@id="J_ItemRates"]/div/span[2]/text()',
              '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/ul/li[1]/div/span[2]/text()']
    out_ProductInfo = py_extract_xpaths(browser, xpaths)

    # Switch to the description tab and collect the product property list
    browser = py_click_element(browser, 'a[href="#description" rel="external nofollow" rel="external nofollow" ]')
    ProductProperty = py_extract_xpath(browser, '//*[@id="J_AttrUL"]')
    soup = BeautifulSoup(ProductProperty, 'lxml')
    li = soup.find_all('li')
    prop = ''
    for this_li in li:
        prop = prop + this_li.getText() + '\\'
    prop = prop[0:len(prop) - 1]
    out_ProductProperty = prop
    print out_ProductProperty

    cursor.execute("""
    Insert into py_ProductInfo values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
    """, (BrandName, ProductType, url,
          out_ProductInfo[2], out_ProductInfo[1],
          out_ProductInfo[0], out_ProductInfo[7],
          out_ProductInfo[1], out_ProductInfo[3],
          out_ProductInfo[4], out_ProductInfo[5],
          out_ProductProperty))
    conn.commit()

    # Switch to the deal-record tab and save the first page of records
    Deal_PageCount = 0
    browser = py_click_element(browser, 'a[href="#J_DealRecord" rel="external nofollow" ]')
    # browser.browse(True)
    DealRecord = py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
    out_DealRecord = py_getDealReord(DealRecord)
    for temp_DealRecord in out_DealRecord:
        if str(temp_DealRecord[4]) == '0':
            # Skip rows without a deal time (e.g. the table header row)
            continue
        cursor.execute("""
        Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
        """, (url, temp_DealRecord[0], temp_DealRecord[1],
              temp_DealRecord[2], temp_DealRecord[3],
              temp_DealRecord[4]))
        conn.commit()
    Deal_PageCount = Deal_PageCount + 1
    print "Page ", Deal_PageCount

    # Walk the numbered pagination links a[1]..a[5] (indices 0 and 2 are skipped)
    for i in range(6):
        if (i == 0) or (i == 2):
            continue
        xpath = '//*[@id="J_showBuyerList"]/div/div/a[' + str(i) + ']'
        if py_check_element(browser, xpath):
            browser = py_click_xpath(browser, xpath)
            DealRecord = py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
            out_DealRecord = py_getDealReord(DealRecord)
            for temp_DealRecord in out_DealRecord:
                if str(temp_DealRecord[4]) == '0':
                    continue
                cursor.execute("""
                Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
                """, (url, temp_DealRecord[0], temp_DealRecord[1],
                      temp_DealRecord[2], temp_DealRecord[3],
                      temp_DealRecord[4]))
                conn.commit()
            Deal_PageCount = Deal_PageCount + 1
            print "Page ", Deal_PageCount

    # Keep clicking the "next page" link (a[6]) for as long as it exists
    while py_check_element(browser, '//*[@id="J_showBuyerList"]/div/div/a[6]'):
        browser = py_click_xpath(browser, '//*[@id="J_showBuyerList"]/div/div/a[6]')
        DealRecord = py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
        out_DealRecord = py_getDealReord(DealRecord)
        for temp_DealRecord in out_DealRecord:
            if str(temp_DealRecord[4]) == '0':
                continue
            cursor.execute("""
            Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
            """, (url, temp_DealRecord[0], temp_DealRecord[1],
                  temp_DealRecord[2], temp_DealRecord[3],
                  temp_DealRecord[4]))
            conn.commit()
        Deal_PageCount = Deal_PageCount + 1
        print "Page ", Deal_PageCount
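The script assumes a SQL Server database named TmallData with three tables: ProductURLs (read at the start), plus py_ProductInfo and DealRecord (written to). The article does not show their definitions, so the following is only a minimal sketch consistent with the column order and count of the SELECT/INSERT statements above; every column name and type here is an assumption, not the author's actual schema.

# Hypothetical sketch of the tables the crawler expects.
# Only the column order/count is taken from the code above; names and types are guesses.
import pymssql

conn = pymssql.connect("localhost", "sa", "123456", "TmallData")
cursor = conn.cursor()
cursor.execute("""
CREATE TABLE ProductURLs (            -- at least these three columns, indexed 0..2 in the code
    BrandName   NVARCHAR(50),
    ProductType NVARCHAR(50),
    URL         NVARCHAR(500)         -- stored without the leading 'https:'
)
""")
cursor.execute("""
CREATE TABLE py_ProductInfo (         -- 12 columns, matching the 12 placeholders above
    BrandName       NVARCHAR(50),
    ProductType     NVARCHAR(50),
    URL             NVARCHAR(500),
    Title           NVARCHAR(200),
    Price           NVARCHAR(50),
    PromoPrice      NVARCHAR(50),
    MonthlySales    NVARCHAR(50),
    Price2          NVARCHAR(50),     -- out_ProductInfo[1] is inserted twice in the original code
    Postage         NVARCHAR(50),
    Stock           NVARCHAR(50),
    CollectCount    NVARCHAR(50),
    ProductProperty NVARCHAR(MAX)
)
""")
cursor.execute("""
CREATE TABLE DealRecord (             -- 6 columns: URL plus the 5 fields from py_getDealReord
    URL      NVARCHAR(500),
    Buyer    NVARCHAR(100),
    Style    NVARCHAR(200),
    Quantity NVARCHAR(20),
    DealDate NVARCHAR(20),
    DealTime NVARCHAR(20)
)
""")
conn.commit()
conn.close()

Text columns are used throughout because the XPath helpers return raw strings; converting prices, quantities and timestamps to numeric or datetime types would be a reasonable refinement.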

That is all for "A Python Example of Scraping Tmall Product Details and Transaction Records". Hopefully the content above is of some help and you have learned something from it. If you found the article useful, please share it so more people can see it.


