[ milorad @ 21.12.2018. 10:50 ] @
Ako može pomoć ....Ova skripta otvara Chrome i ubacuje u google pretragu primer Blic i treba da izbaci sve linkove, i to sve smešta u Excel fajl....međutim problem je što 2 puta to uradi a treći neće pa sve tako izbaci grešku: Code: konacno.py", line 21, in <module> search_button.click() File "C:\Python27\lib\site-packages\splinter\driver\webdriver\__init__.py", line 634, in click self._element.click() File "C:\Python27\lib\site-packages\selenium\webdriver\remote\webelement.py", line 80, in click self._execute(Command.CLICK_ELEMENT) File "C:\Python27\lib\site-packages\selenium\webdriver\remote\webelement.py", line 633, in _execute return self._parent.execute(command, params) File "C:\Python27\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 321, in execute self.error_handler.check_response(response) File "C:\Python27\lib\site-packages\selenium\webdriver\remote\errorhandler.py", line 242, in check_response raise exception_class(message, screen, stacktrace) ElementNotVisibleException: Message: element not interactable (Session info: chrome=71.0.3578.98) (Driver info: chromedriver=2.45.615291 (ec3682e3c9061c10f26ea9e5cdcf3c53f3f74387),platform=Windows NT 10.0.16299 x86_64) a skripta je ovako Code: from splinter import Browser import pandas as pd url = "https://www.google.com" browser = Browser('chrome') browser.visit(url) search_bar_xpath = '//*[@id="tsf"]/div[2]/div/div[1]/div/div[1]/input' search_bar = browser.find_by_xpath(search_bar_xpath)[0] search_bar.fill("Developed By Xtream Codes v1.0.60") search_button_xpath = '//*[@id="tsf"]/div[2]/div/div[2]/div[2]/div/center/input[1]' search_button = browser.find_by_xpath(search_button_xpath)[0] search_button.click() search_results_xpath = '//*[@id="rso"]/div/div/div[1]/div/div/div[1]/a[1]/div/cite' search_results = browser.find_by_xpath(search_results_xpath) scraped_data = [] for search_result in search_results: title = search_result.text.encode('utf8') link = search_result["href"] 
scraped_data.append((title, link)) df = pd.DataFrame(data=scraped_data, columns=["title", "link"]) df.to_csv("links.csv") Pozdrav i Hvala unapred na pomoći PS: malo da oživimo ovaj podforum