I am currently having an issue with my scraper when I set options.add_argument("--headless"). However, it works perfectly fine when that line is removed. Could anyone advise how I can achieve the same results in headless mode?
Below is my python code:
from seleniumwire import webdriver as wireDriver
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
chromedriverPath = '/Users/applepie/Desktop/chromedrivermac'


def scraper(search):
    """Search the MIT Orbit knowledge base and return the result links.

    Parameters
    ----------
    search : iterable of str
        Search terms; joined with '+' into the query string.

    Returns
    -------
    list of dict
        One ``{"url": <href>}`` entry per search result; empty if the
        page times out.
    """
    # Build the search URL from the query terms.
    mit = "https://orbit-kb.mit.edu/hc/en-us/search?utf8=?&query="  # Empty search on mit site
    mit += "+".join(search) + "&commit=Search"
    results = []

    options = Options()
    options.add_argument("--headless")
    # No space after the comma: with "1440, 900" Chrome fails to parse the
    # height, leaving the tiny headless default viewport, so the page can
    # render a different (e.g. responsive/mobile) layout than headed mode.
    options.add_argument("--window-size=1440,900")
    # Headless Chrome advertises a "HeadlessChrome" user agent, which some
    # sites serve different markup to (or block outright). Spoofing a normal
    # desktop UA makes the page render the same as in headed mode.
    options.add_argument(
        "user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
        "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36"
    )

    driver = webdriver.Chrome(options=options, executable_path=chromedriverPath)
    timeout = 20  # seconds to wait for the results page to load
    try:
        driver.get(mit)
        WebDriverWait(driver, timeout).until(
            EC.visibility_of_element_located((By.CLASS_NAME, "header"))
        )
        # Selenium 4 removed find_element(s)_by_class_name; use By locators.
        search_results = driver.find_element(By.CLASS_NAME, "search-results")
        for result in search_results.find_elements(By.CLASS_NAME, "search-result"):
            link = result.find_element(By.CLASS_NAME, "search-result-link")
            results.append({"url": link.get_attribute("href")})
    except TimeoutException:
        print("Timed out waiting for page to load")
    finally:
        # Always release the browser process, even on unexpected exceptions
        # (the original leaked the driver on any non-timeout error).
        driver.quit()
    return results
Here is also a screenshot of the output when I print(driver.page_source) immediately after the get() call:
与恶龙缠斗过久,自身亦成为恶龙;凝视深渊过久,深渊将回以凝视…