From baa74b8ee6c8098849ac12d8f6d892dc7934035b Mon Sep 17 00:00:00 2001
From: powe97 <116031952+powe97@users.noreply.github.com>
Date: Fri, 1 Mar 2024 18:16:58 -0500
Subject: [PATCH] Fix issue where only 1 page per school would get scraped
 properly

---
 transfer_scraper/main.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/transfer_scraper/main.py b/transfer_scraper/main.py
index 31acca1..daffa7d 100644
--- a/transfer_scraper/main.py
+++ b/transfer_scraper/main.py
@@ -126,6 +126,10 @@ def main():
 
     user_agent = UserAgent().random
     options.set_preference("general.useragent.override", user_agent)
+    # options.set_preference("network.proxy.socks", )
+    # options.set_preference("network.proxy.socks_port", )
+    # options.set_preference("network.proxy.socks_remote_dns", True)
+    # options.set_preference("network.proxy.type", 1)
     print(f"Using randomized user agent {user_agent}", file=sys.stderr)
 
     driver = webdriver.Firefox(options=options)
@@ -211,7 +215,7 @@ def main():
         try:
             course_pages_len = int(
                 driver.find_element(
-                    "id", "lblInstWithEQPaginationInfo"
+                    "id", "lblCourseEQPaginationInfo"
                 ).text.split()[-1]
             )
         except NoSuchElementException:
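
Note on the change (not part of the patch itself): the institution list and a school's course list are paginated by different labels, and the old code read the institution label while iterating one school's course pages, which is why only a single page per school was scraped. Below is a minimal, illustrative Python sketch of the pagination lookup the fix relies on. The element IDs are taken from the diff; the helper name, fallback behaviour, and surrounding setup are assumptions, not the actual main.py.

# Illustrative sketch, not the real transfer_scraper/main.py.
# Element IDs come from the patch; everything else is assumed.
import sys

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from fake_useragent import UserAgent


def pagination_count(driver, element_id):
    # The pagination labels read like "Page 1 of 12"; the last token is the
    # total page count. If the label is missing, assume a single page --
    # presumably why the bug showed up as one page per school:
    # "lblInstWithEQPaginationInfo" is not present in the per-school course
    # view, so the except branch always won before the fix.
    try:
        return int(driver.find_element("id", element_id).text.split()[-1])
    except NoSuchElementException:
        return 1


options = webdriver.FirefoxOptions()
options.set_preference("general.useragent.override", UserAgent().random)
driver = webdriver.Firefox(options=options)
# driver.get(...)  # navigate to the transfer-equivalency search page first

inst_pages = pagination_count(driver, "lblInstWithEQPaginationInfo")    # institution list
course_pages = pagination_count(driver, "lblCourseEQPaginationInfo")    # one school's courses
print(f"institution pages: {inst_pages}, course pages: {course_pages}", file=sys.stderr)

driver.quit()

The commented-out network.proxy.* preferences added in the first hunk would, if uncommented and filled in, route the scraper's traffic through a SOCKS proxy (network.proxy.type set to 1 enables manual proxy configuration in Firefox).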