-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathselenium_python_ex1.py
41 lines (33 loc) · 1.24 KB
/
selenium_python_ex1.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
import time
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
# Scrape the top job-profile links from Naukri's "top jobs by designations"
# page. The page renders its content with JavaScript, so a headless Chrome is
# driven via Selenium and the rendered HTML is then parsed with BeautifulSoup.
chrome_options = Options()
chrome_options.add_argument("--headless")  # run Chrome without a visible window

# Initiate the webdriver exactly once (the original created two Chrome
# instances and leaked the first). Parameter includes the path of the driver.
driver = webdriver.Chrome(executable_path="./chromedriver.exe", options=chrome_options)
try:
    # URL of the page we want to scrape. A URL fragment cannot contain a
    # space, so the original "# desigtop600" is corrected to "#desigtop600".
    url = "https://www.naukri.com/top-jobs-by-designations#desigtop600"
    driver.get(url)
    # Crude wait so the JS-rendered content has time to load.
    # NOTE(review): an explicit WebDriverWait on div#nameSearch would be more
    # robust than a fixed sleep — confirm against the page's load behavior.
    time.sleep(5)
    # page_source holds the JS-rendered static HTML, ready for bs4.
    html = driver.page_source
finally:
    # quit() shuts down the browser AND the chromedriver process even when an
    # exception occurs above; close() would only close the current window.
    driver.quit()

soup = BeautifulSoup(html, "html.parser")
name_search = soup.find('div', {'id': 'nameSearch'})
if name_search is None:
    # Fail loudly with a clear message instead of an opaque AttributeError
    # when the page layout changes and the container is missing.
    raise RuntimeError("div#nameSearch not found - page layout may have changed")

# printing top ten job profiles
for job_profile in name_search.find_all('a')[:10]:
    print(job_profile.text)