diff --git a/examples/JobSpy_AllSites.py b/examples/JobSpy_AllSites.py
new file mode 100644
index 0000000..184e4d1
--- /dev/null
+++ b/examples/JobSpy_AllSites.py
@@ -0,0 +1,31 @@
+from jobspy import scrape_jobs
+import pandas as pd
+
+jobs: pd.DataFrame = scrape_jobs(
+    site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor"],
+    search_term="software engineer",
+    location="Dallas, TX",
+    results_wanted=50, # be wary: the higher this is, the more likely you'll get blocked (a rotating proxy should work, though)
+    country_indeed="USA",
+    offset=25, # start jobs from an offset (useful to resume if a search failed partway)
+    # proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
+)
+
+# formatting for pandas
+pd.set_option("display.max_columns", None)
+pd.set_option("display.max_rows", None)
+pd.set_option("display.width", None)
+pd.set_option("display.max_colwidth", 50) # set to 0 to see the full job url / description
+
+# 1: output to console
+print(jobs)
+
+# 2: output to .csv
+jobs.to_csv("./jobs.csv", index=False)
+print("outputted to jobs.csv")
+
+# 3: output to .xlsx
+# jobs.to_excel("jobs.xlsx", index=False)
+
+# 4: display in Jupyter Notebook (1. pip install jupyter 2. jupyter notebook)
+# display(jobs)
\ No newline at end of file
diff --git a/examples/JobSpy_Demo.py b/examples/JobSpy_LongScrape.py
similarity index 100%
rename from examples/JobSpy_Demo.py
rename to examples/JobSpy_LongScrape.py
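For reviewers: the new example's `offset` parameter is described as a way to continue after a failed search. Below is a minimal sketch (not part of the diff, and not the contents of the renamed JobSpy_LongScrape.py) of how that parameter could be used to scrape in resumable batches; it only uses the scrape_jobs arguments shown above, and the batch size, sleep interval, and output filename are illustrative assumptions.

# Sketch: batched scraping with offset; all specifics here are assumptions, not project code.
import time
import pandas as pd
from jobspy import scrape_jobs

all_jobs = []
for offset in range(0, 200, 50):  # four batches of 50 results each
    batch = scrape_jobs(
        site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor"],
        search_term="software engineer",
        location="Dallas, TX",
        results_wanted=50,
        country_indeed="USA",
        offset=offset,
    )
    all_jobs.append(batch)
    time.sleep(30)  # pause between batches to reduce the chance of being blocked

pd.concat(all_jobs, ignore_index=True).to_csv("jobs_long.csv", index=False)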