---
jupytext:
  formats: ipynb,md:myst
  text_representation:
    extension: .md
    format_name: myst
    format_version: 0.13
    jupytext_version: 1.14.5
kernelspec:
  display_name: Python 3 (ipykernel)
  language: python
  name: python3
---
import json
from survey_analysis import format_data, fit_model
# Load and normalize the raw survey export into a DataFrame.
responses = format_data('public_survey_data.csv')

# Quick normalized frequency tables for the demographic questions,
# to sanity-check our pipeline against SurveyMonkey's own summaries.
responses['is_member'].value_counts(normalize=True)
responses['geographic_region'].value_counts(normalize=True)
responses['career_stage'].value_counts(normalize=True)

# The remaining questions have question-level metadata (response-scale
# levels) stored in a sidecar JSON file keyed by question name.
with open('levels.json') as fh:
    queries = json.load(fh)

print(f'There are {len(queries.keys())} questions to consider.')
print(queries.keys())
# Fit the model for every scale question and print each model summary.
# The original script repeated the same three lines once per question;
# a data-driven loop preserves the exact behavior (same questions, same
# order, same printed output) while removing the copy-paste, so one copy
# can no longer drift out of sync with the others.
# NOTE(review): 'facebook_access' is absent while 'facebook_content' is
# present — presumably that question was not asked; confirm against the
# survey instrument.
questions_to_fit = [
    'job_board_access',
    'job_board_content',
    'email_access',
    'email_content',
    'blog_access',
    'blog_content',
    'twitter_access',
    'twitter_content',
    'podcast_access',
    'podcast_content',
    'facebook_content',
    'youtube_access',
    'youtube_content',
    'linkedin_access',
    'linkedin_content',
    'ondemand_access',
    'ondemand_content',
    'content_platform',
    'sig_platform',
    'content_importance',
]
for question in questions_to_fit:
    # Ordered response levels for this question, from the sidecar JSON.
    scale_values = queries[question]
    res = fit_model(responses, question, scale_values)
    print(res.summary())