diff --git a/chicago/bills.py b/chicago/bills.py
index c0d37ed1..46a5b6bb 100644
--- a/chicago/bills.py
+++ b/chicago/bills.py
@@ -121,7 +121,11 @@ def scrape(self):
 
     def extractVotes(self, action_detail_url) :
         action_detail_page = self.lxmlize(action_detail_url)
-        vote_table = action_detail_page.xpath("//table[@id='ctl00_ContentPlaceHolder1_gridVote_ctl00']")[0]
+        try:
+            vote_table = action_detail_page.xpath("//table[@id='ctl00_ContentPlaceHolder1_gridVote_ctl00']")[0]
+        except IndexError:
+            self.warning("No votes found in table")
+            return None, []
         votes = list(self.parseDataTable(vote_table))
         vote_list = []
         for vote, _, _ in votes :
@@ -181,11 +185,16 @@ def addDetails(self, bill, detail_url) :
 
         legislation_details = self.parseDetails(detail_div)
 
-        for related_bill in legislation_details.get('Related files', []) :
-            bill.add_related_bill(identifier = related_bill['label'],
+        title = bill.title
+        if ("sundry" in title.lower()
+            or "miscellaneous" in title.lower()): #these are omnibus
+            bill.add_related_bill(identifier = related_bill['label'],
                                   legislative_session = bill.legislative_session,
-                                  relation_type='pending')
+                                  relation_type='replaces')
+        #for now we're skipping related bills if they
+        #don't contain words that make us think they're
+        #in an omnibus relationship with each other
 
         for i, sponsor in enumerate(legislation_details.get('Sponsors', [])) :
             if i == 0 :
@@ -244,6 +253,7 @@ def addDetails(self, bill, detail_url) :
                       'Published in Special Pamphlet' : None,
                       'Adopted as Substitute' : None,
                       'Deferred and Published' : None,
+                      'Approved as Amended' : 'passage',
                       }
 
 VOTE_OPTIONS = {'yea' : 'yes',
diff --git a/chicago/legistar.py b/chicago/legistar.py
index f518589b..44f7da8c 100644
--- a/chicago/legistar.py
+++ b/chicago/legistar.py
@@ -13,9 +13,9 @@ class LegistarScraper(Scraper):
 
     def lxmlize(self, url, payload=None):
         if payload :
-            entry = self.urlopen(url, 'POST', payload)
+            entry = self.post(url, payload).text
         else :
-            entry = self.urlopen(url)
+            entry = self.get(url).text
         page = lxml.html.fromstring(entry)
         page.make_links_absolute(url)
         return page
@@ -118,14 +118,15 @@ def parseDataTable(self, table):
 
     def _get_link_address(self, link):
-        if 'onclick' in link.attrib :
+        url = None
+        if 'onclick' in link.attrib:
             onclick = link.attrib['onclick']
-            if onclick is not None and onclick.startswith("radopen('"):
+            if (onclick is not None
+                and (onclick.startswith("radopen('")
+                     or onclick.startswith("window.open"))):
                 url = self.base_url + onclick.split("'")[1]
         elif 'href' in link.attrib :
             url = link.attrib['href']
-        else :
-            url = None
         return url
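
The extractVotes change makes a missing vote table non-fatal: when the Legistar action-detail page has no vote grid, the scraper now logs a warning and returns an empty result instead of dying on IndexError. A minimal standalone sketch of the same pattern, with a hypothetical parse_votes helper standing in for the scraper's lxmlize/parseDataTable plumbing:

    import logging

    import lxml.html

    log = logging.getLogger(__name__)

    VOTE_TABLE_ID = 'ctl00_ContentPlaceHolder1_gridVote_ctl00'

    def parse_votes(html):
        # Same contract as the patched extractVotes: a page without the vote
        # grid logs a warning and yields (None, []) instead of raising IndexError.
        page = lxml.html.fromstring(html)
        try:
            vote_table = page.xpath("//table[@id='%s']" % VOTE_TABLE_ID)[0]
        except IndexError:
            log.warning("No votes found in table")
            return None, []
        return vote_table, vote_table.xpath(".//tr")

    # A detail page with no vote table falls through to the empty result.
    print(parse_votes("<html><body><p>no votes recorded</p></body></html>"))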
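
In addDetails, related files are now only attached when the bill title suggests an omnibus item ("sundry" or "miscellaneous"), and the relation type changes from 'pending' to 'replaces'; per the new comment, other related files are skipped for now. A sketch of that gate factored into helpers (is_omnibus and add_related_files are illustrative names, not functions from the patch):

    def is_omnibus(title):
        # The patch keys the omnibus check off "sundry"/"miscellaneous" in the title.
        lowered = title.lower()
        return "sundry" in lowered or "miscellaneous" in lowered

    def add_related_files(bill, legislation_details):
        # Only omnibus-style bills get their related files attached, as
        # 'replaces' relations; everything else is skipped for now.
        if not is_omnibus(bill.title):
            return
        for related_bill in legislation_details.get('Related files', []):
            bill.add_related_bill(identifier=related_bill['label'],
                                  legislative_session=bill.legislative_session,
                                  relation_type='replaces')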
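
The lxmlize change in legistar.py replaces the old self.urlopen(url, 'POST', payload) call with the requests-style self.post/self.get methods and reads .text off the response before parsing. A standalone sketch of the same helper written directly against the requests library (the explicit session argument is an assumption for illustration, not part of the patch):

    import lxml.html
    import requests

    def lxmlize(url, payload=None, session=None):
        # POST when a form payload is supplied, otherwise GET, then parse the
        # body and absolutize links, as the patched method does.
        session = session or requests.Session()
        if payload:
            response = session.post(url, data=payload)
        else:
            response = session.get(url)
        page = lxml.html.fromstring(response.text)
        page.make_links_absolute(url)
        return page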
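
_get_link_address now starts from url = None, accepts window.open(...) onclick handlers alongside radopen('...'), and still falls back to a plain href; initializing url up front also covers onclick values that match neither pattern, which previously left url unassigned. A self-contained sketch of the same extraction with a fixed base URL (the BASE_URL value is assumed for illustration):

    import lxml.html

    BASE_URL = 'https://chicago.legistar.com'  # assumed value for illustration

    def get_link_address(link):
        # Mirrors the patched _get_link_address: onclick popups first, then href, else None.
        url = None
        if 'onclick' in link.attrib:
            onclick = link.attrib['onclick']
            if onclick is not None and (onclick.startswith("radopen('")
                                        or onclick.startswith("window.open")):
                url = BASE_URL + onclick.split("'")[1]
        elif 'href' in link.attrib:
            url = link.attrib['href']
        return url

    # The two link shapes Legistar emits:
    popup = lxml.html.fromstring(
        """<a onclick="radopen('/View.ashx?M=F&amp;ID=1','window');">detail</a>""")
    plain = lxml.html.fromstring('<a href="https://example.com/doc.pdf">doc</a>')
    print(get_link_address(popup))   # https://chicago.legistar.com/View.ashx?M=F&ID=1
    print(get_link_address(plain))   # https://example.com/doc.pdf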