WJT's "Miscellaneous bug and pyodbc compatibility fixes" + extra unicode fixes #14

Open: wants to merge 6 commits into base: master
pypyodbc.py: 39 changes (28 additions, 11 deletions)
@@ -26,7 +26,7 @@
 paramstyle = 'qmark'
 threadsafety = 1
 version = '1.3.0'
-lowercase=True
+lowercase = False
 
 DEBUG = 0
 # Comment out all "if DEBUG:" statements like below for production
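
Note on this default change: as far as I can tell, the module-level lowercase flag controls whether pypyodbc lower-cases column names when it builds cursor.description (and therefore the names exposed by row objects), so lowercase = False matches pyodbc's default. A minimal sketch of the visible difference; the DSN and query below are placeholders, not part of this PR:

    import pypyodbc

    conn = pypyodbc.connect('DSN=mydsn')      # placeholder connection string
    cur = conn.cursor()
    cur.execute("SELECT 1 AS CustomerID")     # placeholder query

    # With the old default (lowercase=True) this prints ['customerid'];
    # with lowercase = False the driver-reported 'CustomerID' is preserved.
    print([col[0] for col in cur.description])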
@@ -489,15 +489,26 @@ def UCS_dec(buffer):
 
 # This is the common case on Linux, which uses wide Python build together with
 # the default unixODBC without the "-DSQL_WCHART_CONVERT" CFLAGS.
-if sys.platform not in ('win32','cli'):
-    if UNICODE_SIZE >= SQLWCHAR_SIZE:
+if sys.platform not in ('win32','cli') and UNICODE_SIZE != SQLWCHAR_SIZE:
+    if UNICODE_SIZE > SQLWCHAR_SIZE:
         # We can only use unicode buffer if the size of wchar_t (UNICODE_SIZE) is
         # the same as the size expected by the driver manager (SQLWCHAR_SIZE).
-        create_buffer_u = create_buffer
+        chars_to_bytes = lambda chars: chars * SQLWCHAR_SIZE
+        def create_buffer_u(init_or_size, *size_if_init):
+            if isinstance(init_or_size, basestring):
+                if size_if_init:
+                    return create_buffer(init_or_size, chars_to_bytes(size_if_init[0]))
+                else:
+                    return create_buffer(init_or_size)
+            else:
+                return create_buffer(chars_to_bytes(init_or_size))
         wchar_pointer = ctypes.c_char_p
 
         def UCS_buf(s):
-            return s.encode(odbc_encoding)
+            # c_char_p adds a single NUL-terminating byte because it assumes its argument is being
+            # passed to a function expecting a single NUL byte. But these functions actually take an
+            # array of two-byte integers, and so expect two NUL bytes' termination.
+            return (s + u'\x00').encode(odbc_encoding)
 
         from_buffer_u = UCS_dec
 
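
For reviewers, a standalone illustration of the two problems this hunk works around, outside of pypyodbc: sizes given in characters have to be scaled to bytes before allocating a ctypes buffer, and a UTF-16 string needs a two-byte NUL terminator rather than the single NUL byte ctypes appends. The size and encoding name below are assumptions for illustration (a wide unixODBC with 2-byte SQLWCHAR and odbc_encoding of 'utf_16_le'):

    import ctypes

    SQLWCHAR_SIZE = 2              # assumed: driver manager uses 2-byte SQLWCHAR
    odbc_encoding = 'utf_16_le'    # assumed encoding matching that size

    # 1) A size given in characters must be converted to bytes before allocating.
    chars = 10
    buf = ctypes.create_string_buffer(chars * SQLWCHAR_SIZE)
    assert len(buf) == 20

    # 2) Encoding alone leaves only the single trailing NUL byte that ctypes
    #    supplies, which is half of a UTF-16 terminator; appending u'\x00'
    #    before encoding produces the full two-byte NUL the driver expects.
    s = u'abc'
    print(repr(s.encode(odbc_encoding)))              # no wide NUL terminator
    print(repr((s + u'\x00').encode(odbc_encoding)))  # ends with \x00\x00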
@@ -1017,8 +1028,6 @@ def AllocateEnv():
 
 def TupleRow(cursor):
     """Normal tuple with added attribute `cursor_description`, as in pyodbc.
-
-    This is the default.
     """
     class Row(tuple):
         cursor_description = cursor.description
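
As context for the row-type changes in this and the following hunks: a row_type_callable receives the cursor and returns a class used to wrap each fetched row, typically capturing cursor.description when it is built. A self-contained sketch of that shape (not pypyodbc code; the fake cursor and names below are purely illustrative):

    class FakeCursor(object):
        # Illustrative 7-item DB-API description tuples: (name, type_code, ...).
        description = (('id', int, None, None, None, None, None),
                       ('name', str, None, None, None, None, None))

    def TupleRowSketch(cursor):
        """Same shape as TupleRow above: a tuple that remembers the description."""
        class Row(tuple):
            cursor_description = cursor.description
        return Row

    row_cls = TupleRowSketch(FakeCursor())   # built once, before any rows are fetched
    row = row_cls((1, 'x'))
    print(row[0], row.cursor_description[0][0])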
@@ -1040,7 +1049,9 @@ def __getitem__(self, field):
 
 
 def NamedTupleRow(cursor):
-    """Named tuple to allow attribute lookup by name.
+    """Named tuple to allow attribute lookup by name, as in pyodbc.
+
+    This is the default.
 
     Requires py2.6 or above.
     """
@@ -1152,7 +1163,7 @@ def __init__(self, conx, row_type_callable=None):
         self.stmt_h = ctypes.c_void_p()
         self.connection = conx
         self.ansi = conx.ansi
-        self.row_type_callable = row_type_callable or TupleRow
+        self.row_type_callable = row_type_callable or NamedTupleRow
         self.statement = None
         self._last_param_types = None
         self._ParamBufferList = []
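
Together with the docstring swaps above, this makes NamedTupleRow the default row factory, matching pyodbc's attribute-style row access, while TupleRow stays available per cursor. A usage sketch; the DSN and queries are placeholders, and it assumes Python 2.6+ for collections.namedtuple:

    import pypyodbc

    conn = pypyodbc.connect('DSN=mydsn')      # placeholder connection string
    cur = conn.cursor()

    # New default: NamedTupleRow, so columns are attributes as well as indexes.
    row = cur.execute("SELECT 1 AS id, 'x' AS name").fetchone()
    print(row.id, row.name, row[0])

    # The previous behaviour can still be requested explicitly:
    cur2 = conn.cursor(row_type_callable=pypyodbc.TupleRow)
    row2 = cur2.execute("SELECT 1 AS id").fetchone()
    print(row2[0], row2.cursor_description)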
@@ -1766,11 +1777,12 @@ def _UpdateDesc(self):
 
         if len(ColDescr) > 0:
             self.description = ColDescr
-            # Create the row type before fetching.
-            self._row_type = self.row_type_callable(self)
         else:
             self.description = None
         self._CreateColBuf()
+
+        # Create the row type before fetching.
+        self._row_type = self.row_type_callable(self)
 
 
     def _NumOfRows(self):
@@ -2522,6 +2534,11 @@ def cursor(self, row_type_callable=None):
         # self._cursors.append(cur)
         return cur
 
+    def execute(self, sql, *args, **kwargs):
+        cur = self.cursor(row_type_callable=kwargs.pop('row_type_callable', None))
+        cur.execute(sql, *args, **kwargs)
+        return cur
+
     def update_db_special_info(self):
         for sql_type in (
             SQL_TYPE_TIMESTAMP,
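
This adds the Connection.execute convenience found in pyodbc: it opens a fresh cursor, runs the statement on it, and returns that cursor so calls can be chained or iterated directly; the optional row_type_callable keyword is popped from kwargs and forwarded to cursor(). A usage sketch with placeholder DSN and SQL:

    import pypyodbc

    conn = pypyodbc.connect('DSN=mydsn')      # placeholder connection string

    # No explicit cursor management needed; the new cursor is returned.
    for row in conn.execute("SELECT 1 AS id"):
        print(row.id)

    # Forwarding the row factory through the keyword handled by kwargs.pop():
    row = conn.execute("SELECT 1 AS id",
                       row_type_callable=pypyodbc.TupleRow).fetchone()
    print(row[0])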