------------------
Cython, which derives from Pyrex, is licensed under the Python
Software Foundation License. More precisely, all modifications
made to go from Pyrex to Cython are so licensed.
See LICENSE.txt for more details.
-
+
return literals[literal[1:-1]]
else:
return literal
-
+
return [unquote(item) for item in s.split(delimiter)]
transitive_str = object()
start = end+1
class DistutilsInfo(object):
-
+
def __init__(self, source=None, exn=None):
self.values = {}
if source is not None:
value = getattr(exn, key, None)
if value:
self.values[key] = value
-
+
def merge(self, other):
if other is None:
return self
else:
self.values[key] = value
return self
-
+
def subs(self, aliases):
if aliases is None:
return self
def strip_string_literals(code, prefix='__Pyx_L'):
"""
- Normalizes every string literal to be of the form '__Pyx_Lxxx',
+ Normalizes every string literal to be of the form '__Pyx_Lxxx',
returning the normalized code and a mapping of labels to
- string literals.
+ string literals.
"""
new_code = []
literals = {}
double_q = code.find('"', q)
q = min(single_q, double_q)
if q == -1: q = max(single_q, double_q)
-
+
# We're done.
if q == -1 and hash_mark == -1:
new_code.append(code[start:])
start = q
else:
q += 1
-
+
# Process comment.
elif -1 != hash_mark and (hash_mark < q or q == -1):
end = code.find('\n', hash_mark)
new_code.append(code[start:end])
start = q
q += len(in_quote)
-
+
return "".join(new_code), literals
class DependencyTree(object):
-
+
def __init__(self, context):
self.context = context
self._transitive_cache = {}
-
+
#@cached_method
def parse_dependencies(self, source_filename):
return parse_dependencies(source_filename)
parse_dependencies = cached_method(parse_dependencies)
-
+
#@cached_method
def cimports_and_externs(self, filename):
cimports, includes, externs = self.parse_dependencies(filename)[:3]
print("Unable to locate '%s' referenced from '%s'" % (filename, include))
return tuple(cimports), tuple(externs)
cimports_and_externs = cached_method(cimports_and_externs)
-
+
    def cimports(self, filename):
        # Convenience accessor: just the cimported module names for `filename`
        # (first element of the cached (cimports, externs) pair).
        return self.cimports_and_externs(filename)[0]
-
+
#@cached_method
def package(self, filename):
dir = os.path.dirname(filename)
else:
return ()
package = cached_method(package)
-
+
#@cached_method
def fully_qualifeid_name(self, filename):
module = os.path.splitext(os.path.basename(filename))[0]
return '.'.join(self.package(filename) + (module,))
fully_qualifeid_name = cached_method(fully_qualifeid_name)
-
+
def find_pxd(self, module, filename=None):
if module[0] == '.':
raise NotImplementedError("New relative imports.")
return pxd
return self.context.find_pxd_file(module, None)
find_pxd = cached_method(find_pxd)
-
+
#@cached_method
def cimported_files(self, filename):
if filename[-4:] == '.pyx' and os.path.exists(filename[:-4] + '.pxd'):
print("\n\t".join(b))
return tuple(self_pxd + filter(None, [self.find_pxd(m, filename) for m in self.cimports(filename)]))
cimported_files = cached_method(cimported_files)
-
+
def immediate_dependencies(self, filename):
all = list(self.cimported_files(filename))
for extern in sum(self.cimports_and_externs(filename), ()):
all.append(os.path.normpath(os.path.join(os.path.dirname(filename), extern)))
return tuple(all)
-
+
#@cached_method
    def timestamp(self, filename):
        # Last-modification time of `filename` (memoized via cached_method below).
        return os.path.getmtime(filename)
    timestamp = cached_method(timestamp)
-
+
    def extract_timestamp(self, filename):
        # TODO: .h files from extern blocks
        # Return (mtime, filename) so that plain tuple comparison (e.g. max())
        # orders dependencies by modification time first.
        return self.timestamp(filename), filename
-
+
    def newest_dependency(self, filename):
        # (mtime, filename) of the most recently modified transitive dependency,
        # computed by folding extract_timestamp results with max().
        return self.transitive_merge(filename, self.extract_timestamp, max)
-
+
    def distutils_info0(self, filename):
        # Distutils metadata parsed from `filename` itself, ignoring dependencies.
        return self.parse_dependencies(filename)[3]
-
+
def distutils_info(self, filename, aliases=None, base=None):
return (self.transitive_merge(filename, self.distutils_info0, DistutilsInfo.merge)
.subs(aliases)
.merge(base))
-
+
def transitive_merge(self, node, extract, merge):
try:
seen = self._transitive_cache[extract, merge]
seen = self._transitive_cache[extract, merge] = {}
return self.transitive_merge_helper(
node, extract, merge, seen, {}, self.cimported_files)[0]
-
+
def transitive_merge_helper(self, node, extract, merge, seen, stack, outgoing):
if node in seen:
return seen[node], None
return '%s.%s' % (base_type.__module__, base_type.__name__)
return 'object'
-def cython_inline(code,
+def cython_inline(code,
get_type=unsafe_type,
lib_dir=os.path.expanduser('~/.cython/inline'),
cython_include_dirs=['.'],
else:
return source[ix+1:]
-# Lots to be done here... It would be especially cool if compiled functions
+# Lots to be done here... It would be especially cool if compiled functions
# could invoke each other quickly.
class RuntimeCompiledFunction(object):
    """Wraps a Python function so that each call compiles and runs its body
    through cython_inline."""

    def __init__(self, f):
        self._f = f
        # Extract just the function body text from the source of `f`.
        self._body = get_body(inspect.getsource(f))

    def __call__(self, *args, **kwds):
        # Bind the actual call arguments to the function's parameter names,
        # then run the body with those bindings via cython_inline.
        # (Renamed the local from `all` to avoid shadowing the builtin.)
        bound_args = getcallargs(self._f, *args, **kwds)
        return cython_inline(self._body, locals=self._f.func_globals, globals=self._f.func_globals, **bound_args)
self.assertEquals(inline("return global_value + 1", **test_kwds), global_value + 1)
if has_numpy:
-
+
def test_numpy(self):
import numpy
a = numpy.ndarray((10, 20))
    def __init__(self):
        # Completed lines, plus the line currently being built.
        self.lines = []
        self.s = u""
-
+
    def put(self, s):
        # Append to the current (unterminated) line.
        self.s += s
-
+
    def newline(self):
        # Finish the current line and start a fresh, empty one.
        self.lines.append(self.s)
        self.s = u""
-
+
    def putline(self, s):
        # Write `s` and terminate the line in one step.
        self.put(s)
        self.newline()
class CodeWriter(TreeVisitor):
indent_string = u" "
-
+
def __init__(self, result = None):
super(CodeWriter, self).__init__()
if result is None:
self.numindents = 0
self.tempnames = {}
self.tempblockindex = 0
-
+
def write(self, tree):
self.visit(tree)
-
+
def indent(self):
self.numindents += 1
-
+
def dedent(self):
self.numindents -= 1
-
+
def startline(self, s = u""):
self.result.put(self.indent_string * self.numindents + s)
-
+
def put(self, s):
self.result.put(s)
-
+
def endline(self, s = u""):
self.result.putline(s)
self.visit(item.default)
self.put(u", ")
self.visit(items[-1])
-
+
def visit_Node(self, node):
raise AssertionError("Node not handled by serializer: %r" % node)
-
+
def visit_ModuleNode(self, node):
self.visitchildren(node)
-
+
def visit_StatListNode(self, node):
self.visitchildren(node)
self.indent()
self.visit(node.body)
self.dedent()
-
+
def visit_CArgDeclNode(self, node):
if node.base_type.name is not None:
self.visit(node.base_type)
if node.default is not None:
self.put(u" = ")
self.visit(node.default)
-
+
def visit_CNameDeclaratorNode(self, node):
self.put(node.name)
-
+
def visit_CSimpleBaseTypeNode(self, node):
# See Parsing.p_sign_and_longness
if node.is_basic_c_type:
self.put("short " * -node.longness)
elif node.longness > 0:
self.put("long " * node.longness)
-
+
self.put(node.name)
-
+
def visit_SingleAssignmentNode(self, node):
self.startline()
self.visit(node.lhs)
self.put(u" = ")
self.visit(node.rhs)
self.endline()
-
+
def visit_CascadedAssignmentNode(self, node):
self.startline()
for lhs in node.lhs_list:
self.put(u" = ")
self.visit(node.rhs)
self.endline()
-
+
def visit_NameNode(self, node):
self.put(node.name)
-
+
def visit_IntNode(self, node):
self.put(node.value)
def visit_PassStatNode(self, node):
self.startline(u"pass")
self.endline()
-
+
def visit_PrintStatNode(self, node):
self.startline(u"print ")
self.comma_separated_list(node.arg_tuple.args)
self.visit(node.operand1)
self.put(u" %s " % node.operator)
self.visit(node.operand2)
-
+
def visit_CVarDefNode(self, node):
self.startline(u"cdef ")
self.visit(node.base_type)
def visit_SequenceNode(self, node):
self.comma_separated_list(node.args) # Might need to discover whether we need () around tuples...hmm...
-
+
def visit_SimpleCallNode(self, node):
self.visit(node.function)
self.put(u"(")
self.startline()
self.visit(node.expr)
self.endline()
-
+
def visit_InPlaceAssignmentNode(self, node):
self.startline()
self.visit(node.lhs)
self.put(u" %s= " % node.operator)
self.visit(node.rhs)
self.endline()
-
+
def visit_WithStatNode(self, node):
self.startline()
self.put(u"with ")
self.indent()
self.visit(node.body)
self.dedent()
-
+
def visit_AttributeNode(self, node):
self.visit(node.obj)
self.put(u".%s" % node.attribute)
class AutoTestDictTransform(ScopeTrackingTransform):
# Handles autotestdict directive
- blacklist = ['__cinit__', '__dealloc__', '__richcmp__',
+ blacklist = ['__cinit__', '__dealloc__', '__richcmp__',
'__nonzero__', '__bool__',
'__len__', '__contains__']
# need one-character substitutions (for now) so offsets aren't off
special_chars = [(u'<', u'\xF0', u'<'),
- (u'>', u'\xF1', u'>'),
+ (u'>', u'\xF1', u'>'),
(u'&', u'\xF2', u'&')]
-
+
line_pos_comment = re.compile(r'/\*.*?<<<<<<<<<<<<<<.*?\*/\n*', re.DOTALL)
class AnnotationCCodeWriter(CCodeWriter):
self.annotations = create_from.annotations
self.code = create_from.code
self.last_pos = create_from.last_pos
-
+
def create_new(self, create_from, buffer, copy_formatting):
return AnnotationCCodeWriter(create_from, buffer, copy_formatting)
def write(self, s):
CCodeWriter.write(self, s)
self.annotation_buffer.write(s)
-
+
def mark_pos(self, pos):
if pos is not None:
CCodeWriter.mark_pos(self, pos)
def annotate(self, pos, item):
self.annotations.append((pos, item))
-
+
def save_annotation(self, source_filename, target_filename):
self.mark_pos(None)
f = Utils.open_source_file(source_filename)
all.append(((source_filename, pos[1], pos[2]+size), end))
else:
all.append((pos, start+end))
-
+
all.sort()
all.reverse()
for pos, item in all:
col += 1
line = lines[line_no]
lines[line_no] = line[:col] + item + line[col:]
-
+
html_filename = os.path.splitext(target_filename)[0] + ".html"
f = codecs.open(html_filename, "w", encoding="UTF-8")
f.write(u'<html>\n')
c_file = Utils.decode_filename(os.path.basename(target_filename))
f.write(u'<p>Raw output: <a href="%s">%s</a>\n' % (c_file, c_file))
k = 0
-
+
py_c_api = re.compile(u'(Py[A-Z][a-z]+_[A-Z][a-z][A-Za-z_]+)\(')
py_marco_api = re.compile(u'(Py[A-Z][a-z]+_[A-Z][A-Z_]+)\(')
pyx_c_api = re.compile(u'(__Pyx_[A-Z][a-z_][A-Za-z_]+)\(')
pyx_macro_api = re.compile(u'(__Pyx_[A-Z][A-Z_]+)\(')
error_goto = re.compile(ur'((; *if .*)? \{__pyx_filename = .*goto __pyx_L\w+;\})')
refnanny = re.compile(u'(__Pyx_X?(GOT|GIVE)REF|__Pyx_RefNanny[A-Za-z]+)')
-
+
code_source_file = self.code[source_filename]
for line in lines:
code = code_source_file[k]
except KeyError:
code = ''
-
+
code = code.replace('<', '<code><</code>')
-
+
code, py_c_api_calls = py_c_api.subn(ur"<span class='py_c_api'>\1</span>(", code)
code, pyx_c_api_calls = pyx_c_api.subn(ur"<span class='pyx_c_api'>\1</span>(", code)
code, py_macro_api_calls = py_marco_api.subn(ur"<span class='py_macro_api'>\1</span>(", code)
code, pyx_macro_api_calls = pyx_macro_api.subn(ur"<span class='pyx_macro_api'>\1</span>(", code)
code, refnanny_calls = refnanny.subn(ur"<span class='refnanny'>\1</span>", code)
code, error_goto_calls = error_goto.subn(ur"<span class='error_goto'>\1</span>", code)
-
+
code = code.replace(u"<span class='error_goto'>;", u";<span class='error_goto'>")
-
+
score = 5*py_c_api_calls + 2*pyx_c_api_calls + py_macro_api_calls + pyx_macro_api_calls - refnanny_calls
color = u"FFFF%02x" % int(255/(1+score/10.0))
f.write(u"<pre class='line' style='background-color: #%s' onclick='toggleDiv(\"line%s\")'>" % (color, k))
for c, cc, html in special_chars:
line = line.replace(cc, html)
f.write(line.rstrip())
-
+
f.write(u'</pre>\n')
code = re.sub(line_pos_comment, '', code) # inline annotations are redundant
f.write(u"<pre id='line%s' class='code' style='background-color: #%s'>%s</pre>" % (k, color, code))
f.write(u'</body></html>\n')
f.close()
-
+
# TODO: make this cleaner
def escape(raw_string):
class AnnotationItem(object):
    """One annotated span in the generated HTML report: a CSS style class,
    a tooltip text, an optional visible tag, and the length of source text
    the annotation covers."""

    def __init__(self, style, text, tag="", size=0):
        self.style = style
        self.text = text
        self.tag = tag
        self.size = size

    def start(self):
        # Opening markup for the span; end() produces the closing tag.
        fields = (self.style, self.text, self.tag)
        return u"<span class='tag %s' title='%s'>%s" % fields

    def end(self):
        # Returns (covered_size, closing_markup).
        return self.size, u"</span>"
return node
else:
return super(EmbedSignature, self).__call__(node)
-
+
def visit_ClassDefNode(self, node):
oldname = self.class_name
oldclass = self.class_node
def visit_DefNode(self, node):
if not self.current_directives['embedsignature']:
return node
-
+
is_constructor = False
hide_self = False
if node.entry.is_special:
if isinstance(node, ModuleNode) and len(bufvars) > 0:
- # for now...note that pos is wrong
+ # for now...note that pos is wrong
raise CompileError(node.pos, "Buffer vars not allowed in module scope")
for entry in bufvars:
if entry.type.dtype.is_ptr:
raise CompileError(node.pos, "Buffers with pointer types not yet supported.")
-
+
name = entry.name
buftype = entry.type
if buftype.ndim > self.max_ndim:
if entry.is_arg:
result.used = True
return result
-
+
stridevars = [var(Naming.bufstride_prefix, i, "0") for i in range(entry.type.ndim)]
- shapevars = [var(Naming.bufshape_prefix, i, "0") for i in range(entry.type.ndim)]
+ shapevars = [var(Naming.bufshape_prefix, i, "0") for i in range(entry.type.ndim)]
mode = entry.type.mode
if mode == 'full':
suboffsetvars = [var(Naming.bufsuboffset_prefix, i, "-1") for i in range(entry.type.ndim)]
suboffsetvars = None
entry.buffer_aux = Symtab.BufferAux(bufinfo, stridevars, shapevars, suboffsetvars)
-
+
scope.buffer_entries = bufvars
self.scope = scope
"""
if defaults is None:
defaults = buffer_defaults
-
+
posargs, dictargs = Interpreter.interpret_compiletime_options(posargs, dictargs, type_env=env, type_args = (0,'dtype'))
-
+
if len(posargs) > buffer_positional_options_count:
raise CompileError(posargs[-1][1], ERR_BUF_TOO_MANY)
assert_bool('cast')
return options
-
+
#
# Code generation
assert False
if buffer_aux.writable_needed: flags += "| PyBUF_WRITABLE"
return flags
-
+
def used_buffer_aux_vars(entry):
buffer_aux = entry.buffer_aux
buffer_aux.buffer_info_var.used = True
bufstruct = buffer_aux.buffer_info_var.cname
dtype_typeinfo = get_type_information_cname(code, buffer_type.dtype)
-
+
return ("__Pyx_GetBufferAndValidate(&%(bufstruct)s, "
"(PyObject*)%(obj_cname)s, &%(dtype_typeinfo)s, %(flags)s, %(ndim)d, "
- "%(cast)d, __pyx_stack)" % locals())
+ "%(cast)d, __pyx_stack)" % locals())
def put_assign_to_buffer(lhs_cname, rhs_cname, buffer_aux, buffer_type,
is_initialized, pos, code):
However, the assignment operation may throw an exception so that the reassignment
never happens.
-
+
Depending on the circumstances there are two possible outcomes:
- Old buffer released, new acquired, rhs assigned to lhs
    - Old buffer released, new acquired which fails, reacquire old lhs buffer
code.putln("{") # Set up necesarry stack for getbuffer
code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % buffer_type.dtype.struct_nesting_depth())
-
+
getbuffer = get_getbuffer_call(code, "%s", buffer_aux, buffer_type) # fill in object below
if is_initialized:
code.putln("%s = %d;" % (tmp_cname, dim))
code.put("} else ")
# check bounds in positive direction
- if signed != 0:
+ if signed != 0:
cast = ""
else:
cast = "(size_t)"
bufaux.shapevars):
if signed != 0:
code.putln("if (%s < 0) %s += %s;" % (cname, cname, shape.cname))
-
+
# Create buffer lookup and return it
# This is done via utility macros/inline functions, which vary
# according to the access mode used.
for i, s in zip(index_cnames, bufaux.stridevars):
params.append(i)
params.append(s.cname)
-
+
# Make sure the utility code is available
if funcname not in code.globalstate.utility_codes:
code.globalstate.utility_codes.add(funcname)
char* ptr = (char*)buf;
""") % (name, funcargs) + "".join([dedent("""\
ptr += s%d * i%d;
- if (o%d >= 0) ptr = *((char**)ptr) + o%d;
+ if (o%d >= 0) ptr = *((char**)ptr) + o%d;
""") % (i, i, i, i) for i in range(nd)]
) + "\nreturn ptr;\n}")
#endif
""")
-
+
env.use_utility_code(UtilityCode(
proto = dedent("""\
#if PY_MAJOR_VERSION < 3
if name not in code.globalstate.utility_codes:
code.globalstate.utility_codes.add(name)
typecode = code.globalstate['typeinfo']
-
+
complex_possible = dtype.is_struct_or_union and dtype.can_be_complex()
-
+
declcode = dtype.declaration_code("")
if dtype.is_simple_buffer_dtype():
structinfo_name = "NULL"
typecode.putln("};", safe=True)
else:
assert False
-
+
rep = str(dtype)
if dtype.is_int:
if dtype.signed == 0:
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
- }
+ }
}
}
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
- }
+ }
}
}
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
-
+
if (ctx->packmode == '@' || ctx->packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
ctx->head->parent_offset = parent_offset;
continue;
}
-
+
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
ctx->fmt_offset += size;
-
+
--ctx->enc_count; /* Consume from buffer string */
/* Done checking, move to next field, pushing or popping struct stack if needed */
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
- return 0;
+ return 0;
}
static int __Pyx_BufFmt_FirstPack(__Pyx_BufFmt_Context* ctx) {
return NULL;
}
}
-
+
}
}
}
("type", "PyType_Type", []),
# This conflicts with the C++ bool type, and unfortunately
-# C++ is too liberal about PyObject* <-> bool conversions,
+# C++ is too liberal about PyObject* <-> bool conversions,
# resulting in unintuitive runtime behavior and segfaults.
# ("bool", "PyBool_Type", []),
("int", "PyInt_Type", []),
("long", "PyLong_Type", []),
("float", "PyFloat_Type", []),
-
+
("complex", "PyComplex_Type", [BuiltinAttribute('cval', field_type_name = 'Py_complex'),
BuiltinAttribute('real', 'cval.real', field_type = PyrexTypes.c_double_type),
BuiltinAttribute('imag', 'cval.imag', field_type = PyrexTypes.c_double_type),
]),
# ("file", "PyFile_Type", []), # not in Py3
- ("set", "PySet_Type", [BuiltinMethod("clear", "T", "i", "PySet_Clear"),
+ ("set", "PySet_Type", [BuiltinMethod("clear", "T", "i", "PySet_Clear"),
BuiltinMethod("discard", "TO", "i", "PySet_Discard"),
BuiltinMethod("add", "TO", "i", "PySet_Add"),
BuiltinMethod("pop", "T", "O", "PySet_Pop")]),
# 'file', # only in Py2.x
)
-
+
builtin_structs_table = [
('Py_buffer', 'Py_buffer',
[("buf", PyrexTypes.c_void_ptr_type),
-v, --verbose Be verbose, print file names on multiple compilation
-p, --embed-positions If specified, the positions in Cython files of each
function definition is embedded in its docstring.
- --cleanup <level> Release interned objects on python exit, for memory debugging.
- Level indicates aggressiveness, default 0 releases nothing.
- -w, --working <directory> Sets the working directory for Cython (the directory modules
+ --cleanup <level> Release interned objects on python exit, for memory debugging.
+ Level indicates aggressiveness, default 0 releases nothing.
+ -w, --working <directory> Sets the working directory for Cython (the directory modules
are searched from)
--gdb Output debug information for cygdb
return args.pop(0)
else:
bad_usage()
-
+
def get_param(option):
tail = option[2:]
if tail:
# See GlobalState.put_utility_code.
#
# hashes/equals by instance
-
+
def __init__(self, proto=None, impl=None, init=None, cleanup=None, requires=None,
proto_block='utility_code_proto'):
# proto_block: Which code block to dump prototype in. See GlobalState.
writer.put(self.cleanup)
else:
self.cleanup(writer, output.module_pos)
-
-
+
+
class FunctionState(object):
# return_label string function return point label
def __init__(self, owner, names_taken=cython.set()):
self.names_taken = names_taken
self.owner = owner
-
+
self.error_label = None
self.label_counter = 0
self.labels_used = cython.set()
if name is not None:
label += '_' + name
return label
-
+
def new_error_label(self):
old_err_lbl = self.error_label
self.error_label = self.new_label('error')
return old_err_lbl
-
+
def get_loop_labels(self):
return (
self.continue_label,
self.break_label)
-
+
def set_loop_labels(self, labels):
(self.continue_label,
self.break_label) = labels
-
+
def new_loop_labels(self):
old_labels = self.get_loop_labels()
self.set_loop_labels(
- (self.new_label("continue"),
+ (self.new_label("continue"),
self.new_label("break")))
return old_labels
-
+
def get_all_labels(self):
return (
self.continue_label,
new_labels.append(old_label)
self.set_all_labels(new_labels)
return old_labels
-
+
def use_label(self, lbl):
self.labels_used.add(lbl)
-
+
def label_used(self, lbl):
return lbl in self.labels_used
# parts {string:CCodeWriter}
-
+
# interned_strings
# consts
# interned_nums
'utility_code_def',
'end'
]
-
+
def __init__(self, writer, emit_linenums=False):
self.filename_table = {}
w = self.parts['cleanup_module']
w.putln("}")
w.exit_cfunc_scope()
-
+
def put_pyobject_decl(self, entry):
self['global_var'].putln("static PyObject *%s;" % entry.cname)
# The functions below are there in a transition phase only
# and will be deprecated. They are called from Nodes.BlockNode.
# The copy&paste duplication is intentional in order to be able
- # to see quickly how BlockNode worked, until this is replaced.
+ # to see quickly how BlockNode worked, until this is replaced.
def should_declare(self, cname, entry):
if cname in self.declared_cnames:
#
# Utility code state
#
-
+
def use_utility_code(self, utility_code):
"""
Adds code to the C file. utility_code should
- filename_table, filename_list, input_file_contents: All codewriters
coming from the same root share the same instances simultaneously.
"""
-
+
# f file output file
# buffer StringIOTree
-
+
# level int indentation level
# bol bool beginning of line?
# marker string comment to emit before next line
# about the current class one is in
globalstate = None
-
+
def __init__(self, create_from=None, buffer=None, copy_formatting=False, emit_linenums=None):
if buffer is None: buffer = StringIOTree()
self.buffer = buffer
self.last_marker_line = 0
self.source_desc = ""
self.pyclass_stack = []
-
+
self.funcstate = None
self.level = 0
self.call_level = 0
return self.buffer.getvalue()
def write(self, s):
- # also put invalid markers (lineno 0), to indicate that those lines
+ # also put invalid markers (lineno 0), to indicate that those lines
# have no Cython source code correspondence
if self.marker is None:
cython_lineno = self.last_marker_line
else:
cython_lineno = self.marker[0]
-
+
self.buffer.markers.extend([cython_lineno] * s.count('\n'))
self.buffer.write(s)
def enter_cfunc_scope(self):
self.funcstate = FunctionState(self)
-
+
def exit_cfunc_scope(self):
self.funcstate = None
self.emit_marker()
if self.emit_linenums and self.last_marker_line != 0:
self.write('\n#line %s "%s"\n' % (self.last_marker_line, self.source_desc))
-
+
if code:
if safe:
self.put_safe(code)
self.put(code)
self.write("\n");
self.bol = 1
-
+
def emit_marker(self):
self.write("\n");
self.indent()
def increase_indent(self):
self.level = self.level + 1
-
+
def decrease_indent(self):
self.level = self.level - 1
-
+
def begin_block(self):
self.putln("{")
self.increase_indent()
-
+
def end_block(self):
self.decrease_indent()
self.putln("}")
-
+
def indent(self):
self.write(" " * self.level)
self.marker = (line, marker)
if self.emit_linenums:
self.source_desc = source_desc.get_escaped_description()
-
+
def put_label(self, lbl):
if lbl in self.funcstate.labels_used:
self.putln("%s:;" % lbl)
-
+
def put_goto(self, lbl):
self.funcstate.use_label(lbl)
self.putln("goto %s;" % lbl)
-
+
def put_var_declarations(self, entries, static = 0, dll_linkage = None,
definition = True):
for entry in entries:
if not entry.in_cinclude:
self.put_var_declaration(entry, static, dll_linkage, definition)
-
+
def put_var_declaration(self, entry, static = 0, dll_linkage = None,
definition = True):
#print "Code.put_var_declaration:", entry.name, "definition =", definition ###
def put_h_guard(self, guard):
self.putln("#ifndef %s" % guard)
self.putln("#define %s" % guard)
-
+
def unlikely(self, cond):
if Options.gcc_branch_hints:
return 'unlikely(%s)' % cond
return "(PyObject *)" + entry.cname
else:
return entry.cname
-
+
def as_pyobject(self, cname, type):
from PyrexTypes import py_object_type, typecast
return typecast(py_object_type, type, cname)
-
+
def put_gotref(self, cname):
self.putln("__Pyx_GOTREF(%s);" % cname)
-
+
def put_giveref(self, cname):
self.putln("__Pyx_GIVEREF(%s);" % cname)
-
+
def put_xgiveref(self, cname):
self.putln("__Pyx_XGIVEREF(%s);" % cname)
self.putln("__Pyx_INCREF(%s);" % self.as_pyobject(cname, type))
else:
self.putln("Py_INCREF(%s);" % self.as_pyobject(cname, type))
-
+
def put_decref(self, cname, type, nanny=True):
if nanny:
self.putln("__Pyx_DECREF(%s);" % self.as_pyobject(cname, type))
def put_var_gotref(self, entry):
if entry.type.is_pyobject:
self.putln("__Pyx_GOTREF(%s);" % self.entry_as_pyobject(entry))
-
+
def put_var_giveref(self, entry):
if entry.type.is_pyobject:
self.putln("__Pyx_GIVEREF(%s);" % self.entry_as_pyobject(entry))
def put_var_incref(self, entry):
if entry.type.is_pyobject:
self.putln("__Pyx_INCREF(%s);" % self.entry_as_pyobject(entry))
-
+
def put_decref_clear(self, cname, type, nanny=True):
from PyrexTypes import py_object_type, typecast
if nanny:
else:
self.putln("Py_DECREF(%s); %s = 0;" % (
typecast(py_object_type, type, cname), cname))
-
+
def put_xdecref(self, cname, type, nanny=True):
if nanny:
self.putln("__Pyx_XDECREF(%s);" % self.as_pyobject(cname, type))
else:
self.putln("Py_XDECREF(%s);" % self.as_pyobject(cname, type))
-
+
def put_xdecref_clear(self, cname, type, nanny=True):
if nanny:
self.putln("__Pyx_XDECREF(%s); %s = 0;" % (
self.putln("__Pyx_XDECREF(%s);" % self.entry_as_pyobject(entry))
else:
self.putln("__Pyx_DECREF(%s);" % self.entry_as_pyobject(entry))
-
+
def put_var_decref_clear(self, entry):
if entry.type.is_pyobject:
self.putln("__Pyx_DECREF(%s); %s = 0;" % (
self.entry_as_pyobject(entry), entry.cname))
-
+
def put_var_xdecref(self, entry):
if entry.type.is_pyobject:
self.putln("__Pyx_XDECREF(%s);" % self.entry_as_pyobject(entry))
-
+
def put_var_xdecref_clear(self, entry):
if entry.type.is_pyobject:
self.putln("__Pyx_XDECREF(%s); %s = 0;" % (
self.entry_as_pyobject(entry), entry.cname))
-
+
def put_var_decrefs(self, entries, used_only = 0):
for entry in entries:
if not used_only or entry.used:
self.put_var_xdecref(entry)
else:
self.put_var_decref(entry)
-
+
def put_var_xdecrefs(self, entries):
for entry in entries:
self.put_var_xdecref(entry)
-
+
def put_var_xdecrefs_clear(self, entries):
for entry in entries:
self.put_var_xdecref_clear(entry)
-
+
def put_init_to_py_none(self, cname, type, nanny=True):
from PyrexTypes import py_object_type, typecast
py_none = typecast(type, py_object_type, "Py_None")
self.putln("%s = %s; __Pyx_INCREF(Py_None);" % (cname, py_none))
else:
self.putln("%s = %s; Py_INCREF(Py_None);" % (cname, py_none))
-
+
def put_init_var_to_py_none(self, entry, template = "%s", nanny=True):
code = template % entry.cname
#if entry.type.is_extension_type:
method_flags += [method_coexist]
self.putln(
'{__Pyx_NAMESTR("%s"), (PyCFunction)%s, %s, __Pyx_DOCSTR(%s)}%s' % (
- entry.name,
+ entry.name,
entry.func_cname,
"|".join(method_flags),
doc_code,
Naming.lineno_cname,
pos[1],
cinfo)
-
+
def error_goto(self, pos):
lbl = self.funcstate.error_label
self.funcstate.use_label(lbl)
    def error_goto_if(self, cond, pos):
        # C fragment: jump to the error label when `cond` holds (branch-hinted
        # as unlikely).
        return "if (%s) %s" % (self.unlikely(cond), self.error_goto(pos))
-
+
    def error_goto_if_null(self, cname, pos):
        # Jump to the error label when `cname` evaluates false (NULL/zero).
        return self.error_goto_if("!%s" % cname, pos)
-
+
    def error_goto_if_neg(self, cname, pos):
        # Jump to the error label when a C return code is negative.
        return self.error_goto_if("%s < 0" % cname, pos)
-
+
    def error_goto_if_PyErr(self, pos):
        # Jump to the error label if a Python exception is already pending.
        return self.error_goto_if("PyErr_Occurred()", pos)
-
+
    def lookup_filename(self, filename):
        # Delegate to the shared global state's filename table.
        return self.globalstate.lookup_filename(filename)
def put_trace_declarations(self):
self.putln('__Pyx_TraceDeclarations');
-
+
def put_trace_call(self, name, pos):
self.putln('__Pyx_TraceCall("%s", %s[%s], %s);' % (name, Naming.filetable_cname, self.lookup_filename(pos[0]), pos[1]));
-
+
def put_trace_exception(self):
self.putln("__Pyx_TraceException();")
-
+
def put_trace_return(self, retvalue_cname):
self.putln("__Pyx_TraceReturn(%s);" % retvalue_cname)
def __init__(self, outfile_name):
self.f = Utils.open_new_file(outfile_name)
self.level = 0
-
+
def putln(self, code):
self.f.write("%s%s\n" % (" " * self.level, code))
-
+
def indent(self):
self.level += 1
-
+
def dedent(self):
self.level -= 1
import bisect, sys
-# This module keeps track of arbitrary "states" at any point of the code.
+# This module keeps track of arbitrary "states" at any point of the code.
# A state is considered known if every path to the given point agrees on
-# its state, otherwise it is None (i.e. unknown).
+# its state, otherwise it is None (i.e. unknown).
-# It might be useful to be able to "freeze" the set of states by pushing
+# It might be useful to be able to "freeze" the set of states by pushing
# all state changes to the tips of the trees for fast reading. Perhaps this
-# could be done on get_state, clearing the cache on set_state (assuming
-# incoming is immutable).
+# could be done on get_state, clearing the cache on set_state (assuming
+# incoming is immutable).
-# This module still needs a lot of work, and probably should totally be
-# redesigned. It doesn't take return, raise, continue, or break into
-# account.
+# This module still needs a lot of work, and probably should totally be
+# redesigned. It doesn't take return, raise, continue, or break into
+# account.
from Cython.Compiler.Scanning import StringSourceDescriptor
try:
self.parent = parent
self.tip = {}
self.end_pos = _END_POS
-
+
def start_branch(self, pos):
self.end_pos = pos
branch_point = BranchingControlFlow(pos, self)
if self.parent is not None:
self.parent.branches[-1] = branch_point
return branch_point.branches[0]
-
+
def next_branch(self, pos):
self.end_pos = pos
return self.parent.new_branch(pos)
-
+
def finish_branch(self, pos):
self.end_pos = pos
self.parent.end_pos = pos
return LinearControlFlow(pos, self.parent)
-
+
def get_state(self, item, pos=_END_POS):
return self.get_pos_state(item, pos)[1]
-
+
def get_pos_state(self, item, pos=_END_POS):
# do some caching
if pos > self.end_pos:
if item in current.tip:
del current.tip[item]
current._set_state_local(pos, item, state)
-
-
+
+
class LinearControlFlow(ControlFlow):
def __init__(self, start_pos=(), incoming=None, parent=None):
ControlFlow.__init__(self, start_pos, incoming, parent)
self.events = {}
-
+
def _set_state_local(self, pos, item, state):
if item in self.events:
event_list = self.events[item]
return None
def to_string(self, indent='', limit=None):
-
+
if len(self.events) == 0:
s = indent + "[no state changes]"
-
+
else:
all = []
for item, event_list in self.events.items():
if self.incoming is not limit and self.incoming is not None:
s = "%s\n%s" % (self.incoming.to_string(indent, limit=limit), s)
return s
-
-
+
+
class BranchingControlFlow(ControlFlow):
-
+
def __init__(self, start_pos, incoming, parent=None):
ControlFlow.__init__(self, start_pos, incoming, parent)
self.branches = [LinearControlFlow(start_pos, incoming, parent=self)]
self.branch_starts = [start_pos]
-
+
def _set_state_local(self, pos, item, state):
for branch_pos, branch in zip(self.branch_starts[::-1], self.branches[::-1]):
if pos >= branch_pos:
branch._set_state_local(pos, item, state)
return
-
+
def _get_pos_state_local(self, item, pos, stop_at=None):
if pos < self.end_pos:
for branch_pos, branch in zip(self.branch_starts[::-1], self.branches[::-1]):
pos=None,
defining = 1,
cname='<error>')
-
+
def lookup_type(self, name):
- # This function should go away when types are all first-level objects.
+ # This function should go away when types are all first-level objects.
type = parse_basic_type(name)
if type:
return type
utility_scope = ModuleScope(u'utility', None, context)
# These are used to optimize isinstance in FinalOptimizePhase
- type_object = utility_scope.declare_typedef('PyTypeObject',
- base_type = c_void_type,
+ type_object = utility_scope.declare_typedef('PyTypeObject',
+ base_type = c_void_type,
pos = None,
cname = 'PyTypeObject')
type_object.is_void = True
-
+
utility_scope.declare_cfunction(
'PyObject_TypeCheck',
CFuncType(c_bint_type, [CFuncTypeArg("o", py_object_type, None),
pos = None,
defining = 1,
cname = 'PyObject_TypeCheck')
-
+
return utility_scope
return message
class CompileError(PyrexError):
    """A compilation error tied to an (optional) source position."""

    def __init__(self, position = None, message = u""):
        self.position = position
        # Keep the raw, unformatted message for callers that re-format it.
        self.message_only = message
        # The base class receives the fully formatted text.
        Exception.__init__(self, format_error(message, position))
class CompileWarning(PyrexWarning):
-
+
def __init__(self, position = None, message = ""):
self.position = position
# Deprecated and withdrawn in 2.6:
class InternalError(Exception):
# If this is ever raised, there is a bug in the compiler.
-
+
def __init__(self, message):
self.message_only = message
Exception.__init__(self, u"Internal compiler error: %s"
class AbortError(Exception):
    # Raised to abandon the compilation run on the spot.

    def __init__(self, message):
        # Preserve the bare message alongside the decorated one.
        self.message_only = message
        Exception.__init__(self, u"Abort error: %s" % message)
CompileError.__init__(self, pos, message)
class NoElementTreeInstalledException(PyrexError):
- """raised when the user enabled options.gdb_debug but no ElementTree
+ """raised when the user enabled options.gdb_debug but no ElementTree
implementation was found
"""
#print "Errors.error:", repr(position), repr(message) ###
if position is None:
raise InternalError(message)
- err = CompileError(position, message)
+ err = CompileError(position, message)
if debug_exception_on_error: raise Exception(err) # debug
report_error(err)
return err
return warn
-# These functions can be used to momentarily suppress errors.
+# These functions can be used to momentarily suppress errors.
error_stack = []
# result_code string Code fragment
# result_ctype string C type of result_code if different from type
# is_temp boolean Result is in a temporary variable
- # is_sequence_constructor
+ # is_sequence_constructor
# boolean Is a list or tuple constructor expression
# is_starred boolean Is a starred expression (e.g. '*a')
# saved_subexpr_nodes
# [ExprNode or [ExprNode or None] or None]
# Cached result of subexpr_nodes()
# use_managed_ref boolean use ref-counted temps/assignments/etc.
-
+
result_ctype = None
type = None
temp_code = None
# 'subexprs' class attribute of ExprNodes, which should
# contain a list of the names of attributes which can
# hold sub-nodes or sequences of sub-nodes.
- #
- # The framework makes use of a number of abstract methods.
+ #
+ # The framework makes use of a number of abstract methods.
# Their responsibilities are as follows.
#
# Declaration Analysis phase
#
# analyse_target_types
# Called during the Analyse Types phase to analyse
- # the LHS of an assignment or argument of a del
+ # the LHS of an assignment or argument of a del
# statement. Similar responsibilities to analyse_types.
#
# target_code
# check_const
# - Check that this node and its subnodes form a
# legal constant expression. If so, do nothing,
- # otherwise call not_const.
+ # otherwise call not_const.
#
- # The default implementation of check_const
+ # The default implementation of check_const
# assumes that the expression is not constant.
#
# check_const_addr
# constant. Otherwise, call addr_not_const.
#
# The default implementation of calc_const_addr
- # assumes that the expression is not a constant
+ # assumes that the expression is not a constant
# lvalue.
#
# Code Generation phase
# sub-expressions.
#
# calculate_result_code
- # - Should return a C code fragment evaluating to the
- # result. This is only called when the result is not
+ # - Should return a C code fragment evaluating to the
+ # result. This is only called when the result is not
# a temporary.
#
# generate_assignment_code
# - Call generate_disposal_code on all sub-expressions.
#
#
-
+
is_sequence_constructor = 0
is_attribute = 0
-
+
saved_subexpr_nodes = None
is_temp = 0
is_target = 0
return self.subexprs
_get_child_attrs = __get_child_attrs
child_attrs = property(fget=_get_child_attrs)
-
+
def not_implemented(self, method_name):
    """Report a missing override on this node class and abort compilation."""
    print_call_chain(method_name, "not implemented") ###
    msg = "%s.%s not implemented" % (self.__class__.__name__, method_name)
    raise InternalError(msg)
-
+
def is_lvalue(self):
    """Expressions are not assignable unless a subclass says otherwise."""
    return 0
-
+
def is_ephemeral(self):
# An ephemeral node is one whose result is in
# a Python temporary and we suspect there are no
else:
nodes.append(item)
return nodes
-
+
def result(self):
    """Return the C code fragment that holds this node's value."""
    # Temporaries live in an allocated temp variable; anything else is
    # recomputed from the expression itself.
    if not self.is_temp:
        return self.calculate_result_code()
    return self.temp_code
-
+
def result_as(self, type = None):
    """Return the result code cast to the given C type."""
    # Return the result code cast to the specified C type.
    return typecast(type, self.ctype(), self.result())
-
+
def py_result(self):
    """Return the result code cast to ``PyObject *``."""
    # Return the result code cast to PyObject *.
    return self.result_as(py_object_type)
-
+
def ctype(self):
# Return the native C type of the result (i.e. the
# C type of the result_code expression).
def compile_time_value(self, denv):
# Return value of compile-time expression, or report error.
error(self.pos, "Invalid compile-time expression")
-
+
def compile_time_value_error(self, e):
    """Report an exception raised while evaluating a compile-time expression."""
    msg = "Error in compile-time expression: %s: %s" % (e.__class__.__name__, e)
    error(self.pos, msg)
-
+
# ------------- Declaration Analysis ----------------
-
+
def analyse_target_declaration(self, env):
    """Default: an expression is not a legal assignment/deletion target."""
    error(self.pos, "Cannot assign to or delete this")
-
+
# ------------- Expression Analysis ----------------
-
+
def analyse_const_expression(self, env):
    """Analyse a constant expression during declaration analysis.

    Runs type analysis, then reports and returns whether the
    expression is in fact constant.
    """
    self.analyse_types(env)
    return self.check_const()
-
+
def analyse_expressions(self, env):
    """Convenience wrapper: run the full analysis pipeline on the expression."""
    self.analyse_types(env)
-
+
def analyse_target_expression(self, env, rhs):
    """Analyse this node as the target (LHS) of an assignment."""
    self.analyse_target_types(env)
-
+
def analyse_boolean_expression(self, env):
    """Analyse this expression and coerce its result to a boolean.

    Returns the (possibly new) node produced by the coercion.
    """
    self.analyse_types(env)
    # The intermediate local was previously named ``bool``, shadowing
    # the builtin for no benefit — return the coercion directly.
    return self.coerce_to_boolean(env)
-
+
def analyse_temp_boolean_expression(self, env):
# Analyse boolean expression and coerce result into
# a temporary. This is used when a branch is to be
return self.coerce_to_boolean(env).coerce_to_simple(env)
# --------------- Type Inference -----------------
-
+
def type_dependencies(self, env):
    """Return the entries whose types must be resolved before this node's
    type can be inferred."""
    # A node whose type is already fixed depends on nothing.
    if getattr(self, 'type', None) is not None:
        return ()
    # Otherwise accumulate the dependencies of every sub-expression.
    deps = ()
    for node in self.subexpr_nodes():
        deps = deps + node.type_dependencies(env)
    return deps
-
+
def infer_type(self, env):
- # Attempt to deduce the type of self.
- # Differs from analyse_types as it avoids unnecessary
+ # Attempt to deduce the type of self.
+ # Differs from analyse_types as it avoids unnecessary
# analysis of subexpressions, but can assume everything
# in self.type_dependencies() has been resolved.
if hasattr(self, 'type') and self.type is not None:
return self.entry.type
else:
self.not_implemented("infer_type")
-
+
# --------------- Type Analysis ------------------
-
+
def analyse_as_module(self, env):
    """Default: this expression never names a cimported module."""
    return None
-
+
def analyse_as_type(self, env):
    """Default: this expression never names a type."""
    return None
-
+
def analyse_as_extension_type(self, env):
    """Default: this expression never names an extension type."""
    return None
-
+
def analyse_types(self, env):
    """Abstract: subclasses must implement type analysis."""
    self.not_implemented("analyse_types")
-
+
def analyse_target_types(self, env):
    """Default: analyse an assignment target like any other expression."""
    self.analyse_types(env)
def check_const(self):
    """Default: expressions are assumed non-constant; report and fail."""
    self.not_const()
    return False
-
+
def not_const(self):
    """Report that this node appeared where a constant expression is required."""
    error(self.pos, "Not allowed in a constant expression")
-
+
def check_const_addr(self):
    """Default: an expression's address is assumed non-constant; report and fail."""
    self.addr_not_const()
    return False
-
+
def addr_not_const(self):
    """Report that this node's address is not a compile-time constant."""
    error(self.pos, "Address is not constant")
# ----------------- Result Allocation -----------------
-
+
def result_in_temp(self):
    """True when the value lives in a temporary owned by this node.

    Overridden by nodes that share the temp of a sub-expression.
    """
    return self.is_temp
-
+
def target_code(self):
    """C fragment usable as the left-hand side of a C assignment."""
    return self.calculate_result_code()
-
+
def calculate_result_code(self):
    """Abstract: subclasses with non-temp results must implement this."""
    self.not_implemented("calculate_result_code")
-
+
# def release_target_temp(self, env):
# # Release temporaries used by LHS of an assignment.
# self.release_subexpr_temps(env)
self.temp_code = None
# ---------------- Code Generation -----------------
-
+
def make_owned_reference(self, code):
    """Ensure we own a reference when the result is a Python object."""
    # Non-Python results carry no refcount at all.
    if not self.type.is_pyobject:
        return
    # Temps already own their reference; borrowed results need an incref.
    if not self.result_in_temp():
        code.put_incref(self.result(), self.ctype())
-
+
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
-
+
# Generate code to evaluate this node and
# its sub-expressions, and dispose of any
# temporary results of its sub-expressions.
def generate_subexpr_evaluation_code(self, code):
    """Emit evaluation code for every sub-expression, in order."""
    for subexpr in self.subexpr_nodes():
        subexpr.generate_evaluation_code(code)
-
+
def generate_result_code(self, code):
    """Abstract: subclasses must emit the code computing their result."""
    self.not_implemented("generate_result_code")
-
+
def generate_disposal_code(self, code):
if self.is_temp:
if self.type.is_pyobject:
# of all sub-expressions.
for node in self.subexpr_nodes():
node.generate_disposal_code(code)
-
+
def generate_post_assignment_code(self, code):
if self.is_temp:
if self.type.is_pyobject:
def generate_assignment_code(self, rhs, code):
    """Stub for nodes that are not legal assignment targets.

    The error was already reported during analysis, so code
    generation silently does nothing here.
    """
    pass
-
+
def generate_deletion_code(self, code):
# Stub method for nodes that are not legal as
# the argument of a del statement. An error
self.release_temp_result(code)
else:
self.free_subexpr_temps(code)
-
+
def free_subexpr_temps(self, code):
    """Release the temporaries held by all sub-expressions."""
    for subexpr in self.subexpr_nodes():
        subexpr.free_temps(code)
pass
# ---------------- Annotation ---------------------
-
+
def annotate(self, code):
    """Recursively annotate every sub-expression."""
    for subexpr in self.subexpr_nodes():
        subexpr.annotate(code)
-
+
# ----------------- Coercion ----------------------
-
+
def coerce_to(self, dst_type, env):
# Coerce the result so that it can be assigned to
# something of type dst_type. If processing is necessary,
if dst_type.is_reference:
dst_type = dst_type.ref_base_type
-
+
if dst_type.is_pyobject:
if not src.type.is_pyobject:
if dst_type is bytes_type and src.type.is_int:
src = PyTypeTestNode(src, dst_type, env)
elif src.type.is_pyobject:
src = CoerceFromPyTypeNode(dst_type, src, env)
- elif (dst_type.is_complex
+ elif (dst_type.is_complex
and src_type != dst_type
and dst_type.assignable_from(src_type)):
src = CoerceToComplexNode(src, dst_type, env)
return CoerceToBooleanNode(self, env)
else:
if not (type.is_int or type.is_enum or type.is_error):
- error(self.pos,
+ error(self.pos,
"Type '%s' not acceptable as a boolean" % type)
return self
-
+
def coerce_to_integer(self, env):
    """Coerce to a C integer (long) unless the type is already integral."""
    if not self.type.is_int:
        return self.coerce_to(PyrexTypes.c_long_type, env)
    return self
-
+
def coerce_to_temp(self, env):
    """Ensure the result is held in a temporary."""
    if self.result_in_temp():
        return self
    return CoerceToTempNode(self, env)
-
+
def coerce_to_simple(self, env):
    """Ensure the result is simple (directly referencable); see is_simple."""
    if self.is_simple():
        return self
    return self.coerce_to_temp(env)
-
+
def is_simple(self):
# A node is simple if its result is something that can
# be referred to without performing any operations, e.g.
class AtomicExprNode(ExprNode):
# Abstract base class for expression nodes which have
# no sub-expressions.
-
+
subexprs = []
# Override to optimize -- we know we have no children
class PyConstNode(AtomicExprNode):
# Abstract base class for constant Python values.
-
+
is_literal = 1
type = py_object_type
-
+
def is_simple(self):
    # Python constant singletons are always directly referencable.
    return 1
def analyse_types(self, env):
    # Nothing to do: the type is fixed as a class attribute.
    pass
-
+
def calculate_result_code(self):
    # The C name of the constant singleton (e.g. Py_None).
    return self.value
class NoneNode(PyConstNode):
# The constant value None
-
+
value = "Py_None"
constant_result = None
-
+
nogil_check = None
def compile_time_value(self, denv):
class EllipsisNode(PyConstNode):
# '...' in a subscript list.
-
+
value = "Py_Ellipsis"
constant_result = Ellipsis
# Abstract base type for literal constant nodes.
#
# value string C code fragment
-
+
is_literal = 1
nogil_check = None
def analyse_types(self, env):
pass # Types are held in class variables
-
+
def check_const(self):
    """Literal constants are always valid constant expressions."""
    return True
-
+
def get_constant_c_result_code(self):
return self.calculate_result_code()
def compile_time_value(self, denv):
return self.value
-
+
def calculate_result_code(self):
    """C spelling of a boolean literal: '1' or '0'."""
    return str(int(self.value))
def calculate_constant_result(self):
    # The constant value of a character literal is its ordinal.
    self.constant_result = ord(self.value)
-
+
def compile_time_value(self, denv):
    """A character literal evaluates to its ordinal at compile time."""
    return ord(self.value)
-
+
def calculate_result_code(self):
return "'%s'" % StringEncoding.escape_char(self.value)
self.result_code = code.get_py_num(plain_integer_string, self.longness)
else:
self.result_code = self.get_constant_c_result_code()
-
+
def get_constant_c_result_code(self):
    """C literal: the digits followed by the unsigned/longness suffixes."""
    digits = self.value_as_c_integer_string()
    return digits + self.unsigned + self.longness
def compile_time_value(self, denv):
    """Evaluate the literal text to a Python float."""
    return float(self.value)
-
+
def calculate_result_code(self):
strval = self.value
assert isinstance(strval, (str, unicode))
def analyse_as_type(self, env):
type = PyrexTypes.parse_basic_type(self.value)
- if type is not None:
+ if type is not None:
return type
from TreeFragment import TreeFragment
pos = (self.pos[0], self.pos[1], self.pos[2]-7)
def get_constant_c_result_code(self):
return None # FIXME
-
+
def calculate_result_code(self):
return self.result_code
def calculate_result_code(self):
return self.result_code
-
+
def compile_time_value(self, env):
return self.value
def calculate_result_code(self):
return self.result_code
-
+
def compile_time_value(self, env):
return self.value
def calculate_constant_result(self):
self.constant_result = Utils.str_to_number(self.value)
-
+
def compile_time_value(self, denv):
return Utils.str_to_number(self.value)
-
+
def analyse_types(self, env):
    # The result is materialised at runtime, so it needs a temporary.
    self.is_temp = 1
# Imaginary number literal
#
# value float imaginary part
-
+
type = PyrexTypes.c_double_complex_type
def calculate_constant_result(self):
    # An imaginary literal is a pure-imaginary complex value.
    self.constant_result = complex(0.0, self.value)
-
+
def compile_time_value(self, denv):
    """An imaginary literal evaluates to a pure-imaginary complex."""
    return complex(0.0, self.value)
-
+
def analyse_types(self, env):
self.type.create_declaration_utility_code(env)
float(self.value),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
-
+
class NewExprNode(AtomicExprNode):
# C++ new statement
#
# cppclass node c++ class to create
-
+
type = None
-
+
def infer_type(self, env):
type = self.cppclass.analyse_as_type(env)
if type is None or not type.is_cpp_class:
self.entry = constructor
self.type = constructor.type
return self.type
-
+
def analyse_types(self, env):
if self.type is None:
self.infer_type(env)
def generate_result_code(self, code):
pass
-
+
def calculate_result_code(self):
return "new " + self.class_type.declaration_code("")
# name string Python name of the variable
# entry Entry Symbol table entry
# type_entry Entry For extension type names, the original type entry
-
+
is_name = True
is_cython_module = False
cython_attribute = None
node = NameNode(pos)
node.analyse_types(env, entry=entry)
return node
-
+
def as_cython_attribute(self):
return self.cython_attribute
-
+
create_analysed_rvalue = staticmethod(create_analysed_rvalue)
-
+
def type_dependencies(self, env):
if self.entry is None:
self.entry = env.lookup(self.name)
return (self.entry,)
else:
return ()
-
+
def infer_type(self, env):
if self.entry is None:
self.entry = env.lookup(self.name)
return type_type
else:
return self.entry.type
-
+
def compile_time_value(self, denv):
try:
return denv.lookup(self.name)
if not self.entry or self.entry.type.is_pyobject:
return None
return self.entry.cname
-
+
def coerce_to(self, dst_type, env):
# If coercing to a generic pyobject and this is a builtin
# C function with a Python equivalent, manufacture a NameNode
node.analyse_rvalue_entry(env)
return node
return super(NameNode, self).coerce_to(dst_type, env)
-
+
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module.
# Returns the module scope, or None.
if entry and entry.as_module:
return entry.as_module
return None
-
+
def analyse_as_type(self, env):
if self.cython_attribute:
type = PyrexTypes.parse_basic_type(self.cython_attribute)
return entry.type
else:
return None
-
+
def analyse_as_extension_type(self, env):
# Try to interpret this as a reference to an extension type.
# Returns the extension type, or None.
return entry.type
else:
return None
-
+
def analyse_target_declaration(self, env):
if not self.entry:
self.entry = env.lookup_here(self.name)
env.control_flow.set_state(self.pos, (self.name, 'source'), 'assignment')
if self.entry.is_declared_generic:
self.result_ctype = py_object_type
-
+
def analyse_types(self, env):
if self.entry is None:
self.entry = env.lookup(self.name)
if entry.utility_code:
env.use_utility_code(entry.utility_code)
self.analyse_rvalue_entry(env)
-
+
def analyse_target_types(self, env):
self.analyse_entry(env)
if not self.is_lvalue():
if self.entry.type.is_buffer:
import Buffer
Buffer.used_buffer_aux_vars(self.entry)
-
+
def analyse_rvalue_entry(self, env):
#print "NameNode.analyse_rvalue_entry:", self.name ###
#print "Entry:", self.entry.__dict__ ###
entry = self.entry
if entry.is_type and entry.type.is_extension_type:
self.type_entry = entry
- if not (entry.is_const or entry.is_variable
+ if not (entry.is_const or entry.is_variable
or entry.is_builtin or entry.is_cfunction
or entry.is_cpp_class):
if self.entry.as_variable:
self.entry = self.entry.as_variable
else:
- error(self.pos,
+ error(self.pos,
"'%s' is not a constant, variable or function identifier" % self.name)
def is_simple(self):
# If it's not a C variable, it'll be in a temp.
return 1
-
+
def calculate_target_results(self, env):
pass
-
+
def check_const(self):
    """A name is constant if unresolved, or bound to a const, a C
    function, or a builtin — anything else is reported as non-const."""
    entry = self.entry
    if entry is None:
        return True
    if entry.is_const or entry.is_cfunction or entry.is_builtin:
        return True
    self.not_const()
    return False
-
+
def check_const_addr(self):
entry = self.entry
if not (entry.is_cglobal or entry.is_cfunction or entry.is_builtin):
return self.entry.is_variable and \
not self.entry.type.is_array and \
not self.entry.is_readonly
-
+
def is_ephemeral(self):
    """Names are never ephemeral, even when the result sits in a temp."""
    return 0
-
+
def calculate_result_code(self):
    """Return the entry's C name, or a marker when lookup failed earlier."""
    entry = self.entry
    if entry:
        return entry.cname
    return "<error>" # There was an error earlier
-
+
def generate_result_code(self, code):
assert hasattr(self, 'entry')
entry = self.entry
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
-
+
elif entry.is_pyglobal or entry.is_builtin:
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
code.putln(
'%s = __Pyx_GetName(%s, %s); %s' % (
self.result(),
- namespace,
+ namespace,
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
-
+
elif entry.is_local and False:
# control flow not good enough yet
assigned = entry.scope.control_flow.get_state((entry.name, 'initialized'), self.pos)
if (self.entry.type.is_ptr and isinstance(rhs, ListNode)
and not self.lhs_of_first_assignment):
error(self.pos, "Literal list must be assigned to pointer at time of declaration")
-
+
# is_pyglobal seems to be True for module level-globals only.
# We use this to access class->tp_dict if necessary.
if entry.is_pyglobal:
Buffer.put_assign_to_buffer(self.result(), rhstmp, buffer_aux, self.entry.type,
is_initialized=not self.lhs_of_first_assignment,
pos=self.pos, code=code)
-
+
if not pretty_rhs:
code.putln("%s = 0;" % rhstmp)
code.funcstate.release_temp(rhstmp)
-
+
def generate_deletion_code(self, code):
if self.entry is None:
return # There was an error earlier
namespace,
self.entry.name))
else:
- code.put_error_if_neg(self.pos,
+ code.put_error_if_neg(self.pos,
'__Pyx_DelAttrString(%s, "%s")' % (
Naming.module_cname,
self.entry.name))
-
+
def annotate(self, code):
if hasattr(self, 'is_called') and self.is_called:
pos = (self.pos[0], self.pos[1], self.pos[2] - len(self.name) - 1)
code.annotate(pos, AnnotationItem('py_call', 'python function', size=len(self.name)))
else:
code.annotate(pos, AnnotationItem('c_call', 'c function', size=len(self.name)))
-
+
class BackquoteNode(ExprNode):
# `expr`
#
# arg ExprNode
-
+
type = py_object_type
-
+
subexprs = ['arg']
-
+
def analyse_types(self, env):
self.arg.analyse_types(env)
self.arg = self.arg.coerce_to_pyobject(env)
self.arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
-
+
class ImportNode(ExprNode):
# Used as part of import statement implementation.
- # Implements result =
+ # Implements result =
# __import__(module_name, globals(), None, name_list)
#
# module_name StringNode dotted name of module
# name_list ListNode or None list of names to be imported
-
+
type = py_object_type
-
+
subexprs = ['module_name', 'name_list']
-
+
def analyse_types(self, env):
self.module_name.analyse_types(env)
self.module_name = self.module_name.coerce_to_pyobject(env)
# Implements result = iter(sequence)
#
# sequence ExprNode
-
+
type = py_object_type
-
+
subexprs = ['sequence']
-
+
def analyse_types(self, env):
self.sequence.analyse_types(env)
if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \
# The iterator is not owned by this node.
#
# iterator ExprNode
-
+
type = py_object_type
-
+
def __init__(self, iterator, env):
self.pos = iterator.pos
self.iterator = iterator
if iterator.type.is_ptr or iterator.type.is_array:
self.type = iterator.type.base_type
self.is_temp = 1
-
+
def generate_result_code(self, code):
sequence_type = self.iterator.sequence.type
if sequence_type is list_type:
# Node created during analyse_types phase
# of an ExceptClauseNode to fetch the current
# exception value.
-
+
type = py_object_type
-
+
def __init__(self, pos, env):
ExprNode.__init__(self, pos)
def set_var(self, var):
    # Record the C variable that holds the exception value.
    self.var = var
-
+
def calculate_result_code(self):
    # The result is simply the variable recorded by set_var().
    return self.var
# the regular cycle.
subexprs = []
-
+
def __init__(self, pos, type, env):
    """A node standing for a raw temporary of the given type."""
    ExprNode.__init__(self, pos)
    self.is_temp = 1
    self.type = type
    # Python objects are handled through the generic PyObject* ctype.
    if type.is_pyobject:
        self.result_ctype = py_object_type
-
+
def analyse_types(self, env):
    # The type was fixed at construction time; just report it.
    return self.type
-
+
def generate_result_code(self, code):
pass
# Do not participate in normal temp alloc/dealloc:
def allocate_temp_result(self, code):
pass
-
+
def release_temp_result(self, code):
pass
class PyTempNode(TempNode):
    """TempNode specialised to hold a Python object value."""

    def __init__(self, pos, env):
        TempNode.__init__(self, pos, PyrexTypes.py_object_type, env)
class RawCNameExprNode(ExprNode):
subexprs = []
-
+
def __init__(self, pos, type=None):
self.pos = pos
self.type = type
# indices is used on buffer access, index on non-buffer access.
# The former contains a clean list of index parameters, the
# latter whatever Python object is needed for index access.
-
+
subexprs = ['base', 'index', 'indices']
indices = None
return base[index]
except Exception, e:
self.compile_time_value_error(e)
-
+
def is_ephemeral(self):
    # An index expression is ephemeral exactly when its base is.
    return self.base.is_ephemeral()
-
+
def analyse_target_declaration(self, env):
    # Indexed targets need no declaration work of their own.
    pass
-
+
def analyse_as_type(self, env):
base_type = self.base.analyse_as_type(env)
if base_type and not base_type.is_pyobject:
template_values = [self.index]
import Nodes
type_node = Nodes.TemplatedTypeNode(
- pos = self.pos,
- positional_args = template_values,
+ pos = self.pos,
+ positional_args = template_values,
keyword_args = None)
return type_node.analyse(env, base_type = base_type)
else:
return PyrexTypes.CArrayType(base_type, int(self.index.compile_time_value(env)))
return None
-
+
def type_dependencies(self, env):
    """Delegate type dependencies to the indexed base expression."""
    return self.base.type_dependencies(env)
-
+
def infer_type(self, env):
base_type = self.base.infer_type(env)
if isinstance(self.index, SliceNode):
# slicing!
if base_type.is_string:
- # sliced C strings must coerce to Python
+ # sliced C strings must coerce to Python
return bytes_type
elif base_type in (unicode_type, bytes_type, str_type, list_type, tuple_type):
# slicing these returns the same type
else:
# TODO: Handle buffers (hopefully without too much redundancy).
return py_object_type
-
+
def analyse_types(self, env):
    """Analyse the subscript in an rvalue (reading) context."""
    self.analyse_base_and_index_types(env, getting = 1)
-
+
def analyse_target_types(self, env):
    """Analyse the subscript in an lvalue (assignment) context."""
    self.analyse_base_and_index_types(env, setting = 1)
# error messages
self.type = PyrexTypes.error_type
return
-
+
is_slice = isinstance(self.index, SliceNode)
# Potentially overflowing index value.
if not is_slice and isinstance(self.index, IntNode) and Utils.long_literal(self.index.value):
def check_const_addr(self):
    # The element's address is constant when the base address is
    # constant and the index is itself a constant expression.
    if not self.base.check_const_addr():
        return False
    return self.index.check_const()
-
+
def is_lvalue(self):
    """Subscript expressions are always assignable targets."""
    return 1
else:
return "(%s[%s])" % (
self.base.result(), self.index.result())
-
+
def extra_index_params(self):
if self.index.type.is_int:
if self.original_index_type.signed:
else:
for i in self.indices:
i.generate_evaluation_code(code)
-
+
def generate_subexpr_disposal_code(self, code):
self.base.generate_disposal_code(code)
if not self.indices:
self.extra_index_params(),
self.result(),
code.error_goto(self.pos)))
-
+
def generate_setitem_code(self, value_code, code):
if self.index.type.is_int:
function = "__Pyx_SetItemInt"
if self.base.type is dict_type:
function = "PyDict_SetItem"
# It would seem that we could specialized lists/tuples, but that
- # shouldn't happen here.
- # Both PyList_SetItem PyTuple_SetItem and a Py_ssize_t as input,
- # not a PyObject*, and bad conversion here would give the wrong
- # exception. Also, tuples are supposed to be immutable, and raise
- # TypeErrors when trying to set their entries (PyTuple_SetItem
- # is for creating new tuples from).
+ # shouldn't happen here.
+ # Both PyList_SetItem PyTuple_SetItem and a Py_ssize_t as input,
+ # not a PyObject*, and bad conversion here would give the wrong
+ # exception. Also, tuples are supposed to be immutable, and raise
+ # TypeErrors when trying to set their entries (PyTuple_SetItem
+ # is for creating new tuples from).
else:
function = "PyObject_SetItem"
code.putln(
code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
code.put_giveref("*%s" % ptr)
code.funcstate.release_temp(ptr)
- else:
+ else:
# Simple case
code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))
self.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
-
+
def generate_deletion_code(self, code):
self.generate_subexpr_evaluation_code(code)
#if self.type.is_pyobject:
# base ExprNode
# start ExprNode or None
# stop ExprNode or None
-
+
subexprs = ['base', 'start', 'stop']
def infer_type(self, env):
return base[start:stop]
except Exception, e:
self.compile_time_value_error(e)
-
+
def analyse_target_declaration(self, env):
pass
-
+
def analyse_target_types(self, env):
self.analyse_types(env)
# when assigning, we must accept any Python type
self.stop_code(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
-
+
def generate_assignment_code(self, rhs, code):
self.generate_subexpr_evaluation_code(code)
if self.type.is_pyobject:
- code.put_error_if_neg(self.pos,
+ code.put_error_if_neg(self.pos,
"__Pyx_PySequence_SetSlice(%s, %s, %s, %s)" % (
self.base.py_result(),
self.start_code(),
target_size, check))
code.putln(code.error_goto(self.pos))
code.putln("}")
-
+
def start_code(self):
    """C expression for the slice's start index ("0" when absent)."""
    if not self.start:
        return "0"
    return self.start.result()
-
+
def stop_code(self):
if self.stop:
return self.stop.result()
return self.base.type.size
else:
return "PY_SSIZE_T_MAX"
-
+
def calculate_result_code(self):
    # Never consumed — present only to satisfy the abstract interface.
    return "<unused>"
-
+
class SliceNode(ExprNode):
# start:stop:step in subscript list
# start ExprNode
# stop ExprNode
# step ExprNode
-
+
type = py_object_type
is_temp = 1
self.compile_time_value_error(e)
subexprs = ['start', 'stop', 'step']
-
+
def analyse_types(self, env):
self.start.analyse_types(env)
self.stop.analyse_types(env)
code.putln(
"%s = PySlice_New(%s, %s, %s); %s" % (
self.result(),
- self.start.py_result(),
- self.stop.py_result(),
+ self.start.py_result(),
+ self.stop.py_result(),
self.step.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
self.function.set_cname(type.declaration_code(""))
self.analyse_c_function_call(env)
return True
-
+
def is_lvalue(self):
    # Only calls yielding a C++ reference are assignable.
    return self.type.is_reference
# wrapper_call bool used internally
# has_optional_args bool used internally
# nogil bool used internally
-
+
subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple']
-
+
self = None
coerced_self = None
arg_tuple = None
has_optional_args = False
nogil = False
analysed = False
-
+
def compile_time_value(self, denv):
function = self.function.compile_time_value(denv)
args = [arg.compile_time_value(denv) for arg in self.args]
return function(*args)
except Exception, e:
self.compile_time_value_error(e)
-
+
def type_dependencies(self, env):
    """The call's result type depends only on the callee's type.

    TODO: revisit once C++ function overloading support is merged.
    """
    return self.function.type_dependencies(env)
-
+
def infer_type(self, env):
function = self.function
func_type = function.infer_type(env)
# Insert coerced 'self' argument into argument list.
self.args.insert(0, self.coerced_self)
self.analyse_c_function_call(env)
-
+
def function_type(self):
# Return the type of the function being called, coercing a function
# pointer to a function if necessary.
if func_type.is_ptr:
func_type = func_type.base_type
return func_type
-
+
def analyse_c_function_call(self, env):
if self.function.type is error_type:
self.type = error_type
self.args[i] = self.args[i].coerce_to(formal_type, env)
for i in range(max_nargs, actual_nargs):
if self.args[i].type.is_pyobject:
- error(self.args[i].pos,
+ error(self.args[i].pos,
"Python object cannot be passed as a varargs parameter")
# Calc result type and code fragment
if isinstance(self.function, NewExprNode):
def calculate_result_code(self):
    # A non-temp call result is simply the call expression itself.
    return self.c_call_code()
-
+
def c_call_code(self):
func_type = self.function_type()
if self.type is PyrexTypes.error_type or not func_type.is_cfunction:
for formal_arg, actual_arg in args[:expected_nargs]:
arg_code = actual_arg.result_as(formal_arg.type)
arg_list_code.append(arg_code)
-
+
if func_type.is_overridable:
arg_list_code.append(str(int(self.wrapper_call or self.function.entry.is_unbound_cmethod)))
-
+
if func_type.optional_arg_count:
if expected_nargs == actual_nargs:
optional_args = 'NULL'
else:
optional_args = "&%s" % self.opt_arg_struct
arg_list_code.append(optional_args)
-
+
for actual_arg in self.args[len(formal_args):]:
arg_list_code.append(actual_arg.result())
result = "%s(%s)" % (self.function.result(),
', '.join(arg_list_code))
return result
-
+
def generate_result_code(self, code):
func_type = self.function_type()
if func_type.is_pyobject:
if exc_check:
if self.nogil:
exc_checks.append("__Pyx_ErrOccurredWithGIL()")
- else:
+ else:
exc_checks.append("PyErr_Occurred()")
if self.is_temp or exc_checks:
rhs = self.c_call_code()
# positional_args ExprNode Tuple of positional arguments
# keyword_args ExprNode or None Dict of keyword arguments
# starstar_arg ExprNode or None Dict of extra keyword args
-
+
type = py_object_type
-
+
subexprs = ['function', 'positional_args', 'keyword_args', 'starstar_arg']
nogil_check = Node.gil_error
return function(*positional_args, **keyword_args)
except Exception, e:
self.compile_time_value_error(e)
-
+
def explicit_args_kwds(self):
if self.starstar_arg or not isinstance(self.positional_args, TupleNode):
raise CompileError(self.pos,
else:
self.type = py_object_type
self.is_temp = 1
-
+
def generate_result_code(self, code):
if self.type.is_error: return
kwargs_call_function = "PyEval_CallObjectWithKeywords"
if self.keyword_args and self.starstar_arg:
- code.put_error_if_neg(self.pos,
+ code.put_error_if_neg(self.pos,
"PyDict_Update(%s, %s)" % (
- self.keyword_args.py_result(),
+ self.keyword_args.py_result(),
self.starstar_arg.py_result()))
keyword_code = self.keyword_args.py_result()
elif self.keyword_args:
# the * argument of a function call.
#
# arg ExprNode
-
+
subexprs = ['arg']
def calculate_constant_result(self):
self.constant_result = tuple(self.base.constant_result)
-
+
def compile_time_value(self, denv):
arg = self.arg.compile_time_value(denv)
try:
self.arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
-
+
class AttributeNode(ExprNode):
# obj.attribute
# member string C name of struct member
# is_called boolean Function call is being done on result
# entry Entry Symbol table entry of attribute
-
+
is_attribute = 1
subexprs = ['obj']
-
+
type = PyrexTypes.error_type
entry = None
is_called = 0
# must be a cpdef function
self.is_temp = 1
self.entry = entry.as_variable
- self.analyse_as_python_attribute(env)
+ self.analyse_as_python_attribute(env)
return self
return ExprNode.coerce_to(self, dst_type, env)
return getattr(obj, attr)
except Exception, e:
self.compile_time_value_error(e)
-
+
def type_dependencies(self, env):
return self.obj.type_dependencies(env)
-
+
def infer_type(self, env):
if self.analyse_as_cimported_attribute(env, 0):
return self.entry.type
def analyse_target_declaration(self, env):
pass
-
+
def analyse_target_types(self, env):
self.analyse_types(env, target = 1)
-
+
def analyse_types(self, env, target = 0):
if self.analyse_as_cimported_attribute(env, target):
return
if not target and self.analyse_as_unbound_cmethod(env):
return
self.analyse_as_ordinary_attribute(env, target)
-
+
def analyse_as_cimported_attribute(self, env, target):
# Try to interpret this as a reference to an imported
# C const, type, var or function. If successful, mutates
self.mutate_into_name_node(env, entry, target)
return 1
return 0
-
+
def analyse_as_unbound_cmethod(self, env):
# Try to interpret this as a reference to an unbound
# C method of an extension type. If successful, mutates
self.mutate_into_name_node(env, ubcm_entry, None)
return 1
return 0
-
+
def analyse_as_type(self, env):
module_scope = self.obj.analyse_as_module(env)
if module_scope:
if base_type and hasattr(base_type, 'scope'):
return base_type.scope.lookup_type(self.attribute)
return None
-
+
def analyse_as_extension_type(self, env):
# Try to interpret this as a reference to an extension type
# in a cimported module. Returns the extension type, or None.
if entry and entry.is_type and entry.type.is_extension_type:
return entry.type
return None
-
+
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module
# in another cimported module. Returns the module scope, or None.
if entry and entry.as_module:
return entry.as_module
return None
-
+
def mutate_into_name_node(self, env, entry, target):
# Mutate this node into a NameNode and complete the
# analyse_types phase.
NameNode.analyse_target_types(self, env)
else:
NameNode.analyse_rvalue_entry(self, env)
-
+
def analyse_as_ordinary_attribute(self, env, target):
self.obj.analyse_types(env)
self.analyse_attribute(env)
if entry and entry.is_member:
entry = None
else:
- error(self.pos,
- "Cannot select attribute of incomplete type '%s'"
+ error(self.pos,
+ "Cannot select attribute of incomplete type '%s'"
% obj_type)
self.type = PyrexTypes.error_type
return
# method of an extension type, so we treat it like a Python
# attribute.
pass
- # If we get here, the base object is not a struct/union/extension
+ # If we get here, the base object is not a struct/union/extension
# type, or it is an extension type and the attribute is either not
# declared or is declared as a Python method. Treat it as a Python
# attribute reference.
return 1
else:
return NameNode.is_lvalue(self)
-
+
def is_ephemeral(self):
if self.obj:
return self.obj.is_ephemeral()
else:
return NameNode.is_ephemeral(self)
-
+
def calculate_result_code(self):
#print "AttributeNode.calculate_result_code:", self.member ###
#print "...obj node =", self.obj, "code", self.obj.result() ###
if self.entry and self.entry.is_cmethod:
if obj.type.is_extension_type:
return "((struct %s *)%s%s%s)->%s" % (
- obj.type.vtabstruct_cname, obj_code, self.op,
+ obj.type.vtabstruct_cname, obj_code, self.op,
obj.type.vtabslot_cname, self.member)
else:
return self.member
# accessing a field of a builtin type, need to cast better than result_as() does
obj_code = obj.type.cast_code(obj.result(), to_object_struct = True)
return "%s%s%s" % (obj_code, self.op, self.member)
-
+
def generate_result_code(self, code):
interned_attr_cname = code.intern_identifier(self.attribute)
if self.is_py_attr:
and self.needs_none_check
and code.globalstate.directives['nonecheck']):
self.put_nonecheck(code)
-
+
def generate_assignment_code(self, rhs, code):
interned_attr_cname = code.intern_identifier(self.attribute)
self.obj.generate_evaluation_code(code)
if self.is_py_attr:
- code.put_error_if_neg(self.pos,
+ code.put_error_if_neg(self.pos,
'PyObject_SetAttr(%s, %s, %s)' % (
self.obj.py_result(),
interned_attr_cname,
rhs.free_temps(code)
self.obj.generate_disposal_code(code)
self.obj.free_temps(code)
-
+
def generate_deletion_code(self, code):
interned_attr_cname = code.intern_identifier(self.attribute)
self.obj.generate_evaluation_code(code)
error(self.pos, "Cannot delete C attribute of extension type")
self.obj.generate_disposal_code(code)
self.obj.free_temps(code)
-
+
def annotate(self, code):
if self.is_py_attr:
code.annotate(self.pos, AnnotationItem('py_attr', 'python attribute', size=len(self.attribute)))
# iterator ExprNode
# unpacked_items [ExprNode] or None
# coerced_unpacked_items [ExprNode] or None
-
+
subexprs = ['args']
-
+
is_sequence_constructor = 1
unpacked_items = None
def generate_result_code(self, code):
self.generate_operation_code(code)
-
+
def generate_assignment_code(self, rhs, code):
if self.starred_assignment:
self.generate_starred_assignment_code(rhs, code)
tuple_check = "PyTuple_CheckExact(%s)"
code.putln(
"if (%s && likely(PyTuple_GET_SIZE(%s) == %s)) {" % (
- tuple_check % rhs.py_result(),
- rhs.py_result(),
+ tuple_check % rhs.py_result(),
+ rhs.py_result(),
len(self.args)))
code.putln("PyObject* tuple = %s;" % rhs.py_result())
for item in self.unpacked_items:
for i in range(len(self.args)):
self.args[i].generate_assignment_code(
self.coerced_unpacked_items[i], code)
-
+
code.putln("} else {")
if rhs.type is tuple_type:
class TupleNode(SequenceNode):
# Tuple constructor.
-
+
type = tuple_type
gil_message = "Constructing Python tuple"
return tuple(values)
except Exception, e:
self.compile_time_value_error(e)
-
+
def generate_operation_code(self, code):
if len(self.args) == 0:
# result_code is Naming.empty_tuple
code.put_giveref(arg.py_result())
if self.is_literal:
code.put_giveref(self.py_result())
-
+
def generate_subexpr_disposal_code(self, code):
# We call generate_post_assignment_code here instead
# of generate_disposal_code, because values were stored
class ListNode(SequenceNode):
# List constructor.
-
+
# obj_conversion_errors [PyrexError] used internally
# orignial_args [ExprNode] used internally
type = list_type
gil_message = "Constructing Python list"
-
+
def type_dependencies(self, env):
return ()
-
+
def infer_type(self, env):
# TOOD: Infer non-object list arrays.
return list_type
SequenceNode.analyse_types(self, env)
self.obj_conversion_errors = held_errors()
release_errors(ignore=True)
-
+
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject:
for err in self.obj_conversion_errors:
self.type = error_type
error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
return self
-
+
def release_temp(self, env):
if self.type.is_array:
- # To be valid C++, we must allocate the memory on the stack
- # manually and be sure not to reuse it for something else.
+ # To be valid C++, we must allocate the memory on the stack
+ # manually and be sure not to reuse it for something else.
pass
else:
SequenceNode.release_temp(self, env)
def calculate_result_code(self):
return self.target.result()
-
+
def generate_result_code(self, code):
self.generate_operation_code(code)
child_attrs = ['expr']
type = PyrexTypes.c_int_type
-
+
def analyse_expressions(self, env):
self.expr.analyse_expressions(env)
if not self.expr.type.is_pyobject:
subexprs = ['args']
gil_message = "Constructing Python set"
-
+
def analyse_types(self, env):
for i in range(len(self.args)):
arg = self.args[i]
# key_value_pairs [DictItemNode]
#
# obj_conversion_errors [PyrexError] used internally
-
+
subexprs = ['key_value_pairs']
is_temp = 1
type = dict_type
def calculate_constant_result(self):
self.constant_result = dict([
item.constant_result for item in self.key_value_pairs])
-
+
def compile_time_value(self, denv):
pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv))
for item in self.key_value_pairs]
return dict(pairs)
except Exception, e:
self.compile_time_value_error(e)
-
+
def type_dependencies(self, env):
return ()
-
+
def infer_type(self, env):
# TOOD: Infer struct constructors.
return dict_type
def may_be_none(self):
return False
-
+
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject:
self.release_errors()
self.type = error_type
error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
return self
-
+
def release_errors(self):
for err in self.obj_conversion_errors:
report_error(err)
for item in self.key_value_pairs:
item.generate_evaluation_code(code)
if self.type.is_pyobject:
- code.put_error_if_neg(self.pos,
+ code.put_error_if_neg(self.pos,
"PyDict_SetItem(%s, %s, %s)" % (
self.result(),
item.key.py_result(),
item.value.result()))
item.generate_disposal_code(code)
item.free_temps(code)
-
+
def annotate(self, code):
for item in self.key_value_pairs:
item.annotate(code)
-
+
class DictItemNode(ExprNode):
# Represents a single item in a DictNode
#
def calculate_constant_result(self):
self.constant_result = (
self.key.constant_result, self.value.constant_result)
-
+
def analyse_types(self, env):
self.key.analyse_types(env)
self.value.analyse_types(env)
self.key = self.key.coerce_to_pyobject(env)
self.value = self.value.coerce_to_pyobject(env)
-
+
def generate_evaluation_code(self, code):
self.key.generate_evaluation_code(code)
self.value.generate_evaluation_code(code)
def free_temps(self, code):
self.key.free_temps(code)
self.value.free_temps(code)
-
+
def __iter__(self):
return iter([self.key, self.value])
# dict ExprNode Class dict (not owned by this node)
# doc ExprNode or None Doc string
# module_name EncodedString Name of defining module
-
+
subexprs = ['bases', 'doc']
def analyse_types(self, env):
cname = code.intern_identifier(self.name)
if self.doc:
- code.put_error_if_neg(self.pos,
+ code.put_error_if_neg(self.pos,
'PyDict_SetItemString(%s, "__doc__", %s)' % (
self.dict.py_result(),
self.doc.py_result()))
#
# function ExprNode Function object
# self_object ExprNode self object
-
+
subexprs = ['function']
-
+
def analyse_types(self, env):
self.function.analyse_types(env)
self.type = py_object_type
# object from a class and a function.
#
# function ExprNode Function object
-
+
type = py_object_type
is_temp = 1
-
+
subexprs = ['function']
-
+
def analyse_types(self, env):
self.function.analyse_types(env)
subexprs = []
self_object = None
binding = False
-
+
type = py_object_type
is_temp = 1
-
+
def analyse_types(self, env):
if self.binding:
env.use_utility_code(binding_cfunc_utility_code)
def may_be_none(self):
return False
-
+
gil_message = "Constructing Python function"
def self_result_code(self):
# - Check operand type and coerce if needed.
# - Determine result type and result code fragment.
# - Allocate temporary for result if needed.
-
+
subexprs = ['operand']
infix = True
def calculate_constant_result(self):
func = compile_time_unary_operators[self.operator]
self.constant_result = func(self.operand.constant_result)
-
+
def compile_time_value(self, denv):
func = compile_time_unary_operators.get(self.operator)
if not func:
return func(operand)
except Exception, e:
self.compile_time_value_error(e)
-
+
def infer_type(self, env):
operand_type = self.operand.infer_type(env)
if operand_type.is_pyobject:
self.analyse_cpp_operation(env)
else:
self.analyse_c_operation(env)
-
+
def check_const(self):
return self.operand.check_const()
-
+
def is_py_operation(self):
return self.operand.type.is_pyobject
def is_cpp_operation(self):
type = self.operand.type
return type.is_cpp_class
-
+
def coerce_operand_to_pyobject(self, env):
self.operand = self.operand.coerce_to_pyobject(env)
-
+
def generate_result_code(self, code):
if self.operand.type.is_pyobject:
self.generate_py_operation_code(code)
-
+
def generate_py_operation_code(self, code):
function = self.py_operation_function()
code.putln(
"%s = %s(%s); %s" % (
- self.result(),
- function,
+ self.result(),
+ function,
self.operand.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
-
+
def type_error(self):
if not self.operand.type.is_error:
error(self.pos, "Invalid operand type for '%s' (%s)" %
# 'not' operator
#
# operand ExprNode
-
+
type = PyrexTypes.c_bint_type
subexprs = ['operand']
-
+
def calculate_constant_result(self):
self.constant_result = not self.operand.constant_result
def infer_type(self, env):
return PyrexTypes.c_bint_type
-
+
def analyse_types(self, env):
self.operand.analyse_types(env)
self.operand = self.operand.coerce_to_boolean(env)
-
+
def calculate_result_code(self):
return "(!%s)" % self.operand.result()
-
+
def generate_result_code(self, code):
pass
class UnaryPlusNode(UnopNode):
# unary '+' operator
-
+
operator = '+'
-
+
def analyse_c_operation(self, env):
self.type = self.operand.type
-
+
def py_operation_function(self):
return "PyNumber_Positive"
-
+
def calculate_result_code(self):
if self.is_cpp_operation():
return "(+%s)" % self.operand.result()
class UnaryMinusNode(UnopNode):
# unary '-' operator
-
+
operator = '-'
-
+
def analyse_c_operation(self, env):
if self.operand.type.is_numeric:
self.type = self.operand.type
self.type_error()
if self.type.is_complex:
self.infix = False
-
+
def py_operation_function(self):
return "PyNumber_Negative"
-
+
def calculate_result_code(self):
if self.infix:
return "(-%s)" % self.operand.result()
def py_operation_function(self):
return "PyNumber_Invert"
-
+
def calculate_result_code(self):
return "(~%s)" % self.operand.result()
# unary * operator
operator = '*'
-
+
def analyse_c_operation(self, env):
if self.operand.type.is_ptr:
self.type = self.operand.type.base_type
class DecrementIncrementNode(CUnopNode):
# unary ++/-- operator
-
+
def analyse_c_operation(self, env):
if self.operand.type.is_ptr or self.operand.type.is_numeric:
self.type = self.operand.type
# The C address-of operator.
#
# operand ExprNode
-
+
subexprs = ['operand']
-
+
def infer_type(self, env):
return PyrexTypes.c_ptr_type(self.operand.infer_type(env))
self.error("Cannot take address of Python variable")
return
self.type = PyrexTypes.c_ptr_type(argtype)
-
+
def check_const(self):
return self.operand.check_const_addr()
-
+
def error(self, mess):
error(self.pos, mess)
self.type = PyrexTypes.error_type
self.result_code = "<error>"
-
+
def calculate_result_code(self):
return "(&%s)" % self.operand.result()
def generate_result_code(self, code):
pass
-
+
unop_node_classes = {
"+": UnaryPlusNode,
}
def unop_node(pos, operator, operand):
- # Construct unnop node of appropriate class for
+ # Construct unnop node of appropriate class for
# given operator.
if isinstance(operand, IntNode) and operator == '-':
return IntNode(pos = operand.pos, value = str(-Utils.str_to_number(operand.value)))
elif isinstance(operand, UnopNode) and operand.operator == operator:
warning(pos, "Python has no increment/decrement operator: %s%sx = %s(%sx) = x" % ((operator,)*4), 5)
- return unop_node_classes[operator](pos,
- operator = operator,
+ return unop_node_classes[operator](pos,
+ operator = operator,
operand = operand)
#
# If used from a transform, one can if wanted specify the attribute
# "type" directly and leave base_type and declarator to None
-
+
subexprs = ['operand']
base_type = declarator = type = None
-
+
def type_dependencies(self, env):
return ()
-
+
def infer_type(self, env):
if self.type is None:
base_type = self.base_type.analyse(env)
_, self.type = self.declarator.analyse(base_type, env)
return self.type
-
+
def analyse_types(self, env):
if self.type is None:
base_type = self.base_type.analyse(env)
if not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct):
error(self.pos, "Python objects cannot be cast from pointers of primitive types")
else:
- # Should this be an error?
+ # Should this be an error?
warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.operand.type, self.type))
self.operand = self.operand.coerce_to_simple(env)
elif from_py and not to_py:
# we usually do not know the result of a type cast at code
# generation time
pass
-
+
def calculate_result_code(self):
if self.type.is_complex:
operand_result = self.operand.result()
return "%s(%s, %s)" % (
self.type.from_parts,
real_part,
- imag_part)
+ imag_part)
else:
return self.type.cast_code(self.operand.result())
-
+
def get_constant_c_result_code(self):
operand_result = self.operand.get_constant_c_result_code()
if operand_result:
return self.type.cast_code(operand_result)
-
+
def result_as(self, type):
if self.type.is_pyobject and not self.is_temp:
# Optimise away some unnecessary casting
class SizeofNode(ExprNode):
# Abstract base class for sizeof(x) expression nodes.
-
+
type = PyrexTypes.c_size_t_type
def check_const(self):
#
# base_type CBaseTypeNode
# declarator CDeclaratorNode
-
+
subexprs = []
arg_type = None
-
+
def analyse_types(self, env):
# we may have incorrectly interpreted a dotted name as a type rather than an attribute
# this could be better handled by more uniformly treating types as runtime-available objects
_, arg_type = self.declarator.analyse(base_type, env)
self.arg_type = arg_type
self.check_type()
-
+
def check_type(self):
arg_type = self.arg_type
if arg_type.is_pyobject and not arg_type.is_extension_type:
error(self.pos, "Cannot take sizeof void")
elif not arg_type.is_complete():
error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type)
-
+
def calculate_result_code(self):
if self.arg_type.is_extension_type:
# the size of the pointer is boring
else:
arg_code = self.arg_type.declaration_code("")
return "(sizeof(%s))" % arg_code
-
+
class SizeofVarNode(SizeofNode):
# C sizeof function applied to a variable
#
# operand ExprNode
-
+
subexprs = ['operand']
-
+
def analyse_types(self, env):
# We may actually be looking at a type rather than a variable...
# If we are, traditional analysis would fail...
self.check_type()
else:
self.operand.analyse_types(env)
-
+
def calculate_result_code(self):
return "(sizeof(%s))" % self.operand.result()
-
+
def generate_result_code(self, code):
pass
#
# operand ExprNode
# literal StringNode # internal
-
+
literal = None
type = py_object_type
-
+
subexprs = ['literal'] # 'operand' will be ignored after type analysis!
-
+
def analyse_types(self, env):
self.operand.analyse_types(env)
self.literal = StringNode(
def generate_evaluation_code(self, code):
self.literal.generate_evaluation_code(code)
-
+
def calculate_result_code(self):
return self.literal.calculate_result_code()
# - Check operand types and coerce if needed.
# - Determine result type and result code fragment.
# - Allocate temporary for result if needed.
-
+
subexprs = ['operand1', 'operand2']
inplace = False
return func(operand1, operand2)
except Exception, e:
self.compile_time_value_error(e)
-
+
def infer_type(self, env):
return self.result_type(self.operand1.infer_type(env),
self.operand2.infer_type(env))
-
+
def analyse_types(self, env):
self.operand1.analyse_types(env)
self.operand2.analyse_types(env)
self.analyse_operation(env)
-
+
def analyse_operation(self, env):
if self.is_py_operation():
self.coerce_operands_to_pyobjects(env)
self.analyse_cpp_operation(env)
else:
self.analyse_c_operation(env)
-
+
def is_py_operation(self):
return self.is_py_operation_types(self.operand1.type, self.operand2.type)
-
+
def is_py_operation_types(self, type1, type2):
return type1.is_pyobject or type2.is_pyobject
def is_cpp_operation(self):
return (self.operand1.type.is_cpp_class
or self.operand2.type.is_cpp_class)
-
+
def analyse_cpp_operation(self, env):
type1 = self.operand1.type
type2 = self.operand2.type
self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
self.type = func_type.return_type
-
+
def result_type(self, type1, type2):
if self.is_py_operation_types(type1, type2):
if type2.is_string:
def nogil_check(self, env):
if self.is_py_operation():
self.gil_error()
-
+
def coerce_operands_to_pyobjects(self, env):
self.operand1 = self.operand1.coerce_to_pyobject(env)
self.operand2 = self.operand2.coerce_to_pyobject(env)
-
+
def check_const(self):
return self.operand1.check_const() and self.operand2.check_const()
-
+
def generate_result_code(self, code):
#print "BinopNode.generate_result_code:", self.operand1, self.operand2 ###
if self.operand1.type.is_pyobject:
extra_args = ""
code.putln(
"%s = %s(%s, %s%s); %s" % (
- self.result(),
- function,
+ self.result(),
+ function,
self.operand1.py_result(),
self.operand2.py_result(),
extra_args,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
-
+
def type_error(self):
if not (self.operand1.type.is_error
or self.operand2.type.is_error):
error(self.pos, "Invalid operand types for '%s' (%s; %s)" %
- (self.operator, self.operand1.type,
+ (self.operator, self.operand1.type,
self.operand2.type))
self.type = PyrexTypes.error_type
class CBinopNode(BinopNode):
-
+
def analyse_types(self, env):
BinopNode.analyse_types(self, env)
if self.is_py_operation():
self.type = PyrexTypes.error_type
-
+
def py_operation_function():
return ""
-
+
def calculate_result_code(self):
return "(%s %s %s)" % (
- self.operand1.result(),
- self.operator,
+ self.operand1.result(),
+ self.operator,
self.operand2.result())
class NumBinopNode(BinopNode):
# Binary operation taking numeric arguments.
-
+
infix = True
-
+
def analyse_c_operation(self, env):
type1 = self.operand1.type
type2 = self.operand2.type
if not self.infix or (type1.is_numeric and type2.is_numeric):
self.operand1 = self.operand1.coerce_to(self.type, env)
self.operand2 = self.operand2.coerce_to(self.type, env)
-
+
def compute_c_result_type(self, type1, type2):
if self.c_types_okay(type1, type2):
widest_type = PyrexTypes.widest_numeric_type(type1, type2)
return "(%s %s %s)" % (value1, self.operator, value2)
else:
return None
-
+
def c_types_okay(self, type1, type2):
#print "NumBinopNode.c_types_okay:", type1, type2 ###
return (type1.is_numeric or type1.is_enum) \
def calculate_result_code(self):
if self.infix:
return "(%s %s %s)" % (
- self.operand1.result(),
- self.operator,
+ self.operand1.result(),
+ self.operator,
self.operand2.result())
else:
func = self.type.binary_op(self.operator)
func,
self.operand1.result(),
self.operand2.result())
-
+
def is_py_operation_types(self, type1, type2):
return (type1 is PyrexTypes.c_py_unicode_type or
type2 is PyrexTypes.c_py_unicode_type or
BinopNode.is_py_operation_types(self, type1, type2))
-
+
def py_operation_function(self):
fuction = self.py_functions[self.operator]
if self.inplace:
class IntBinopNode(NumBinopNode):
# Binary operation taking integer arguments.
-
+
def c_types_okay(self, type1, type2):
#print "IntBinopNode.c_types_okay:", type1, type2 ###
return (type1.is_int or type1.is_enum) \
and (type2.is_int or type2.is_enum)
-
+
class AddNode(NumBinopNode):
# '+' operator.
-
+
def is_py_operation_types(self, type1, type2):
if type1.is_string and type2.is_string:
return 1
class SubNode(NumBinopNode):
# '-' operator.
-
+
def compute_c_result_type(self, type1, type2):
if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
return type1
class MulNode(NumBinopNode):
# '*' operator.
-
+
def is_py_operation_types(self, type1, type2):
if (type1.is_string and type2.is_int) \
or (type2.is_string and type1.is_int):
class DivNode(NumBinopNode):
# '/' or '//' operator.
-
+
cdivision = None
truedivision = None # == "unknown" if operator == '/'
ctruedivision = False
def generate_evaluation_code(self, code):
if not self.type.is_pyobject and not self.type.is_complex:
if self.cdivision is None:
- self.cdivision = (code.globalstate.directives['cdivision']
+ self.cdivision = (code.globalstate.directives['cdivision']
or not self.type.signed
or self.type.is_float)
if not self.cdivision:
code.globalstate.use_utility_code(div_int_utility_code.specialize(self.type))
NumBinopNode.generate_evaluation_code(self, code)
self.generate_div_warning_code(code)
-
+
def generate_div_warning_code(self, code):
if not self.type.is_pyobject:
if self.zerodivision_check:
if self.type.is_int and self.type.signed and self.operator != '%':
code.globalstate.use_utility_code(division_overflow_test_code)
code.putln("else if (sizeof(%s) == sizeof(long) && unlikely(%s == -1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(%s))) {" % (
- self.type.declaration_code(''),
+ self.type.declaration_code(''),
self.operand2.result(),
self.operand1.result()))
code.putln('PyErr_Format(PyExc_OverflowError, "value too large to perform division");')
code.put("if (__Pyx_cdivision_warning()) ")
code.put_goto(code.error_label)
code.putln("}")
-
+
def calculate_result_code(self):
if self.type.is_complex:
return NumBinopNode.calculate_result_code(self)
else:
return "__Pyx_div_%s(%s, %s)" % (
self.type.specialization_name(),
- self.operand1.result(),
+ self.operand1.result(),
self.operand2.result())
return "integer division or modulo by zero"
else:
return "float divmod()"
-
+
def generate_evaluation_code(self, code):
if not self.type.is_pyobject:
if self.cdivision is None:
mod_float_utility_code.specialize(self.type, math_h_modifier=self.type.math_h_modifier))
NumBinopNode.generate_evaluation_code(self, code)
self.generate_div_warning_code(code)
-
+
def calculate_result_code(self):
if self.cdivision:
if self.type.is_float:
return "fmod%s(%s, %s)" % (
self.type.math_h_modifier,
- self.operand1.result(),
+ self.operand1.result(),
self.operand2.result())
else:
return "(%s %% %s)" % (
- self.operand1.result(),
+ self.operand1.result(),
self.operand2.result())
else:
return "__Pyx_mod_%s(%s, %s)" % (
self.type.specialization_name(),
- self.operand1.result(),
+ self.operand1.result(),
self.operand2.result())
class PowNode(NumBinopNode):
# '**' operator.
-
+
def analyse_c_operation(self, env):
NumBinopNode.analyse_c_operation(self, env)
if self.type.is_complex:
else:
self.pow_func = "__Pyx_pow_%s" % self.type.declaration_code('').replace(' ', '_')
env.use_utility_code(
- int_pow_utility_code.specialize(func_name=self.pow_func,
+ int_pow_utility_code.specialize(func_name=self.pow_func,
type=self.type.declaration_code('')))
def calculate_result_code(self):
else:
return self.type.cast_code(operand.result())
return "%s(%s, %s)" % (
- self.pow_func,
- typecast(self.operand1),
+ self.pow_func,
+ typecast(self.operand1),
typecast(self.operand2))
# operator string
# operand1 ExprNode
# operand2 ExprNode
-
+
subexprs = ['operand1', 'operand2']
-
+
def infer_type(self, env):
type1 = self.operand1.infer_type(env)
type2 = self.operand2.infer_type(env)
self.constant_result = \
self.operand1.constant_result or \
self.operand2.constant_result
-
+
def compile_time_value(self, denv):
if self.operator == 'and':
return self.operand1.compile_time_value(denv) \
else:
return self.operand1.compile_time_value(denv) \
or self.operand2.compile_time_value(denv)
-
+
def coerce_to_boolean(self, env):
return BoolBinopNode(
self.pos,
self.type = PyrexTypes.independent_spanning_type(self.operand1.type, self.operand2.type)
self.operand1 = self.operand1.coerce_to(self.type, env)
self.operand2 = self.operand2.coerce_to(self.type, env)
-
+
# For what we're about to do, it's vital that
# both operands be temp nodes.
self.operand1 = self.operand1.coerce_to_simple(env)
def check_const(self):
return self.operand1.check_const() and self.operand2.check_const()
-
+
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
self.operand1.generate_evaluation_code(code)
self.operand1.generate_post_assignment_code(code)
self.operand1.free_temps(code)
code.putln("}")
-
+
def generate_operand1_test(self, code):
# Generate code to test the truth of the first operand.
if self.type.is_pyobject:
# test ExprNode
# true_val ExprNode
# false_val ExprNode
-
+
true_val = None
false_val = None
-
+
subexprs = ['test', 'true_val', 'false_val']
-
+
def type_dependencies(self, env):
return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env)
-
+
def infer_type(self, env):
return PyrexTypes.independent_spanning_type(self.true_val.infer_type(env),
self.false_val.infer_type(env))
self.is_temp = 1
if self.type == PyrexTypes.error_type:
self.type_error()
-
+
def type_error(self):
if not (self.true_val.type.is_error or self.false_val.type.is_error):
error(self.pos, "Incompatable types in conditional expression (%s; %s)" %
(self.true_val.type, self.false_val.type))
self.type = PyrexTypes.error_type
-
+
def check_const(self):
- return (self.test.check_const()
+ return (self.test.check_const()
and self.true_val.check_const()
and self.false_val.check_const())
-
+
def generate_evaluation_code(self, code):
# Because subexprs may not be evaluated we can use a more optimal
# subexpr allocation strategy than the default, so override evaluation_code.
-
+
code.mark_pos(self.pos)
self.allocate_temp_result(code)
self.test.generate_evaluation_code(code)
and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or
(self.operand1.type is PyrexTypes.c_py_unicode_type
and self.operand2.type is unicode_type))
-
+
def is_ptr_contains(self):
if self.operator in ('in', 'not_in'):
container_type = self.operand2.type
return True
return False
- def generate_operation_code(self, code, result_code,
+ def generate_operation_code(self, code, result_code,
operand1, op , operand2):
if self.type.is_pyobject:
coerce_result = "__Pyx_PyBool_FromLong"
else:
coerce_result = ""
- if 'not' in op:
+ if 'not' in op:
negation = "!"
- else:
+ else:
negation = ""
if self.special_bool_cmp_function:
if operand1.type.is_pyobject:
coerce_result,
negation,
method,
- operand2.py_result(),
- operand1.py_result(),
+ operand2.py_result(),
+ operand1.py_result(),
got_ref,
error_clause(result_code, self.pos)))
elif (operand1.type.is_pyobject
and op not in ('is', 'is_not')):
code.putln("%s = PyObject_RichCompare(%s, %s, %s); %s" % (
- result_code,
- operand1.py_result(),
- operand2.py_result(),
+ result_code,
+ operand1.py_result(),
+ operand2.py_result(),
richcmp_constants[op],
code.error_goto_if_null(result_code, self.pos)))
code.put_gotref(result_code)
elif operand1.type.is_complex:
- if op == "!=":
+ if op == "!=":
negation = "!"
- else:
+ else:
negation = ""
code.putln("%s = %s(%s%s(%s, %s));" % (
- result_code,
+ result_code,
coerce_result,
negation,
- operand1.type.unary_op('eq'),
- operand1.result(),
+ operand1.type.unary_op('eq'),
+ operand1.result(),
operand2.result()))
else:
type1 = operand1.type
code1 = operand1.result_as(common_type)
code2 = operand2.result_as(common_type)
code.putln("%s = %s(%s %s %s);" % (
- result_code,
- coerce_result,
- code1,
- self.c_operator(op),
+ result_code,
+ coerce_result,
+ code1,
+ self.c_operator(op),
code2))
def c_operator(self, op):
return "!="
else:
return op
-
+
contains_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE long __Pyx_NegateNonNeg(long b) { return unlikely(b < 0) ? b : !b; }
# operand1 ExprNode
# operand2 ExprNode
# cascade CascadedCmpNode
-
+
# We don't use the subexprs mechanism, because
# things here are too complicated for it to handle.
# Instead, we override all the framework methods
# which use it.
-
+
child_attrs = ['operand1', 'operand2', 'cascade']
-
+
cascade = None
def infer_type(self, env):
def calculate_constant_result(self):
self.calculate_cascaded_constant_result(self.operand1.constant_result)
-
+
def compile_time_value(self, denv):
operand1 = self.operand1.compile_time_value(denv)
return self.cascaded_compile_time_value(operand1, denv)
cdr = cdr.cascade
if self.is_pycmp or self.cascade:
self.is_temp = 1
-
+
def analyse_cpp_comparison(self, env):
type1 = self.operand1.type
type2 = self.operand2.type
self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
self.type = func_type.return_type
-
+
def has_python_operands(self):
return (self.operand1.type.is_pyobject
or self.operand2.type.is_pyobject)
-
+
def check_const(self):
if self.cascade:
self.not_const()
negation = ""
return "(%s%s(%s, %s))" % (
negation,
- self.operand1.type.binary_op('=='),
- self.operand1.result(),
+ self.operand1.type.binary_op('=='),
+ self.operand1.result(),
self.operand2.result())
elif self.is_c_string_contains():
if self.operand2.type is bytes_type:
return "(%s%s(%s, %s))" % (
negation,
method,
- self.operand2.result(),
+ self.operand2.result(),
self.operand1.result())
else:
return "(%s %s %s)" % (
self.operand2.generate_evaluation_code(code)
if self.is_temp:
self.allocate_temp_result(code)
- self.generate_operation_code(code, self.result(),
+ self.generate_operation_code(code, self.result(),
self.operand1, self.operator, self.operand2)
if self.cascade:
self.cascade.generate_evaluation_code(code,
# so only need to dispose of the two main operands.
self.operand1.generate_disposal_code(code)
self.operand2.generate_disposal_code(code)
-
+
def free_subexpr_temps(self, code):
# If this is called, it is a non-cascaded cmp,
# so only need to dispose of the two main operands.
self.operand1.free_temps(code)
self.operand2.free_temps(code)
-
+
def annotate(self, code):
self.operand1.annotate(code)
self.operand2.annotate(code)
class CascadedCmpNode(Node, CmpNode):
- # A CascadedCmpNode is not a complete expression node. It
- # hangs off the side of another comparison node, shares
- # its left operand with that node, and shares its result
+ # A CascadedCmpNode is not a complete expression node. It
+ # hangs off the side of another comparison node, shares
+ # its left operand with that node, and shares its result
# with the PrimaryCmpNode at the head of the chain.
#
# operator string
def has_python_operands(self):
return self.operand2.type.is_pyobject
-
+
def coerce_operands_to_pyobjects(self, env):
self.operand2 = self.operand2.coerce_to_pyobject(env)
if self.operand2.type is dict_type and self.operator in ('in', 'not_in'):
#self.operand2 = self.operand2.coerce_to_temp(env) #CTT
self.operand2 = self.operand2.coerce_to_simple(env)
self.cascade.coerce_cascaded_operands_to_temp(env)
-
+
def generate_evaluation_code(self, code, result, operand1):
if self.type.is_pyobject:
code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result)
else:
code.putln("if (%s) {" % result)
self.operand2.generate_evaluation_code(code)
- self.generate_operation_code(code, result,
+ self.generate_operation_code(code, result,
operand1, self.operator, self.operand2)
if self.cascade:
self.cascade.generate_evaluation_code(
}
def binop_node(pos, operator, operand1, operand2, inplace=False):
- # Construct binop node of appropriate class for
+ # Construct binop node of appropriate class for
# given operator.
- return binop_node_classes[operator](pos,
- operator = operator,
- operand1 = operand1,
+ return binop_node_classes[operator](pos,
+ operator = operator,
+ operand1 = operand1,
operand2 = operand2,
inplace = inplace)
# Abstract base class for coercion nodes.
#
# arg ExprNode node being coerced
-
+
subexprs = ['arg']
constant_result = not_a_constant
-
+
def __init__(self, arg):
self.pos = arg.pos
self.arg = arg
def calculate_constant_result(self):
# constant folding can break type coercion, so this is disabled
pass
-
+
def annotate(self, code):
self.arg.annotate(code)
if self.arg.type != self.type:
class CastNode(CoercionNode):
# Wrap a node in a C type cast.
-
+
def __init__(self, arg, new_type):
CoercionNode.__init__(self, arg)
self.type = new_type
def may_be_none(self):
return self.arg.may_be_none()
-
+
def calculate_result_code(self):
return self.arg.result_as(self.type)
nogil_check = Node.gil_error
gil_message = "Python type test"
-
+
def analyse_types(self, env):
pass
if self.notnone:
return False
return self.arg.may_be_none()
-
+
def result_in_temp(self):
return self.arg.result_in_temp()
-
+
def is_ephemeral(self):
return self.arg.is_ephemeral()
def calculate_result_code(self):
return self.arg.result()
-
+
def generate_result_code(self, code):
if self.type.typeobj_is_available():
if not self.type.is_builtin_type:
else:
error(self.pos, "Cannot test type of extern C class "
"without type object name specification")
-
+
def generate_post_assignment_code(self, code):
self.arg.generate_post_assignment_code(code)
def calculate_result_code(self):
return self.arg.result()
-
+
def generate_result_code(self, code):
code.putln(
"if (unlikely(%s == Py_None)) {" % self.arg.result())
class CoerceToPyTypeNode(CoercionNode):
# This node is used to convert a C data type
# to a Python object.
-
+
type = py_object_type
is_temp = 1
return self.arg.coerce_to_temp(env)
else:
return CoerceToBooleanNode(self, env)
-
+
def coerce_to_integer(self, env):
# If not already some C integer type, coerce to longint.
if self.arg.type.is_int:
def generate_result_code(self, code):
function = self.arg.type.to_py_function
code.putln('%s = %s(%s); %s' % (
- self.result(),
- function,
- self.arg.result(),
+ self.result(),
+ function,
+ self.arg.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
if self.type.is_string and self.arg.is_ephemeral():
error(arg.pos,
"Obtaining char * from temporary Python value")
-
+
def analyse_types(self, env):
# The arg is always already analysed
pass
if self.type.is_enum:
rhs = typecast(self.type, c_long_type, rhs)
code.putln('%s = %s; %s' % (
- self.result(),
+ self.result(),
rhs,
code.error_goto_if(self.type.error_condition(self.result()), self.pos)))
if self.type.is_pyobject:
class CoerceToBooleanNode(CoercionNode):
# This node is used when a result needs to be used
# in a boolean context.
-
+
type = PyrexTypes.c_bint_type
_special_builtins = {
self.gil_error()
gil_message = "Truth-testing Python object"
-
+
def check_const(self):
if self.is_temp:
self.not_const()
return False
return self.arg.check_const()
-
+
def calculate_result_code(self):
return "(%s != 0)" % self.arg.result()
else:
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
- self.result(),
- self.arg.py_result(),
+ self.result(),
+ self.arg.py_result(),
code.error_goto_if_neg(self.result(), self.pos)))
class CoerceToComplexNode(CoercionNode):
self.type.from_parts,
real_part,
imag_part)
-
+
def generate_result_code(self, code):
pass
def analyse_types(self, env):
# The arg is always already analysed
pass
-
+
def coerce_to_boolean(self, env):
self.arg = self.arg.coerce_to_boolean(env)
if self.arg.is_simple():
# to be used multiple times. The argument node's result must
# be in a temporary. This node "borrows" the result from the
# argument node, and does not generate any evaluation or
- # disposal code for it. The original owner of the argument
+ # disposal code for it. The original owner of the argument
# node is responsible for doing those things.
-
+
subexprs = [] # Arg is not considered a subexpr
nogil_check = None
-
+
def __init__(self, arg):
CoercionNode.__init__(self, arg)
if hasattr(arg, 'type'):
self.result_ctype = arg.result_ctype
if hasattr(arg, 'entry'):
self.entry = arg.entry
-
+
def result(self):
return self.arg.result()
-
+
def type_dependencies(self, env):
return self.arg.type_dependencies(env)
-
+
def infer_type(self, env):
return self.arg.infer_type(env)
self.is_temp = 1
if hasattr(self.arg, 'entry'):
self.entry = self.arg.entry
-
+
def generate_evaluation_code(self, code):
pass
def generate_result_code(self, code):
pass
-
+
def generate_disposal_code(self, code):
pass
-
+
def free_temps(self, code):
pass
class ModuleRefNode(ExprNode):
# Simple returns the module object
-
+
type = py_object_type
is_temp = False
subexprs = []
-
+
def analyse_types(self, env):
pass
class DocstringRefNode(ExprNode):
# Extracts the docstring of the body element
-
+
subexprs = ['body']
type = py_object_type
is_temp = True
-
+
def __init__(self, pos, body):
ExprNode.__init__(self, pos)
assert body.type.is_pyobject
#else
#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#endif
-""",
+""",
requires = [raise_noneindex_error_utility_code])
#------------------------------------------------------------------------------------
tuple_unpacking_error_code = UtilityCode(
proto = """
static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /*proto*/
-""",
+""",
impl = """
static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) {
if (t == Py_None) {
__Pyx_RaiseTooManyValuesError(index);
}
}
-""",
+""",
requires = [raise_none_iter_error_utility_code,
raise_need_more_values_to_unpack,
raise_too_many_values_to_unpack]
return result; /* may be NULL */
}
}
-""",
+""",
)
""",
impl="""
static int __Pyx_cdivision_warning(void) {
- return PyErr_WarnExplicit(PyExc_RuntimeWarning,
+ return PyErr_WarnExplicit(PyExc_RuntimeWarning,
"division with oppositely signed operands, C and Python semantics differ",
- %(FILENAME)s,
+ %(FILENAME)s,
%(LINENO)s,
__Pyx_MODULE_NAME,
NULL);
class EmptyScope(object):
def lookup(self, name):
return None
-
+
empty_scope = EmptyScope()
def interpret_compiletime_options(optlist, optdict, type_env=None, type_args=()):
raise CompileError(node.pos, "Type not allowed here.")
else:
return (node.compile_time_value(empty_scope), node.pos)
-
+
if optlist:
optlist = [interpret(x, ix) for ix, x in enumerate(optlist)]
if optdict:
octdigit = Any("01234567")
hexdigit = Any("0123456789ABCDEFabcdef")
indentation = Bol + Rep(Any(" \t"))
-
+
decimal = Rep1(digit)
dot = Str(".")
exponent = Any("Ee") + Opt(Any("+-")) + decimal
decimal_fract = (decimal + dot + Opt(decimal)) | (dot + decimal)
-
+
name = letter + Rep(letter | digit)
intconst = decimal | (Str("0") + ((Any("Xx") + Rep1(hexdigit)) |
(Any("Oo") + Rep1(octdigit)) |
intliteral = intconst + intsuffix
fltconst = (decimal_fract + Opt(exponent)) | (decimal + exponent)
imagconst = (intconst | fltconst) + Any("jJ")
-
+
sq_string = (
- Str("'") +
- Rep(AnyBut("\\\n'") | (Str("\\") + AnyChar)) +
+ Str("'") +
+ Rep(AnyBut("\\\n'") | (Str("\\") + AnyChar)) +
Str("'")
)
-
+
dq_string = (
- Str('"') +
- Rep(AnyBut('\\\n"') | (Str("\\") + AnyChar)) +
+ Str('"') +
+ Rep(AnyBut('\\\n"') | (Str("\\") + AnyChar)) +
Str('"')
)
-
+
non_sq = AnyBut("'") | (Str('\\') + AnyChar)
tsq_string = (
Str("'''")
- + Rep(non_sq | (Str("'") + non_sq) | (Str("''") + non_sq))
+ + Rep(non_sq | (Str("'") + non_sq) | (Str("''") + non_sq))
+ Str("'''")
)
-
+
non_dq = AnyBut('"') | (Str('\\') + AnyChar)
tdq_string = (
Str('"""')
- + Rep(non_dq | (Str('"') + non_dq) | (Str('""') + non_dq))
+ + Rep(non_dq | (Str('"') + non_dq) | (Str('""') + non_dq))
+ Str('"""')
)
-
+
beginstring = Opt(Any(string_prefixes)) + Opt(Any(raw_prefixes)) + (Str("'") | Str('"') | Str("'''") | Str('"""'))
two_oct = octdigit + octdigit
three_oct = octdigit + octdigit + octdigit
escapeseq = Str("\\") + (two_oct | three_oct |
Str('u') + four_hex | Str('x') + two_hex |
Str('U') + four_hex + four_hex | AnyChar)
-
+
deco = Str("@")
bra = Any("([{")
ket = Any(")]}")
punct = Any(":,;+-*/|&<>=.%`~^?")
diphthong = Str("==", "<>", "!=", "<=", ">=", "<<", ">>", "**", "//",
- "+=", "-=", "*=", "/=", "%=", "|=", "^=", "&=",
+ "+=", "-=", "*=", "/=", "%=", "|=", "^=", "&=",
"<<=", ">>=", "**=", "//=", "->")
spaces = Rep1(Any(" \t\f"))
escaped_newline = Str("\\\n")
lineterm = Eol + Opt(Str("\n"))
- comment = Str("#") + Rep(AnyBut("\n"))
-
+ comment = Str("#") + Rep(AnyBut("\n"))
+
return Lexicon([
(name, IDENT),
(intliteral, 'INT'),
(imagconst, 'IMAG'),
(deco, 'DECORATOR'),
(punct | diphthong, TEXT),
-
+
(bra, Method('open_bracket_action')),
(ket, Method('close_bracket_action')),
(lineterm, Method('newline_action')),
-
+
#(stringlit, 'STRING'),
(beginstring, Method('begin_string_action')),
-
+
(comment, IGNORE),
(spaces, IGNORE),
(escaped_newline, IGNORE),
-
+
State('INDENT', [
(comment + lineterm, Method('commentline')),
(Opt(spaces) + Opt(comment) + lineterm, IGNORE),
(indentation, Method('indentation_action')),
(Eof, Method('eof_action'))
]),
-
+
State('SQ_STRING', [
(escapeseq, 'ESCAPE'),
(Rep1(AnyBut("'\"\n\\")), 'CHARS'),
(Str("'"), Method('end_string_action')),
(Eof, 'EOF')
]),
-
+
State('DQ_STRING', [
(escapeseq, 'ESCAPE'),
(Rep1(AnyBut('"\n\\')), 'CHARS'),
(Str('"'), Method('end_string_action')),
(Eof, 'EOF')
]),
-
+
State('TSQ_STRING', [
(escapeseq, 'ESCAPE'),
(Rep1(AnyBut("'\"\n\\")), 'CHARS'),
(Str("'''"), Method('end_string_action')),
(Eof, 'EOF')
]),
-
+
State('TDQ_STRING', [
(escapeseq, 'ESCAPE'),
(Rep1(AnyBut('"\'\n\\')), 'CHARS'),
(Str('"""'), Method('end_string_action')),
(Eof, 'EOF')
]),
-
+
(Eof, Method('eof_action'))
],
-
+
# FIXME: Plex 1.9 needs different args here from Plex 1.1.4
#debug_flags = scanner_debug_flags,
#debug_file = scanner_dump_file
# include_directories [string]
# future_directives [object]
# language_level int currently 2 or 3 for Python 2/3
-
+
def __init__(self, include_directories, compiler_directives, cpp=False, language_level=2):
#self.modules = {"__builtin__" : BuiltinScope()}
import Builtin, CythonScope
self.include_directories = include_directories + [standard_include_path]
self.set_language_level(language_level)
-
+
self.gdb_debug_outputwriter = None
def set_language_level(self, level):
else:
_check_c_declarations = check_c_declarations
_specific_post_parse = None
-
+
if py and not pxd:
_align_function_definitions = AlignFunctionDefinitions(self)
else:
_align_function_definitions = None
-
+
return [
NormalizeTree(self),
PostParse(self),
debug_transform = [DebugTransform(self, options, result)]
else:
debug_transform = []
-
+
return list(itertools.chain(
[create_parse(self)],
self.create_pipeline(pxd=False, py=py),
return [parse_pxd] + self.create_pipeline(pxd=True) + [
ExtractPxdCode(self),
]
-
+
def create_py_pipeline(self, options, result):
return self.create_pyx_pipeline(options, result, py=True)
pipeline = self.create_pxd_pipeline(scope, module_name)
result = self.run_pipeline(pipeline, source_desc)
return result
-
+
def nonfatal_error(self, exc):
return Errors.report_error(exc)
error = err
return (error, data)
- def find_module(self, module_name,
+ def find_module(self, module_name,
relative_to = None, pos = None, need_pxd = 1):
# Finds and returns the module scope corresponding to
# the given relative or absolute module name. If this
except CompileError:
pass
return scope
-
+
def find_pxd_file(self, qualified_name, pos):
# Search include path for the .pxd file corresponding to the
# given fully-qualified module name.
# Search include path for the .pyx file corresponding to the
# given fully-qualified module name, as for find_pxd_file().
return self.search_include_directories(qualified_name, ".pyx", pos)
-
+
def find_include_file(self, filename, pos):
# Search list of include directories for filename.
# Reports an error and returns None if not found.
if not path:
error(pos, "'%s' not found" % filename)
return path
-
+
def search_include_directories(self, qualified_name, suffix, pos,
include=False):
# Search the list of include directories for the given
if dep_path and Utils.file_newer_than(dep_path, c_time):
return 1
return 0
-
+
def find_cimported_module_names(self, source_path):
return [ name for kind, name in self.read_dependency_file(source_path)
if kind == "cimport" ]
def is_package_dir(self, dir_path):
# Return true if the given directory is a package directory.
- for filename in ("__init__.py",
- "__init__.pyx",
+ for filename in ("__init__.py",
+ "__init__.pyx",
"__init__.pxd"):
path = os.path.join(dir_path, filename)
if Utils.path_exists(path):
# Find a top-level module, creating a new one if needed.
scope = self.lookup_submodule(name)
if not scope:
- scope = ModuleScope(name,
+ scope = ModuleScope(name,
parent_module = None, context = self)
self.modules[name] = scope
return scope
# Set up result object
result = create_default_resultobj(source, options)
-
+
# Get pipeline
if source_desc.filename.endswith(".py"):
pipeline = context.create_py_pipeline(options, result)
err, enddata = context.run_pipeline(pipeline, source)
context.teardown_errors(err, options, result)
return result
-
+
#------------------------------------------------------------------------
#
class CompilationOptions(object):
"""
Options to the Cython compiler:
-
+
show_version boolean Display version number
use_listing_file boolean Generate a .lis file
errors_to_stderr boolean Echo errors to stderr when using .lis
compiler_directives dict Overrides for pragma options (see Options.py)
evaluate_tree_assertions boolean Test support: evaluate parse tree assertions
language_level integer The Python language level: 2 or 3
-
+
cplus boolean Compile as c++ code
"""
-
+
def __init__(self, defaults = None, **kw):
self.include_path = []
if defaults:
class CompilationResult(object):
"""
Results from the Cython compiler:
-
+
c_file string or None The generated C source file
h_file string or None The generated C header file
i_file string or None The generated .pxi file
num_errors integer Number of compilation errors
compilation_source CompilationSource
"""
-
+
def __init__(self):
self.c_file = None
self.h_file = None
Results from compiling multiple Pyrex source files. A mapping
from source file paths to CompilationResult instances. Also
has the following attributes:
-
+
num_errors integer Total number of compilation errors
"""
-
+
num_errors = 0
def add(self, source, result):
def compile_single(source, options, full_module_name = None):
"""
compile_single(source, options, full_module_name)
-
+
Compile the given Pyrex implementation file and return a CompilationResult.
Always compiles a single file; does not perform timestamp checking or
recursion.
def compile_multiple(sources, options):
"""
compile_multiple(sources, options)
-
+
Compiles the given sequence of Pyrex implementation files and returns
a CompilationResultSet. Performs timestamp checking and/or recursion
if these are specified in the options.
def compile(source, options = None, full_module_name = None, **kwds):
"""
compile(source [, options], [, <option> = <value>]...)
-
+
Compile one or more Pyrex implementation files, with optional timestamp
checking and recursing on dependecies. The source argument may be a string
or a sequence of strings If it is a string and no recursion or timestamp
child_attrs = ["body"]
directives = None
-
+
def analyse_declarations(self, env):
if Options.embed_pos_in_docstring:
env.doc = EncodedString(u'File: %s (starting at line %s)' % Nodes.relative_position(self.pos))
env.doc = self.doc
env.directives = self.directives
self.body.analyse_declarations(env)
-
+
def process_implementation(self, options, result):
env = self.scope
env.return_type = PyrexTypes.c_void_type
self.generate_c_code(env, options, result)
self.generate_h_code(env, options, result)
self.generate_api_code(env, result)
-
+
def has_imported_c_functions(self):
for module in self.referenced_modules:
for entry in module.cfunc_entries:
if entry.defined_in_pxd:
return 1
return 0
-
+
def generate_dep_file(self, env, result):
modules = self.referenced_modules
if len(modules) > 1 or env.included_files:
h_code.putln("PyMODINIT_FUNC init%s(void);" % env.module_name)
h_code.putln("")
h_code.putln("#endif")
-
+
h_code.copyto(open_new_file(result.h_file))
-
+
def generate_public_declaration(self, entry, h_code, i_code):
h_code.putln("%s %s;" % (
Naming.extern_c_macro,
entry.type.declaration_code(
entry.cname, dll_linkage = "DL_IMPORT")))
if i_code:
- i_code.putln("cdef extern %s" %
+ i_code.putln("cdef extern %s" %
entry.type.declaration_code(entry.cname, pyrex = 1))
-
+
def api_name(self, env):
return env.qualified_name.replace(".", "__")
-
+
def generate_api_code(self, env, result):
api_funcs = []
public_extension_types = []
h_code.putln("}")
h_code.putln("")
h_code.putln("#endif")
-
+
h_code.copyto(open_new_file(result.api_file))
-
+
def generate_cclass_header_code(self, type, h_code):
h_code.putln("%s DL_IMPORT(PyTypeObject) %s;" % (
Naming.extern_c_macro,
type.typeobj_cname))
-
+
def generate_cclass_include_code(self, type, i_code):
i_code.putln("cdef extern class %s.%s:" % (
type.module_name, type.name))
var_entries = type.scope.var_entries
if var_entries:
for entry in var_entries:
- i_code.putln("cdef %s" %
+ i_code.putln("cdef %s" %
entry.type.declaration_code(entry.cname, pyrex = 1))
else:
i_code.putln("pass")
i_code.dedent()
-
+
def generate_c_code(self, env, options, result):
modules = self.referenced_modules
globalstate = Code.GlobalState(rootwriter, emit_linenums)
globalstate.initialize_main_c_code()
h_code = globalstate['h_code']
-
+
self.generate_module_preamble(env, modules, h_code)
globalstate.module_pos = self.pos
if Options.embed:
self.generate_main_method(env, globalstate['main_method'])
self.generate_filename_table(globalstate['filename_table'])
-
+
self.generate_declarations_for_modules(env, modules, globalstate)
h_code.write('\n')
for utilcode in env.utility_code_list:
globalstate.use_utility_code(utilcode)
globalstate.finalize_main_c_code()
-
+
f = open_new_file(result.c_file)
rootwriter.copyto(f)
if options.gdb_debug:
if Options.annotate or options.annotate:
self.annotate(rootwriter)
rootwriter.save_annotation(result.main_source_file, result.c_file)
-
+
def _serialize_lineno_map(self, env, ccodewriter):
tb = env.context.gdb_debug_outputwriter
markers = ccodewriter.buffer.allmarkers()
-
+
d = {}
- for c_lineno, cython_lineno in enumerate(markers):
+ for c_lineno, cython_lineno in enumerate(markers):
if cython_lineno > 0:
d.setdefault(cython_lineno, []).append(c_lineno + 1)
-
+
tb.start('LineNumberMapping')
for cython_lineno, c_linenos in sorted(d.iteritems()):
attrs = {
tb.end('LineNumber')
tb.end('LineNumberMapping')
tb.serialize()
-
+
def find_referenced_modules(self, env, module_list, modules_seen):
if env not in modules_seen:
modules_seen[env] = 1
if type.is_extension_type and not entry.in_cinclude:
type = entry.type
vtabslot_dict[type.objstruct_cname] = entry
-
+
def vtabstruct_cname(entry_type):
return entry_type.vtabstruct_cname
vtab_list = self.sort_types_by_inheritance(
code.putln(" #error Python headers needed to compile C extensions, please install development version of Python.")
code.putln("#else")
code.globalstate["end"].putln("#endif /* Py_PYTHON_H */")
-
+
code.put("""
#include <stddef.h> /* For offsetof */
#ifndef offsetof
code.putln('#include %s' % byte_decoded_filenname)
else:
code.putln('#include "%s"' % byte_decoded_filenname)
-
+
def generate_filename_table(self, code):
code.putln("")
code.putln("static const char *%s[] = {" % Naming.filetable_cname)
self.generate_enum_definition(entry, code)
elif type.is_extension_type:
self.generate_objstruct_definition(type, code)
-
+
def generate_gcc33_hack(self, env, code):
# Workaround for spurious warning generation in gcc 3.3
code.putln("")
tail = name
code.putln("typedef struct %s __pyx_gcc33_%s;" % (
name, tail))
-
+
def generate_typedef(self, entry, code):
base_type = entry.type.typedef_base_type
if base_type.is_numeric:
header = "%s %s {" % (kind, name)
footer = "};"
return header, footer
-
+
def generate_struct_union_definition(self, entry, code):
code.mark_pos(entry.pos)
type = entry.type
value_code += ","
code.putln(value_code)
code.putln(footer)
-
+
def generate_typeobject_predeclaration(self, entry, code):
code.putln("")
name = entry.type.typeobj_cname
# ??? Do we really need the rest of this? ???
#else:
# code.putln("staticforward PyTypeObject %s;" % name)
-
+
def generate_exttype_vtable_struct(self, entry, code):
code.mark_pos(entry.pos)
# Generate struct declaration for an extension type's vtable.
"%s;" % method_entry.type.declaration_code("(*%s)" % method_entry.name))
code.putln(
"};")
-
+
def generate_exttype_vtabptr_declaration(self, entry, code):
code.mark_pos(entry.pos)
# Generate declaration of pointer to an extension type's vtable.
code.putln("static struct %s *%s;" % (
type.vtabstruct_cname,
type.vtabptr_cname))
-
+
def generate_objstruct_definition(self, type, code):
code.mark_pos(type.pos)
# Generate object struct definition for an
code.putln("")
for entry in env.c_class_entries:
if definition or entry.defined_in_pxd:
- code.putln("static PyTypeObject *%s = 0;" %
+ code.putln("static PyTypeObject *%s = 0;" %
entry.type.typeptr_cname)
- code.put_var_declarations(env.var_entries, static = 1,
+ code.put_var_declarations(env.var_entries, static = 1,
dll_linkage = "DL_EXPORT", definition = definition)
-
+
def generate_cfunction_predeclarations(self, env, code, definition):
for entry in env.cfunc_entries:
if entry.inline_func_in_pxd or (not entry.in_cinclude and (definition
type = entry.type
if not definition and entry.defined_in_pxd:
type = CPtrType(type)
- header = type.declaration_code(entry.cname,
+ header = type.declaration_code(entry.cname,
dll_linkage = dll_linkage)
if entry.visibility == 'private':
storage_class = "static "
storage_class,
modifiers,
header))
-
+
def generate_typeobj_definitions(self, env, code):
full_module_name = env.qualified_name
for entry in env.c_class_entries:
self.generate_method_table(scope, code)
self.generate_getset_table(scope, code)
self.generate_typeobj_definition(full_module_name, entry, code)
-
+
def generate_exttype_vtable(self, scope, code):
# Generate the definition of an extension type's vtable.
type = scope.parent_type
code.putln("static struct %s %s;" % (
type.vtabstruct_cname,
type.vtable_cname))
-
+
def generate_self_cast(self, scope, code):
type = scope.parent_type
code.putln(
"%s = (%s)o;" % (
type.declaration_code("p"),
type.declaration_code("")))
-
+
def generate_new_function(self, scope, code):
tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__')
slot_func = scope.mangle_internal("tp_new")
else:
cinit_args = "o, a, k"
code.putln(
- "if (%s(%s) < 0) {" %
+ "if (%s(%s) < 0) {" %
(entry.func_cname, cinit_args))
code.put_decref_clear("o", py_object_type, nanny=False);
code.putln(
"return o;")
code.putln(
"}")
-
+
def generate_dealloc_function(self, scope, code):
tp_slot = TypeSlots.ConstructorSlot("tp_dealloc", '__dealloc__')
slot_func = scope.mangle_internal("tp_dealloc")
"(*Py_TYPE(o)->tp_free)(o);")
code.putln(
"}")
-
+
def generate_usr_dealloc_call(self, scope, code):
entry = scope.lookup_here("__dealloc__")
if entry:
code.putln(
"++Py_REFCNT(o);")
code.putln(
- "%s(o);" %
+ "%s(o);" %
entry.func_cname)
code.putln(
"if (PyErr_Occurred()) PyErr_WriteUnraisable(o);")
"PyErr_Restore(etype, eval, etb);")
code.putln(
"}")
-
+
def generate_traverse_function(self, scope, code):
tp_slot = TypeSlots.GCDependentSlot("tp_traverse")
slot_func = scope.mangle_internal("tp_traverse")
if entry.type.is_extension_type:
var_code = "((PyObject*)%s)" % var_code
code.putln(
- "e = (*v)(%s, a); if (e) return e;"
+ "e = (*v)(%s, a); if (e) return e;"
% var_code)
code.putln(
"}")
"return 0;")
code.putln(
"}")
-
+
def generate_clear_function(self, scope, code):
tp_slot = TypeSlots.GCDependentSlot("tp_clear")
slot_func = scope.mangle_internal("tp_clear")
"return 0;")
code.putln(
"}")
-
+
def generate_getitem_int_function(self, scope, code):
# This function is put into the sq_item slot when
# a __getitem__ method is present. It converts its
"}")
code.putln(
"}")
-
+
def generate_guarded_basetype_call(
self, base_type, substructure, slot, args, code):
if base_type:
"return v;")
code.putln(
"}")
-
+
def generate_setattro_function(self, scope, code):
# Setting and deleting an attribute are both done through
# the setattro method, so we dispatch to user's __setattr__
"}")
code.putln(
"}")
-
+
def generate_descr_get_function(self, scope, code):
# The __get__ function of a descriptor object can be
# called with NULL for the second or third arguments
"return r;")
code.putln(
"}")
-
+
def generate_descr_set_function(self, scope, code):
# Setting and deleting are both done through the __set__
# method of a descriptor, so we dispatch to user's __set__
code.putln(
"return -1;")
code.putln(
- "}")
+ "}")
code.putln(
"}")
-
+
def generate_property_accessors(self, cclass_scope, code):
for entry in cclass_scope.property_entries:
property_scope = entry.scope
self.generate_property_get_function(entry, code)
if property_scope.defines_any(["__set__", "__del__"]):
self.generate_property_set_function(entry, code)
-
+
def generate_property_get_function(self, property_entry, code):
property_scope = property_entry.scope
property_entry.getter_cname = property_scope.parent_scope.mangle(
get_entry.func_cname)
code.putln(
"}")
-
+
def generate_property_set_function(self, property_entry, code):
property_scope = property_entry.scope
property_entry.setter_cname = property_scope.parent_scope.mangle(
slot.generate(scope, code)
code.putln(
"};")
-
+
def generate_method_table(self, env, code):
code.putln("")
code.putln(
- "static PyMethodDef %s[] = {" %
+ "static PyMethodDef %s[] = {" %
env.method_table_cname)
for entry in env.pyfunc_entries:
code.put_pymethoddef(entry, ",")
"{0, 0, 0, 0}")
code.putln(
"};")
-
+
def generate_getset_table(self, env, code):
if env.property_entries:
code.putln("")
code.putln("Py_INCREF(o);")
code.put_decref(entry.cname, entry.type, nanny=False)
code.putln("%s = %s;" % (
- entry.cname,
+ entry.cname,
PyrexTypes.typecast(entry.type, py_object_type, "o")))
elif entry.type.from_py_function:
rhs = "%s(o)" % entry.type.from_py_function
code.putln("/*--- Execution code ---*/")
code.mark_pos(None)
-
+
self.body.generate_execution_code(code)
if Options.generate_cleanup_code:
if not Options.generate_cleanup_code:
return
code.globalstate.use_utility_code(register_cleanup_utility_code)
- code.putln('static PyObject *%s(CYTHON_UNUSED PyObject *self, CYTHON_UNUSED PyObject *unused) {' %
+ code.putln('static PyObject *%s(CYTHON_UNUSED PyObject *self, CYTHON_UNUSED PyObject *unused) {' %
Naming.cleanup_cname)
if Options.generate_cleanup_code >= 2:
code.putln("/*--- Global cleanup code ---*/")
code.putln("#if PY_MAJOR_VERSION < 3")
code.putln(
'%s = Py_InitModule4(__Pyx_NAMESTR("%s"), %s, %s, 0, PYTHON_API_VERSION);' % (
- env.module_cname,
- env.module_name,
- env.method_table_cname,
+ env.module_cname,
+ env.module_name,
+ env.method_table_cname,
doc))
code.putln("#else")
code.putln(
if Options.pre_import is not None:
code.putln(
'%s = PyImport_AddModule(__Pyx_NAMESTR("%s"));' % (
- Naming.preimport_cname,
+ Naming.preimport_cname,
Options.pre_import))
code.putln(
"if (!%s) %s;" % (
code.putln('if (__Pyx_ExportFunction("%s", (void (*)(void))%s, "%s") < 0) %s' % (
entry.name,
entry.cname,
- signature,
+ signature,
code.error_goto(self.pos)))
-
+
def generate_type_import_code_for_module(self, module, env, code):
# Generate type import code for all exported extension types in
# an imported module.
for entry in module.c_class_entries:
if entry.defined_in_pxd:
self.generate_type_import_code(env, entry.type, entry.pos, code)
-
+
def generate_c_function_import_code_for_module(self, module, env, code):
# Generate import code for all exported C functions in a cimported module.
entries = []
entry.type.signature_string(),
code.error_goto(self.pos)))
code.putln("Py_DECREF(%s); %s = 0;" % (temp, temp))
-
+
def generate_type_init_code(self, env, code):
# Generate type import code for extern extension types
# and type ready code for non-extern ones.
if base_type and base_type.module_name != env.qualified_name \
and not base_type.is_builtin_type:
self.generate_type_import_code(env, base_type, self.pos, code)
-
+
def use_type_import_utility_code(self, env):
env.use_utility_code(type_import_utility_code)
env.use_utility_code(import_module_utility_code)
-
+
def generate_type_import_code(self, env, type, pos, code):
# If not already done, generate code to import the typeobject of an
# extension type defined in another module, and extract its C method
"if (PyType_Ready(&%s) < 0) %s" % (
typeobj_cname,
code.error_goto(entry.pos)))
- # Fix special method docstrings. This is a bit of a hack, but
+ # Fix special method docstrings. This is a bit of a hack, but
# unless we let PyType_Ready create the slot wrappers we have
- # a significant performance hit. (See trac #561.)
+ # a significant performance hit. (See trac #561.)
for func in entry.type.scope.pyfunc_entries:
if func.is_special and Options.docstrings and func.wrapperbase_cname:
code.putln("{");
weakref_entry.cname))
else:
error(weakref_entry.pos, "__weakref__ slot must be of type 'object'")
-
+
def generate_exttype_vtable_init_code(self, entry, code):
# Generate code to initialise the C method table of an
# extension type.
meth_entry.cname,
cast,
meth_entry.func_cname))
-
+
def generate_typeptr_assignment_code(self, entry, code):
# Generate code to initialise the typeptr of an extension
# type defined in this module to point to its type object.
code.putln(
"%s = &%s;" % (
type.typeptr_cname, type.typeobj_cname))
-
+
#------------------------------------------------------------------------------------
#
# Runtime support code
if (!result)
goto bad;
if (!PyType_Check(result)) {
- PyErr_Format(PyExc_TypeError,
+ PyErr_Format(PyExc_TypeError,
"%s.%s is not a type object",
module_name, class_name);
goto bad;
}
if (!strict && ((PyTypeObject *)result)->tp_basicsize > size) {
- PyOS_snprintf(warning, sizeof(warning),
+ PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility",
module_name, class_name);
#if PY_VERSION_HEX < 0x02050000
#endif
}
else if (((PyTypeObject *)result)->tp_basicsize != size) {
- PyErr_Format(PyExc_ValueError,
+ PyErr_Format(PyExc_ValueError,
"%s.%s has the wrong size, try recompiling",
module_name, class_name);
goto bad;
""" % {'module_cleanup': Naming.cleanup_cname},
impl = """
static int __Pyx_RegisterCleanup(void) {
- /* Don't use Py_AtExit because that has a 32-call limit
- * and is called after python finalization.
+ /* Don't use Py_AtExit because that has a 32-call limit
+ * and is called after python finalization.
*/
PyObject *cleanup_func = 0;
PyObject *args = 0;
PyObject *res = 0;
int ret = -1;
-
+
cleanup_func = PyCFunction_New(&cleanup_def, 0);
args = PyTuple_New(1);
if (!cleanup_func || !args)
#endif
PyObject *name;
PyObject *item;
-
+
locals = PyDict_New(); if (!locals) goto bad;
if (__Pyx_import_all_from(locals, m) < 0) goto bad;
list = PyDict_Items(locals); if (!list) goto bad;
-
+
for(i=0; i<PyList_GET_SIZE(list); i++) {
name = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 0);
item = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 1);
#endif
}
ret = 0;
-
+
bad:
Py_XDECREF(locals);
Py_XDECREF(list);
}
""" % {'IMPORT_STAR' : Naming.import_star,
'IMPORT_STAR_SET' : Naming.import_star_set }
-
+
refnanny_utility_code = UtilityCode(proto="""
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
don't want to have to regnerate and compile all the source code
whenever the Python install directory moves (which could happen,
e.g,. when distributing binaries.)
-
+
INPUT:
a position tuple -- (absolute filename, line number column position)
"""
global absolute_path_length
if absolute_path_length==0:
- absolute_path_length = len(os.path.abspath(os.getcwd()))
+ absolute_path_length = len(os.path.abspath(os.getcwd()))
return (pos[0].get_filenametable_entry()[absolute_path_length+1:], pos[1])
def embed_position(pos, docstring):
node, code = args[:2]
marker = ' /* %s -> %s.%s %s */' % (
' ' * code.call_level,
- node.__class__.__name__,
- func.__name__,
+ node.__class__.__name__,
+ func.__name__,
node.pos[1:])
pristine = code.buffer.stream.tell()
code.putln(marker)
class VerboseCodeWriter(type):
# Set this as a metaclass to trace function calls in code.
- # This slows down code generation and makes much larger files.
+ # This slows down code generation and makes much larger files.
def __new__(cls, name, bases, attrs):
attrs = dict(attrs)
for mname, m in attrs.items():
if DebugFlags.debug_trace_code_generation:
__metaclass__ = VerboseCodeWriter
-
+
is_name = 0
is_literal = 0
temps = None
# containing nodes considered "children" in the tree. Each such attribute
# can either contain a single node or a list of nodes. See Visitor.py.
child_attrs = None
-
+
def __init__(self, pos, **kw):
self.pos = pos
self.__dict__.update(kw)
-
+
gil_message = "Operation"
nogil_check = None
def gil_error(self, env=None):
error(self.pos, "%s not allowed without gil" % self.gil_message)
-
+
cpp_message = "Operation"
-
+
def cpp_check(self, env):
if not env.is_cpp():
self.cpp_error()
if isinstance(value, list):
setattr(result, attrname, [x for x in value])
return result
-
-
+
+
#
# There are 4 phases of parse tree processing, applied in order to
# all the statements in a given scope-block:
# (2) analyse_expressions
# Determine the result types of expressions and fill in the
# 'type' attribute of each ExprNode. Insert coercion nodes into the
- # tree where needed to convert to and from Python objects.
+ # tree where needed to convert to and from Python objects.
# Allocate temporary locals for intermediate results. Fill
# in the 'result_code' attribute of each ExprNode with a C code
# fragment.
# Recursively applies the 3 processing phases to the bodies of
# functions.
#
-
+
def analyse_control_flow(self, env):
    """Default implementation: do nothing."""
-
+
def analyse_declarations(self, env):
    """Default implementation: no declarations to analyse."""
-
+
def analyse_expressions(self, env):
    """Abstract hook: concrete node classes must implement this."""
    message = "analyse_expressions not implemented for %s" % self.__class__.__name__
    raise InternalError(message)
-
+
def generate_code(self, code):
    """Abstract hook: concrete node classes must implement this."""
    message = "generate_code not implemented for %s" % self.__class__.__name__
    raise InternalError(message)
-
+
def annotate(self, code):
    # mro does the wrong thing, so dispatch manually: only
    # block-like nodes annotate their body.
    if not isinstance(self, BlockNode):
        return
    self.body.annotate(code)
-
+
def end_pos(self):
try:
return self._end_pos
if id(self) in encountered:
return "<%s (0x%x) -- already output>" % (self.__class__.__name__, id(self))
encountered.add(id(self))
-
+
def dump_child(x, level):
if isinstance(x, Node):
return x.dump(level, filter_out, cutoff-1, encountered)
return "[%s]" % ", ".join([dump_child(item, level) for item in x])
else:
return repr(x)
-
-
+
+
attrs = [(key, value) for key, value in self.__dict__.items() if key not in filter_out]
if len(attrs) == 0:
return "<%s (0x%x)>" % (self.__class__.__name__, id(self))
env.directives = self.directives
self.body.analyse_declarations(env)
env.directives = old
-
+
def analyse_expressions(self, env):
old = env.directives
env.directives = self.directives
self.body.generate_function_definitions(env, code)
env.directives = env_old
code.globalstate.directives = code_old
-
+
def generate_execution_code(self, code):
    """Generate the body's execution code with this node's directives active,
    then restore the previously active directives."""
    saved_directives = code.globalstate.directives
    code.globalstate.directives = self.directives
    self.body.generate_execution_code(code)
    code.globalstate.directives = saved_directives
-
+
def annotate(self, code):
    """Annotate the body with this node's directives active, then
    restore the previously active directives."""
    previous = code.globalstate.directives
    code.globalstate.directives = self.directives
    self.body.annotate(code)
    code.globalstate.directives = previous
-
+
class BlockNode(object):
# Mixin class for nodes representing a declaration block.
class StatListNode(Node):
# stats a list of StatNode
-
+
child_attrs = ["stats"]
def create_analysed(pos, env, *args, **kw):
node = StatListNode(pos, *args, **kw)
return node # No node-specific analysis necesarry
create_analysed = staticmethod(create_analysed)
-
+
def analyse_control_flow(self, env):
    """Run control-flow analysis on every statement in the list, in order."""
    for child in self.stats:
        child.analyse_control_flow(env)
#print "StatListNode.analyse_declarations" ###
for stat in self.stats:
stat.analyse_declarations(env)
-
+
def analyse_expressions(self, env):
    """Analyse the expressions of every statement in the list, in order."""
    for child in self.stats:
        child.analyse_expressions(env)
-
+
def generate_function_definitions(self, env, code):
    """Emit function definitions for every statement in the list, in order."""
    for child in self.stats:
        child.generate_function_definitions(env, code)
-
+
def generate_execution_code(self, code):
    """Emit execution code for each statement, marking its source
    position immediately before its code."""
    for child in self.stats:
        code.mark_pos(child.pos)
        child.generate_execution_code(code)
-
+
def annotate(self, code):
    """Annotate every statement in the list, in order."""
    for child in self.stats:
        child.annotate(code)
-
+
class StatNode(Node):
#
# (2) generate_execution_code
# Emit C code for executable statements.
#
-
+
def generate_function_definitions(self, env, code):
    """Default implementation: this statement defines no functions."""
-
+
def generate_execution_code(self, code):
    """Abstract hook: concrete statement classes must implement this."""
    message = "generate_execution_code not implemented for %s" % self.__class__.__name__
    raise InternalError(message)
class CDefExternNode(StatNode):
# include_file string or None
# body StatNode
-
+
child_attrs = ["body"]
-
+
def analyse_declarations(self, env):
if self.include_file:
env.add_include_file(self.include_file)
env.in_cinclude = 1
self.body.analyse_declarations(env)
env.in_cinclude = old_cinclude_flag
-
+
def analyse_expressions(self, env):
    """No expression analysis is needed for this node."""
-
+
def generate_execution_code(self, code):
    """No execution code is emitted for this node."""
def annotate(self, code):
    """Delegate annotation to the wrapped body."""
    self.body.annotate(code)
-
+
class CDeclaratorNode(Node):
# Part of a C declaration.
#
# analyse
# Returns (name, type) pair where name is the
- # CNameDeclaratorNode of the name being declared
+ # CNameDeclaratorNode of the name being declared
# and type is the type it is being declared as.
#
# calling_convention string Calling convention of CFuncDeclaratorNode
- # for which this is a base
+ # for which this is a base
child_attrs = []
# name string The Pyrex name being declared
# cname string or None C name, if specified
# default ExprNode or None the value assigned on declaration
-
+
child_attrs = ['default']
-
+
default = None
-
+
def analyse(self, base_type, env, nonempty = 0):
if nonempty and self.name == '':
- # May have mistaken the name for the type.
+ # May have mistaken the name for the type.
if base_type.is_ptr or base_type.is_array or base_type.is_buffer:
error(self.pos, "Missing argument name")
elif base_type.is_void:
base_type = py_object_type
self.type = base_type
return self, base_type
-
+
class CPtrDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
-
+
child_attrs = ["base"]
def analyse(self, base_type, env, nonempty = 0):
# dimension ExprNode
child_attrs = ["base", "dimension"]
-
+
def analyse(self, base_type, env, nonempty = 0):
if base_type.is_cpp_class:
from ExprNodes import TupleNode
# exception_check boolean True if PyErr_Occurred check needed
# nogil boolean Can be called without gil
# with_gil boolean Acquire gil around function body
-
+
child_attrs = ["base", "args", "exception_value"]
overridable = 0
is_self_arg = (i == 0 and env.is_c_class_scope))
name = name_declarator.name
if name_declarator.cname:
- error(self.pos,
+ error(self.pos,
"Function argument cannot have C name specification")
# Turn *[] argument into **
if type.is_array:
self.optional_arg_count += 1
elif self.optional_arg_count:
error(self.pos, "Non-default argument follows default argument")
-
+
if self.optional_arg_count:
scope = StructOrUnionScope()
arg_count_member = '%sn' % Naming.pyrex_prefix
cname = struct_cname)
self.op_args_struct.defined_in_pxd = 1
self.op_args_struct.used = 1
-
+
exc_val = None
exc_check = 0
if self.exception_check == '+':
error(self.pos,
"Function cannot return a function")
func_type = PyrexTypes.CFuncType(
- return_type, func_type_args, self.has_varargs,
+ return_type, func_type_args, self.has_varargs,
optional_arg_count = self.optional_arg_count,
exception_value = exc_val, exception_check = exc_check,
calling_convention = self.base.calling_convention,
base_type = self.base_type.analyse(env, could_be_name = could_be_name)
if hasattr(self.base_type, 'arg_name') and self.base_type.arg_name:
self.declarator.name = self.base_type.arg_name
- # The parser is unable to resolve the ambiguity of [] as part of the
- # type (e.g. in buffers) or empty declarator (as with arrays).
+ # The parser is unable to resolve the ambiguity of [] as part of the
+ # type (e.g. in buffers) or empty declarator (as with arrays).
# This is only arises for empty multi-dimensional arrays.
- if (base_type.is_array
- and isinstance(self.base_type, TemplatedTypeNode)
+ if (base_type.is_array
+ and isinstance(self.base_type, TemplatedTypeNode)
and isinstance(self.declarator, CArrayDeclaratorNode)):
declarator = self.declarator
while isinstance(declarator.base, CArrayDeclaratorNode):
#
# analyse
# Returns the type.
-
+
pass
-
+
def analyse_as_type(self, env):
    """Analysing a base-type node "as a type" is plain analysis."""
    return self.analyse(env)
-
+
class CAnalysedBaseTypeNode(Node):
# type type
-
+
child_attrs = []
-
+
def analyse(self, env, could_be_name = False):
    """Return the stored type; this node is already analysed, so
    env and could_be_name are ignored."""
    return self.type
child_attrs = []
arg_name = None # in case the argument name was interpreted as a type
-
+
def analyse(self, env, could_be_name = False):
# Return type descriptor.
#print "CSimpleBaseTypeNode.analyse: is_self_arg =", self.is_self_arg ###
return PyrexTypes.error_type
class CNestedBaseTypeNode(CBaseTypeNode):
- # For C++ classes that live inside other C++ classes.
+ # For C++ classes that live inside other C++ classes.
# name string
# base_type CBaseTypeNode
dtype_node = None
name = None
-
+
def analyse(self, env, could_be_name = False, base_type = None):
if base_type is None:
base_type = self.base_type_node.analyse(env)
if base_type.is_error: return base_type
-
+
if base_type.is_cpp_class:
# Templated class
if self.keyword_args and self.keyword_args.key_value_pairs:
return error_type
template_types.append(type)
self.type = base_type.specialize_here(self.pos, template_types)
-
+
elif base_type.is_pyobject:
# Buffer
import Buffer
for name, value in options.items() ])
self.type = PyrexTypes.BufferType(base_type, **options)
-
+
else:
# Array
empty_declarator = CNameDeclaratorNode(self.pos, name="", cname=None)
error(self.pos, "invalid array declaration")
self.type = PyrexTypes.error_type
else:
- # It would be nice to merge this class with CArrayDeclaratorNode,
+ # It would be nice to merge this class with CArrayDeclaratorNode,
# but arrays are part of the declaration, not the type...
if not self.positional_args:
dimension = None
else:
dimension = self.positional_args[0]
- self.array_declarator = CArrayDeclaratorNode(self.pos,
- base = empty_declarator,
+ self.array_declarator = CArrayDeclaratorNode(self.pos,
+ base = empty_declarator,
dimension = dimension)
self.type = self.array_declarator.analyse(base_type, env)[1]
-
+
return self.type
class CComplexBaseTypeNode(CBaseTypeNode):
# base_type CBaseTypeNode
# declarator CDeclaratorNode
-
+
child_attrs = ["base_type", "declarator"]
def analyse(self, env, could_be_name = False):
# in_pxd boolean
# api boolean
- # decorators [cython.locals(...)] or None
+ # decorators [cython.locals(...)] or None
# directive_locals { string : NameNode } locals defined by cython.locals(...)
child_attrs = ["base_type", "declarators"]
-
+
decorators = None
directive_locals = {}
-
+
def analyse_declarations(self, env, dest_scope = None):
if not dest_scope:
dest_scope = env
else:
need_property = False
visibility = self.visibility
-
+
for declarator in self.declarators:
name_declarator, type = declarator.analyse(base_type, env)
if not type.is_complete():
if self.directive_locals:
error(self.pos, "Decorators can only be followed by functions")
if self.in_pxd and self.visibility != 'extern':
- error(self.pos,
+ error(self.pos,
"Only 'extern' C variable declaration allowed in .pxd file")
entry = dest_scope.declare_var(name, type, declarator.pos,
cname = cname, visibility = visibility, is_cdef = 1)
entry.needs_property = need_property
-
+
class CStructOrUnionDefNode(StatNode):
# name string
# attributes [CVarDefNode] or None
# entry Entry
# packed boolean
-
+
child_attrs = ["attributes"]
def analyse_declarations(self, env):
if type == self.entry.type:
need_typedef_indirection = True
if need_typedef_indirection:
- # C can't handle typedef structs that refer to themselves.
+ # C can't handle typedef structs that refer to themselves.
struct_entry = self.entry
self.entry = env.declare_typedef(
self.name, struct_entry.type, self.pos,
# FIXME: this might be considered a hack ;-)
struct_entry.cname = struct_entry.type.cname = \
'_' + self.entry.type.typedef_cname
-
+
def analyse_expressions(self, env):
pass
-
+
def generate_execution_code(self, code):
pass
# visibility "public" or "private"
# in_pxd boolean
# entry Entry
-
+
child_attrs = ["items"]
-
+
def analyse_declarations(self, env):
self.entry = env.declare_enum(self.name, self.pos,
cname = self.cname, typedef_flag = self.typedef_flag,
code.error_goto_if_null(temp, item.pos)))
code.put_gotref(temp)
code.putln('if (__Pyx_SetAttrString(%s, "%s", %s) < 0) %s' % (
- Naming.module_cname,
- item.name,
+ Naming.module_cname,
+ item.name,
temp,
code.error_goto(item.pos)))
code.put_decref_clear(temp, PyrexTypes.py_object_type)
# name string
# cname string or None
# value ExprNode or None
-
+
child_attrs = ["value"]
def analyse_declarations(self, env, enum_entry):
if not self.value.type.is_int:
self.value = self.value.coerce_to(PyrexTypes.c_int_type, env)
self.value.analyse_const_expression(env)
- entry = env.declare_const(self.name, enum_entry.type,
+ entry = env.declare_const(self.name, enum_entry.type,
self.value, self.pos, cname = self.cname,
visibility = enum_entry.visibility)
enum_entry.enum_values.append(entry)
# in_pxd boolean
child_attrs = ["base_type", "declarator"]
-
+
def analyse_declarations(self, env):
base = self.base_type.analyse(env)
name_declarator, type = self.declarator.analyse(base, env)
cname = cname, visibility = self.visibility)
if self.in_pxd and not env.in_cinclude:
entry.defined_in_pxd = 1
-
+
def analyse_expressions(self, env):
pass
def generate_execution_code(self, code):
# needs_closure boolean Whether or not this function has inner functions/classes/yield
# needs_outer_scope boolean Whether or not this function requires outer scope
# directive_locals { string : NameNode } locals defined by cython.locals(...)
-
+
py_func = None
assmt = None
needs_closure = False
needs_outer_scope = False
modifiers = []
-
+
def analyse_default_values(self, env):
genv = env.global_scope()
default_seen = 0
def need_gil_acquisition(self, lenv):
    """Return 0 (false) by default: no GIL acquisition is requested."""
    return 0
-
+
def create_local_scope(self, env):
genv = env
while genv.is_py_class_scope or genv.is_c_class_scope:
self.local_scope = lenv
lenv.directives = env.directives
return lenv
-
+
def generate_function_definitions(self, env, code):
import Buffer
if (self.entry.name == '__long__' and
not self.entry.scope.lookup_here('__int__')):
preprocessor_guard = None
-
+
profile = code.globalstate.directives['profile']
if profile:
if lenv.nogil:
# Generate C code for header and body of function
code.enter_cfunc_scope()
code.return_from_error_cleanup_label = code.new_label()
-
+
# ----- Top-level constants used by this function
code.mark_pos(self.pos)
self.generate_cached_builtins_decls(lenv, code)
with_pymethdef = self.needs_assignment_synthesis(env, code)
if self.py_func:
- self.py_func.generate_function_header(code,
+ self.py_func.generate_function_header(code,
with_pymethdef = with_pymethdef,
proto_only=True)
self.generate_function_header(code,
if self.return_type.is_pyobject:
init = " = NULL"
code.putln(
- "%s%s;" %
+ "%s%s;" %
(self.return_type.declaration_code(Naming.retval_cname),
init))
tempvardecl_code = code.insertion_point()
code.putln("%s = (%s)%s->tp_new(%s, %s, NULL);" % (
Naming.cur_scope_cname,
lenv.scope_class.type.declaration_code(''),
- lenv.scope_class.type.typeptr_cname,
+ lenv.scope_class.type.typeptr_cname,
lenv.scope_class.type.typeptr_cname,
Naming.empty_tuple))
code.putln("if (unlikely(!%s)) {" % Naming.cur_scope_cname)
code.put_trace_call(self.entry.name, self.pos)
# ----- Fetch arguments
self.generate_argument_parsing_code(env, code)
- # If an argument is assigned to in the body, we must
+ # If an argument is assigned to in the body, we must
# incref it to properly keep track of refcounts.
for entry in lenv.arg_entries:
if entry.type.is_pyobject:
if entry.assignments and not entry.in_closure:
code.put_var_incref(entry)
- # ----- Initialise local variables
+ # ----- Initialise local variables
for entry in lenv.var_entries:
if entry.type.is_pyobject and entry.init_to_none and entry.used:
code.put_init_var_to_py_none(entry)
code.globalstate.use_utility_code(restore_exception_utility_code)
code.putln("{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;")
code.putln("__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);")
- for entry in lenv.buffer_entries:
+ for entry in lenv.buffer_entries:
Buffer.put_release_buffer_code(code, entry)
#code.putln("%s = 0;" % entry.cname)
code.putln("__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}")
err_val = self.error_value()
exc_check = self.caller_will_check_exceptions()
if err_val is not None or exc_check:
- # TODO: Fix exception tracing (though currently unused by cProfile).
+ # TODO: Fix exception tracing (though currently unused by cProfile).
# code.globalstate.use_utility_code(get_exception_tuple_utility_code)
# code.put_trace_exception()
code.putln('__Pyx_AddTraceback("%s");' % self.entry.qualified_name)
warning(self.entry.pos, "Unraisable exception in function '%s'." \
% self.entry.qualified_name, 0)
code.putln(
- '__Pyx_WriteUnraisable("%s");' %
+ '__Pyx_WriteUnraisable("%s");' %
self.entry.qualified_name)
env.use_utility_code(unraisable_exception_utility_code)
env.use_utility_code(restore_exception_utility_code)
for entry in lenv.var_entries:
if lenv.control_flow.get_state((entry.name, 'initialized')) is not True:
entry.xdecref_cleanup = 1
-
+
for entry in lenv.var_entries:
if entry.type.is_pyobject:
if entry.used and not entry.in_closure:
code.put_var_decref(entry)
if self.needs_closure:
code.put_decref(Naming.cur_scope_cname, lenv.scope_class.type)
-
+
# ----- Return
# This code is duplicated in ModuleNode.generate_module_init_func
if not lenv.nogil:
if self.entry.is_special and self.entry.name == "__hash__":
# Returning -1 for __hash__ is supposed to signal an error
- # We do as Python instances and coerce -1 into -2.
+ # We do as Python instances and coerce -1 into -2.
code.putln("if (unlikely(%s == -1) && !PyErr_Occurred()) %s = -2;" % (
Naming.retval_cname, Naming.retval_cname))
code.put_trace_return("Py_None")
if not lenv.nogil:
code.put_finish_refcount_context()
-
+
if acquire_gil:
code.putln("#ifdef WITH_THREAD")
code.putln("PyGILState_Release(_save);")
if not self.return_type.is_void:
code.putln("return %s;" % Naming.retval_cname)
-
+
code.putln("}")
if preprocessor_guard:
error(arg.pos,
"Argument type '%s' is incomplete" % arg.type)
return env.declare_arg(arg.name, arg.type, arg.pos)
-
+
def generate_arg_type_test(self, arg, code):
# Generate type test for one argument.
if arg.type.typeobj_is_available():
arg_code = "((PyObject *)%s)" % arg.entry.cname
code.putln(
'if (unlikely(!__Pyx_ArgTypeTest(%s, %s, %d, "%s", %s))) %s' % (
- arg_code,
+ arg_code,
typeptr_cname,
arg.accept_none,
arg.name,
arg.name,
code.error_goto(arg.pos)))
code.putln('}')
-
+
def generate_wrapper_functions(self, code):
pass
# Python 3.0 betas have a bug in memoryview which makes it call
# getbuffer with a NULL parameter. For now we work around this;
# the following line should be removed when this bug is fixed.
- code.putln("if (%s == NULL) return 0;" % info)
+ code.putln("if (%s == NULL) return 0;" % info)
code.putln("%s->obj = Py_None; __Pyx_INCREF(Py_None);" % info)
code.put_giveref("%s->obj" % info) # Do not refnanny object within structs
# py_func wrapper for calling from Python
# overridable whether or not this is a cpdef function
# inline_in_pxd whether this is an inline function in a pxd file
-
+
child_attrs = ["base_type", "declarator", "body", "py_func"]
inline_in_pxd = False
def unqualified_name(self):
return self.entry.name
-
+
def analyse_declarations(self, env):
self.directive_locals.update(env.directives['locals'])
base_type = self.base_type.analyse(env)
# The 2 here is because we need both function and argument names.
name_declarator, type = self.declarator.analyse(base_type, env, nonempty = 2 * (self.body is not None))
if not type.is_cfunction:
- error(self.pos,
+ error(self.pos,
"Suite attached to non-function declaration")
# Remember the actual type according to the function header
# written here, because the type in the symbol table entry
name = name_declarator.name
cname = name_declarator.cname
self.entry = env.declare_cfunction(
- name, type, self.pos,
+ name, type, self.pos,
cname = cname, visibility = self.visibility,
defining = self.body is not None,
api = self.api, modifiers = self.modifiers)
self.entry.inline_func_in_pxd = self.inline_in_pxd
self.return_type = type.return_type
-
+
if self.overridable and not env.is_module_scope:
if len(self.args) < 1 or not self.args[0].type.is_pyobject:
# An error will be produced in the cdef function
self.overridable = False
-
+
if self.overridable:
import ExprNodes
py_func_body = self.call_self_node(is_module_scope = env.is_module_scope)
- self.py_func = DefNode(pos = self.pos,
+ self.py_func = DefNode(pos = self.pos,
name = self.entry.name,
args = self.args,
star_arg = None,
self.override = OverrideCheckNode(self.pos, py_func = self.py_func)
self.body = StatListNode(self.pos, stats=[self.override, self.body])
self.create_local_scope(env)
-
+
def call_self_node(self, omit_optional_args=0, is_module_scope=0):
import ExprNodes
args = self.type.args
skip_dispatch = not is_module_scope or Options.lookup_module_cpdef
c_call = ExprNodes.SimpleCallNode(self.pos, function=cfunc, args=[ExprNodes.NameNode(self.pos, name=n) for n in arg_names[1-is_module_scope:]], wrapper_call=skip_dispatch)
return ReturnStatNode(pos=self.pos, return_type=PyrexTypes.py_object_type, value=c_call)
-
+
def declare_arguments(self, env):
for arg in self.type.args:
if not arg.name:
self.modifiers[self.modifiers.index('inline')] = 'cython_inline'
code.putln("%s%s %s {" % (
storage_class,
- ' '.join(self.modifiers).upper(), # macro forms
+ ' '.join(self.modifiers).upper(), # macro forms
header))
def generate_argument_declarations(self, env, code):
def generate_keyword_list(self, code):
    """No keyword list is emitted for this node."""
-
+
def generate_argument_parsing_code(self, env, code):
i = 0
if self.type.optional_arg_count:
for _ in range(self.type.optional_arg_count):
code.putln('}')
code.putln('}')
-
+
def generate_argument_conversion_code(self, code):
    """No argument conversion code is emitted for this node."""
-
+
def generate_argument_type_tests(self, code):
# Generate type tests for args whose type in a parent
# class is a supertype of the declared type.
else:
#return None
return self.entry.type.exception_value
-
+
def caller_will_check_exceptions(self):
    """Report whether this function's C type declares an exception check."""
    return self.entry.type.exception_check
-
+
def generate_wrapper_functions(self, code):
# If the C signature of a function has changed, we need to generate
- # wrappers to put in the slots here.
+ # wrappers to put in the slots here.
k = 0
entry = self.entry
func_type = entry.type
entry = entry.prev_entry
entry.func_cname = "%s%swrap_%s" % (self.entry.func_cname, Naming.pyrex_prefix, k)
code.putln()
- self.generate_function_header(code,
+ self.generate_function_header(code,
0,
- with_dispatch = entry.type.is_overridable,
- with_opt_args = entry.type.optional_arg_count,
+ with_dispatch = entry.type.is_overridable,
+ with_opt_args = entry.type.optional_arg_count,
cname = entry.func_cname)
if not self.return_type.is_void:
code.put('return ')
arglist.append('NULL')
code.putln('%s(%s);' % (self.entry.func_cname, ', '.join(arglist)))
code.putln('}')
-
+
class PyArgDeclNode(Node):
# Argument which must be a Python object (used
# when the def statement is inside a Python class definition.
#
# assmt AssignmentNode Function construction/assignment
-
+
child_attrs = ["args", "star_arg", "starstar_arg", "body", "decorators"]
lambda_name = None
self.num_kwonly_args = k
self.num_required_kw_args = rk
self.num_required_args = r
-
+
def as_cfunction(self, cfunc=None, scope=None):
if self.star_arg:
error(self.star_arg.pos, "cdef function cannot have star argument")
exception_value = None
else:
exception_value = ExprNodes.ConstNode(self.pos, value=cfunc_type.exception_value, type=cfunc_type.return_type)
- declarator = CFuncDeclaratorNode(self.pos,
+ declarator = CFuncDeclaratorNode(self.pos,
base = CNameDeclaratorNode(self.pos, name=self.name, cname=None),
args = self.args,
has_varargs = False,
exception_value = exception_value,
with_gil = cfunc_type.with_gil,
nogil = cfunc_type.nogil)
- return CFuncDefNode(self.pos,
+ return CFuncDefNode(self.pos,
modifiers = [],
base_type = CAnalysedBaseTypeNode(self.pos, type=cfunc_type.return_type),
declarator = declarator,
visibility = 'private',
api = False,
directive_locals = getattr(cfunc, 'directive_locals', {}))
-
+
def analyse_declarations(self, env):
self.is_classmethod = self.is_staticmethod = False
if self.decorators:
other_type = type_node.analyse_as_type(env)
if other_type is None:
error(type_node.pos, "Not a type")
- elif (type is not PyrexTypes.py_object_type
+ elif (type is not PyrexTypes.py_object_type
and not type.same_as(other_type)):
error(arg.base_type.pos, "Signature does not agree with previous declaration")
error(type_node.pos, "Previous declaration here")
def signature_has_generic_args(self):
return self.entry.signature.has_generic_args
-
+
def declare_pyfunction(self, env):
#print "DefNode.declare_pyfunction:", self.name, "in", env ###
name = self.name
entry.xdecref_cleanup = 1
arg.entry = entry
env.control_flow.set_state((), (arg.name, 'initialized'), True)
-
+
def analyse_expressions(self, env):
self.local_scope.directives = env.directives
self.analyse_default_values(env)
rhs = rhs)
self.assmt.analyse_declarations(env)
self.assmt.analyse_expressions(env)
-
+
def generate_function_header(self, code, with_pymethdef, proto_only=0):
arg_code_list = []
sig = self.entry.signature
"struct wrapperbase %s;" % self.entry.wrapperbase_cname)
if with_pymethdef:
code.put(
- "static PyMethodDef %s = " %
+ "static PyMethodDef %s = " %
self.entry.pymethdef_cname)
code.put_pymethoddef(self.entry, ";", allow_skip=False)
code.putln("%s {" % header)
code.error_goto_if(arg.type.error_condition(arg.entry.cname), arg.pos)))
else:
error(arg.pos, "Cannot convert Python object argument to type '%s'" % arg.type)
-
+
def generate_arg_xdecref(self, arg, code):
    """Emit an XDECREF for the argument's entry; a missing (falsy)
    argument is silently ignored."""
    if not arg:
        return
    code.put_var_xdecref(arg.entry)
-
+
def generate_arg_decref(self, arg, code):
    """Emit a DECREF for the argument's entry; a missing (falsy)
    argument is silently ignored."""
    if not arg:
        return
    code.put_var_decref(arg.entry)
error(arg.pos,
"Cannot convert 1 argument from '%s' to '%s'" %
(old_type, new_type))
-
+
def generate_arg_conversion_from_pyobject(self, arg, code):
new_type = arg.type
func = new_type.from_py_function
if new_type.is_enum:
rhs = PyrexTypes.typecast(new_type, PyrexTypes.c_long_type, rhs)
code.putln("%s = %s; %s" % (
- lhs,
+ lhs,
rhs,
code.error_goto_if(new_type.error_condition(arg.entry.cname), arg.pos)))
else:
- error(arg.pos,
- "Cannot convert Python object argument to type '%s'"
+ error(arg.pos,
+ "Cannot convert Python object argument to type '%s'"
% new_type)
-
+
def generate_arg_conversion_to_pyobject(self, arg, code):
old_type = arg.hdr_type
func = old_type.to_py_function
def error_value(self):
    """Return the error sentinel declared by this entry's signature."""
    return self.entry.signature.error_value
-
+
def caller_will_check_exceptions(self):
    """Always true (1): Python-level callers check for raised exceptions."""
    return 1
-
+
class OverrideCheckNode(StatNode):
# A Node for dispatching to the def method if it
- # is overriden.
+ # is overriden.
#
# py_func
#
# args
# func_temp
# body
-
+
child_attrs = ['body']
-
+
body = None
def analyse_expressions(self, env):
self.func_node = ExprNodes.RawCNameExprNode(self.pos, py_object_type)
call_tuple = ExprNodes.TupleNode(self.pos, args=[ExprNodes.NameNode(self.pos, name=arg.name) for arg in self.args[first_arg:]])
call_node = ExprNodes.SimpleCallNode(self.pos,
- function=self.func_node,
+ function=self.func_node,
args=[ExprNodes.NameNode(self.pos, name=arg.name) for arg in self.args[first_arg:]])
self.body = ReturnStatNode(self.pos, value=call_node)
self.body.analyse_expressions(env)
-
+
def generate_execution_code(self, code):
interned_attr_cname = code.intern_identifier(self.py_func.entry.name)
# Check to see if we are an extension type
child_attrs = ["body", "dict", "metaclass", "mkw", "bases", "classobj", "target"]
decorators = None
py3_style_class = False # Python3 style class (bases+kwargs)
-
+
def __init__(self, pos, name, bases, doc, body, decorators = None,
keyword_args = None, starstar_arg = None):
StatNode.__init__(self, pos)
self.classobj = ExprNodes.ClassNode(pos, name = name,
bases = bases, dict = self.dict, doc = doc_node)
self.target = ExprNodes.NameNode(pos, name = name)
-
+
def as_cclass(self):
"""
Return this node as if it were declared as an extension class
else:
error(self.classobj.bases.args.pos, "C class may only have one base class")
return None
-
- return CClassDefNode(self.pos,
+
+ return CClassDefNode(self.pos,
visibility = 'private',
module_name = None,
class_name = self.name,
body = self.body,
in_pxd = False,
doc = self.doc)
-
+
def create_scope(self, env):
genv = env
while genv.is_py_class_scope or genv.is_c_class_scope:
genv = genv.outer_scope
cenv = self.scope = PyClassScope(name = self.name, outer_scope = genv)
return cenv
-
+
def analyse_declarations(self, env):
self.target.analyse_target_declaration(env)
cenv = self.create_scope(env)
cenv.directives = env.directives
cenv.class_obj_cname = self.target.entry.cname
self.body.analyse_declarations(cenv)
-
+
def analyse_expressions(self, env):
if self.py3_style_class:
self.bases.analyse_expressions(env)
cenv = self.scope
self.body.analyse_expressions(cenv)
self.target.analyse_target_expression(env, self.classobj)
-
+
def generate_function_definitions(self, env, code):
self.generate_lambda_definitions(self.scope, code)
self.body.generate_function_definitions(self.scope, code)
-
+
def generate_execution_code(self, code):
code.pyclass_stack.append(self)
cenv = self.scope
error(self.pos, "Object struct name specification required for "
"C class defined in 'extern from' block")
self.base_type = None
- # Now that module imports are cached, we need to
- # import the modules for extern classes.
+ # Now that module imports are cached, we need to
+ # import the modules for extern classes.
if self.module_name:
self.module = None
for module in env.cimported_modules:
warning(self.pos, "%s already a builtin Cython type" % self.class_name, 1)
self.entry = home_scope.declare_c_class(
- name = self.class_name,
+ name = self.class_name,
pos = self.pos,
defining = has_body and self.in_pxd,
implementing = has_body and not self.in_pxd,
if self.doc and Options.docstrings:
scope.doc = embed_position(self.pos, self.doc)
-
+
if has_body:
self.body.analyse_declarations(scope)
if self.in_pxd:
else:
scope.implemented = 1
env.allocate_vtable_names(self.entry)
-
+
def analyse_expressions(self, env):
if self.body:
scope = self.entry.type.scope
self.body.analyse_expressions(scope)
-
+
def generate_function_definitions(self, env, code):
if self.body:
self.generate_lambda_definitions(self.scope, code)
self.body.generate_function_definitions(self.scope, code)
-
+
def generate_execution_code(self, code):
# This is needed to generate evaluation code for
# default values of method arguments.
if self.body:
self.body.generate_execution_code(code)
-
+
def annotate(self, code):
if self.body:
self.body.annotate(code)
# name string
# doc EncodedString or None Doc string
# body StatListNode
-
+
child_attrs = ["body"]
def analyse_declarations(self, env):
def analyse_expressions(self, env):
self.body.analyse_expressions(env)
-
+
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
# Global variable declaration.
#
# names [string]
-
+
child_attrs = []
def analyse_declarations(self, env):
def analyse_expressions(self, env):
pass
-
+
def generate_execution_code(self, code):
pass
# expr ExprNode
child_attrs = ["expr"]
-
+
def analyse_declarations(self, env):
import ExprNodes
if isinstance(self.expr, ExprNodes.GeneralCallNode):
else:
env.declare_var(var.value, type, var.pos, is_cdef = True)
self.__class__ = PassStatNode
-
+
def analyse_expressions(self, env):
self.expr.analyse_expressions(env)
-
+
def generate_execution_code(self, code):
self.expr.generate_evaluation_code(code)
if not self.expr.is_temp and self.expr.result():
def generate_execution_code(self, code):
self.generate_rhs_evaluation_code(code)
self.generate_assignment_code(code)
-
+
class SingleAssignmentNode(AssignmentNode):
# The simplest case:
# lhs ExprNode Left hand side
# rhs ExprNode Right hand side
# first bool Is this guaranteed the first assignment to lhs?
-
+
child_attrs = ["lhs", "rhs"]
first = False
declaration_only = False
def analyse_declarations(self, env):
import ExprNodes
-
+
# handle declarations of the form x = cython.foo()
if isinstance(self.rhs, ExprNodes.CallNode):
func_name = self.rhs.function.as_cython_attribute()
if func_name:
args, kwds = self.rhs.explicit_args_kwds()
-
+
if func_name in ['declare', 'typedef']:
if len(args) > 2 or kwds is not None:
error(self.rhs.pos, "Can only declare one type at a time.")
if not isinstance(lhs, ExprNodes.NameNode):
error(lhs.pos, "Invalid declaration.")
env.declare_typedef(lhs.name, type, self.pos, visibility='private')
-
+
elif func_name in ['struct', 'union']:
self.declaration_only = True
if len(args) > 0 or kwds is None:
env.declare_struct_or_union(name, func_name, scope, False, self.rhs.pos)
for member, type, pos in members:
scope.declare_var(member, type, pos)
-
+
if self.declaration_only:
return
else:
self.lhs.analyse_target_declaration(env)
-
+
def analyse_types(self, env, use_temp = 0):
self.rhs.analyse_types(env)
self.lhs.analyse_target_types(env)
self.rhs = self.rhs.coerce_to(self.lhs.type, env)
if use_temp:
self.rhs = self.rhs.coerce_to_temp(env)
-
+
def generate_rhs_evaluation_code(self, code):
self.rhs.generate_evaluation_code(code)
-
+
def generate_assignment_code(self, code):
self.lhs.generate_assignment_code(self.rhs, code)
# Used internally:
#
# coerced_rhs_list [ExprNode] RHS coerced to type of each LHS
-
+
child_attrs = ["lhs_list", "rhs", "coerced_rhs_list"]
coerced_rhs_list = None
def analyse_declarations(self, env):
for lhs in self.lhs_list:
lhs.analyse_target_declaration(env)
-
+
def analyse_types(self, env, use_temp = 0):
self.rhs.analyse_types(env)
if not self.rhs.is_simple():
def generate_rhs_evaluation_code(self, code):
self.rhs.generate_evaluation_code(code)
-
+
def generate_assignment_code(self, code):
for i in range(len(self.lhs_list)):
lhs = self.lhs_list[i]
lhs = self.lhs_list[i].annotate(code)
rhs = self.coerced_rhs_list[i].annotate(code)
self.rhs.annotate(code)
-
+
class ParallelAssignmentNode(AssignmentNode):
# A combined packing/unpacking assignment:
# before assigning to any of the left hand sides.
#
# stats [AssignmentNode] The constituent assignments
-
+
child_attrs = ["stats"]
def analyse_declarations(self, env):
for stat in self.stats:
stat.analyse_declarations(env)
-
+
def analyse_expressions(self, env):
for stat in self.stats:
stat.analyse_types(env, use_temp = 1)
# stat.analyse_expressions_1(env, use_temp = 1)
# for stat in self.stats:
# stat.analyse_expressions_2(env)
-
+
def generate_execution_code(self, code):
for stat in self.stats:
stat.generate_rhs_evaluation_code(code)
# op char one of "+-*/%^&|"
# dup (ExprNode) copy of lhs used for operation (auto-generated)
#
- # This code is a bit tricky because in order to obey Python
- # semantics the sub-expressions (e.g. indices) of the lhs must
- # not be evaluated twice. So we must re-use the values calculated
- # in evaluation phase for the assignment phase as well.
- # Fortunately, the type of the lhs node is fairly constrained
- # (it must be a NameNode, AttributeNode, or IndexNode).
-
+ # This code is a bit tricky because in order to obey Python
+ # semantics the sub-expressions (e.g. indices) of the lhs must
+ # not be evaluated twice. So we must re-use the values calculated
+ # in evaluation phase for the assignment phase as well.
+ # Fortunately, the type of the lhs node is fairly constrained
+ # (it must be a NameNode, AttributeNode, or IndexNode).
+
child_attrs = ["lhs", "rhs"]
def analyse_declarations(self, env):
self.lhs.analyse_target_declaration(env)
-
+
def analyse_types(self, env):
self.rhs.analyse_types(env)
self.lhs.analyse_target_types(env)
def annotate(self, code):
self.lhs.annotate(code)
self.rhs.annotate(code)
-
+
def create_binop_node(self):
import ExprNodes
return ExprNodes.binop_node(self.pos, self.operator, self.lhs, self.rhs)
if len(self.arg_tuple.args) == 1 and self.append_newline:
arg = self.arg_tuple.args[0]
arg.generate_evaluation_code(code)
-
+
code.putln(
"if (__Pyx_PrintOne(%s, %s) < 0) %s" % (
stream_result,
# del statement
#
# args [ExprNode]
-
+
child_attrs = ["args"]
def analyse_declarations(self, env):
for arg in self.args:
arg.analyse_target_declaration(env)
-
+
def analyse_expressions(self, env):
for arg in self.args:
arg.analyse_target_expression(env, None)
# pass statement
child_attrs = []
-
+
def analyse_expressions(self, env):
pass
-
+
def generate_execution_code(self, code):
pass
def analyse_expressions(self, env):
pass
-
+
def generate_execution_code(self, code):
if not code.break_label:
error(self.pos, "break statement not inside loop")
def analyse_expressions(self, env):
pass
-
+
def generate_execution_code(self, code):
if code.funcstate.in_try_finally:
error(self.pos, "continue statement inside try of try...finally")
#
# value ExprNode or None
# return_type PyrexType
-
+
child_attrs = ["value"]
def analyse_expressions(self, env):
if self.value:
self.value.analyse_types(env)
if return_type.is_void or return_type.is_returncode:
- error(self.value.pos,
+ error(self.value.pos,
"Return with value in void function")
else:
self.value = self.value.coerce_to(env.return_type, env)
def generate_function_definitions(self, env, code):
if self.value is not None:
self.value.generate_function_definitions(env, code)
-
+
def annotate(self, code):
if self.value:
self.value.annotate(code)
# exc_type ExprNode or None
# exc_value ExprNode or None
# exc_tb ExprNode or None
-
+
child_attrs = ["exc_type", "exc_value", "exc_tb"]
def analyse_expressions(self, env):
code.putln(code.error_goto(self.pos))
else:
error(self.pos, "Reraise not inside except clause")
-
+
class AssertStatNode(StatNode):
# assert statement
#
# cond ExprNode
# value ExprNode or None
-
+
child_attrs = ["cond", "value"]
def analyse_expressions(self, env):
nogil_check = Node.gil_error
gil_message = "Raising exception"
-
+
def generate_execution_code(self, code):
code.putln("#ifndef CYTHON_WITHOUT_ASSERTIONS")
self.cond.generate_evaluation_code(code)
# else_clause StatNode or None
child_attrs = ["if_clauses", "else_clause"]
-
+
def analyse_control_flow(self, env):
env.start_branching(self.pos)
for if_clause in self.if_clauses:
if_clause.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
-
+
def analyse_expressions(self, env):
for if_clause in self.if_clauses:
if_clause.analyse_expressions(env)
#
# condition ExprNode
# body StatNode
-
+
child_attrs = ["condition", "body"]
def analyse_control_flow(self, env):
self.body.analyse_control_flow(env)
-
+
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
-
+
def analyse_expressions(self, env):
self.condition = \
self.condition.analyse_temp_boolean_expression(env)
def annotate(self, code):
self.condition.annotate(code)
self.body.annotate(code)
-
+
class SwitchCaseNode(StatNode):
# Generated in the optimization of an if-elif-else node
#
# conditions [ExprNode]
# body StatNode
-
+
child_attrs = ['conditions', 'body']
def generate_execution_code(self, code):
for cond in self.conditions:
cond.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
-
+
def annotate(self, code):
for cond in self.conditions:
cond.annotate(code)
# test ExprNode
# cases [SwitchCaseNode]
# else_clause StatNode or None
-
+
child_attrs = ['test', 'cases', 'else_clause']
-
+
def generate_execution_code(self, code):
self.test.generate_evaluation_code(code)
code.putln("switch (%s) {" % self.test.result())
case.annotate(code)
if self.else_clause is not None:
self.else_clause.annotate(code)
-
+
class LoopNode(object):
-
+
def analyse_control_flow(self, env):
env.start_branching(self.pos)
self.body.analyse_control_flow(env)
self.else_clause.analyse_control_flow(env)
env.finish_branching(self.end_pos())
-
+
class WhileStatNode(LoopNode, StatNode):
# while statement
#
self.body.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
-
+
def analyse_expressions(self, env):
self.condition = \
self.condition.analyse_temp_boolean_expression(env)
self.body.analyse_expressions(env)
if self.else_clause:
self.else_clause.analyse_expressions(env)
-
+
def generate_execution_code(self, code):
old_loop_labels = code.new_loop_labels()
code.putln(
# body StatNode
# else_clause StatNode
# item NextNode used internally
-
+
child_attrs = ["target", "iterator", "body", "else_clause"]
item = None
-
+
def analyse_declarations(self, env):
self.target.analyse_target_declaration(env)
self.body.analyse_declarations(env)
if isinstance(self.step, ExprNodes.UnaryMinusNode):
warning(self.step.pos, "Probable infinite loop in for-from-by statment. Consider switching the directions of the relations.", 2)
self.step.analyse_types(env)
-
+
target_type = self.target.type
if self.target.type.is_numeric:
loop_type = self.target.type
if not self.bound2.is_literal:
self.bound2 = self.bound2.coerce_to_temp(env)
if self.step is not None:
- self.step = self.step.coerce_to(loop_type, env)
+ self.step = self.step.coerce_to(loop_type, env)
if not self.step.is_literal:
self.step = self.step.coerce_to_temp(env)
self.body.analyse_expressions(env)
if self.else_clause:
self.else_clause.analyse_expressions(env)
-
+
def generate_execution_code(self, code):
old_loop_labels = code.new_loop_labels()
from_range = self.from_range
self.body.generate_execution_code(code)
code.put_label(code.continue_label)
if self.py_loopvar_node:
- # This mess is to make for..from loops with python targets behave
- # exactly like those with C targets with regards to re-assignment
- # of the loop variable.
+ # This mess is to make for..from loops with python targets behave
+ # exactly like those with C targets with regards to re-assignment
+ # of the loop variable.
import ExprNodes
if self.target.entry.is_pyglobal:
- # We know target is a NameNode, this is the only ugly case.
+ # We know target is a NameNode, this is the only ugly case.
target_node = ExprNodes.PyTempNode(self.target.pos, None)
target_node.allocate(code)
interned_cname = code.intern_identifier(self.target.entry.name)
code.globalstate.use_utility_code(ExprNodes.get_name_interned_utility_code)
code.putln("%s = __Pyx_GetName(%s, %s); %s" % (
target_node.result(),
- Naming.module_cname,
+ Naming.module_cname,
interned_cname,
code.error_goto_if_null(target_node.result(), self.target.pos)))
code.put_gotref(target_node.result())
target_node.release(code)
code.putln("}")
if self.py_loopvar_node:
- # This is potentially wasteful, but we don't want the semantics to
- # depend on whether or not the loop is a python type.
+ # This is potentially wasteful, but we don't want the semantics to
+ # depend on whether or not the loop is a python type.
self.py_loopvar_node.generate_evaluation_code(code)
self.target.generate_assignment_code(self.py_loopvar_node, code)
if from_range:
if self.step is not None:
self.step.generate_disposal_code(code)
self.step.free_temps(code)
-
+
relation_table = {
# {relop : (initial offset, increment op)}
'<=': ("", "++"),
self.body.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
-
+
def annotate(self, code):
self.target.annotate(code)
self.bound1.annotate(code)
class WithStatNode(StatNode):
"""
Represents a Python with statement.
-
+
This is only used at parse tree level; and is not present in
analysis or generation phases.
"""
# else_clause StatNode or None
child_attrs = ["body", "except_clauses", "else_clause"]
-
+
def analyse_control_flow(self, env):
env.start_branching(self.pos)
self.body.analyse_control_flow(env)
successful_try = env.control_flow # grab this for later
env.next_branch(self.body.end_pos())
env.finish_branching(self.body.end_pos())
-
+
env.start_branching(self.except_clauses[0].pos)
for except_clause in self.except_clauses:
except_clause.analyse_control_flow(env)
env.next_branch(except_clause.end_pos())
-
+
# the else cause it executed only when the try clause finishes
env.control_flow.incoming = successful_try
if self.else_clause:
# excinfo_target is never set by the parser, but can be set by a transform
# in order to extract more extensive information about the exception as a
# sys.exc_info()-style tuple into a target variable
-
+
child_attrs = ["pattern", "target", "body", "exc_value", "excinfo_target"]
exc_value = None
if self.excinfo_target is not None:
self.excinfo_target.analyse_target_declaration(env)
self.body.analyse_declarations(env)
-
+
def analyse_expressions(self, env):
import ExprNodes
genv = env.global_scope()
code.put_goto(end_label)
code.putln("}")
return
-
+
exc_vars = [code.funcstate.allocate_temp(py_object_type,
manage_ref=True)
for i in xrange(3)]
code.putln('__Pyx_AddTraceback("%s");' % self.function_name)
# We always have to fetch the exception value even if
- # there is no target, because this also normalises the
+ # there is no target, because this also normalises the
# exception and stores it in the thread state.
code.globalstate.use_utility_code(get_exception_utility_code)
exc_args = "&%s, &%s, &%s" % tuple(exc_vars)
for var in exc_vars:
code.putln("__Pyx_DECREF(%s); %s = 0;" % (var, var))
code.put_goto(end_label)
-
+
if code.label_used(code.break_label):
code.put_label(code.break_label)
for var in exc_vars:
if self.target is not None:
self.target.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
-
+
def annotate(self, code):
if self.pattern:
for pattern in self.pattern:
# it on exit.
child_attrs = ["body", "finally_clause"]
-
+
preserve_exception = 1
-
+
disallow_continue_in_try_finally = 0
# There doesn't seem to be any point in disallowing
# continue in the try block, since we have no problem
node = TryFinallyStatNode(pos, body=body, finally_clause=finally_clause)
return node
create_analysed = staticmethod(create_analysed)
-
+
def analyse_control_flow(self, env):
env.start_branching(self.pos)
self.body.analyse_control_flow(env)
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
self.finally_clause.analyse_declarations(env)
-
+
def analyse_expressions(self, env):
self.body.analyse_expressions(env)
self.finally_clause.analyse_expressions(env)
new_label = new_labels[i]
#if new_label and new_label != "<try>":
if new_label == new_error_label and self.preserve_exception:
- self.put_error_catcher(code,
+ self.put_error_catcher(code,
new_error_label, i+1, catch_label, temps_to_clean_up)
else:
code.put('%s: ' % new_label)
Naming.exc_lineno_name, Naming.lineno_cname))
code.put_goto(catch_label)
code.putln("}")
-
+
def put_error_uncatcher(self, code, i, error_label):
code.globalstate.use_utility_code(restore_exception_utility_code)
code.putln(
# state string 'gil' or 'nogil'
# child_attrs = []
-
+
preserve_exception = 0
def __init__(self, pos, state, body):
# as_name string or None Name specified in "as" clause, if any
child_attrs = []
-
+
def analyse_declarations(self, env):
if not env.is_module_scope:
error(self.pos, "cimport only allowed at module level")
def analyse_expressions(self, env):
pass
-
+
def generate_execution_code(self, code):
pass
-
+
class FromCImportStatNode(StatNode):
# from ... cimport statement
#
# module_name string Qualified name of module
# imported_names [(pos, name, as_name, kind)] Names to be imported
-
+
child_attrs = []
def analyse_declarations(self, env):
else:
error(pos, "Name '%s' not declared in module '%s'"
% (name, self.module_name))
-
+
if entry:
local_name = as_name or name
env.add_imported_entry(local_name, entry, pos)
-
+
def declaration_matches(self, entry, kind):
if not entry.is_type:
return 0
def analyse_expressions(self, env):
pass
-
+
def generate_execution_code(self, code):
pass
child_attrs = ["module"]
import_star = 0
-
+
def analyse_declarations(self, env):
for name, target in self.items:
if name == "*":
self.import_star = 1
else:
target.analyse_target_declaration(env)
-
+
def analyse_expressions(self, env):
import ExprNodes
self.module.analyse_expressions(env)
if entry.type.module_name == module.qualified_name:
continue
except AttributeError:
- pass
+ pass
target.analyse_target_expression(env, None)
if target.type is py_object_type:
coerced_item = None
else:
coerced_item = self.item.coerce_to(target.type, env)
self.interned_items.append((name, target, coerced_item))
-
+
def generate_execution_code(self, code):
self.module.generate_evaluation_code(code)
if self.import_star:
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
- #define CYTHON_INLINE
+ #define CYTHON_INLINE
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
-# define CYTHON_UNUSED __attribute__ ((__unused__))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || defined(__INTEL_COMPILER)
-# define CYTHON_UNUSED __attribute__ ((__unused__))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
-# define CYTHON_UNUSED
+# define CYTHON_UNUSED
# endif
#endif
"""
#ifdef __GNUC__
/* Test for GCC > 2.95 */
-#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
+#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* __GNUC__ > 2 ... */
#------------------------------------------------------------------------------------
-# Note that cPython ignores PyTrace_EXCEPTION,
-# but maybe some other profilers don't.
+# Note that CPython ignores PyTrace_EXCEPTION,
+# but maybe some other profilers don't.
profile_utility_code = UtilityCode(proto="""
#ifndef CYTHON_PROFILE
#define __Pyx_TraceDeclarations \\
static PyCodeObject *%(FRAME_CODE)s = NULL; \\
CYTHON_FRAME_MODIFIER PyFrameObject *%(FRAME)s = NULL; \\
- int __Pyx_use_tracing = 0;
+ int __Pyx_use_tracing = 0;
#define __Pyx_TraceCall(funcname, srcfile, firstlineno) \\
if (unlikely(PyThreadState_GET()->use_tracing && PyThreadState_GET()->c_profilefunc)) { \\
#else
#define __Pyx_TraceDeclarations
- #define __Pyx_TraceCall(funcname, srcfile, firstlineno)
- #define __Pyx_TraceException()
- #define __Pyx_TraceReturn(result)
+ #define __Pyx_TraceCall(funcname, srcfile, firstlineno)
+ #define __Pyx_TraceException()
+ #define __Pyx_TraceReturn(result)
#endif /* CYTHON_PROFILE */
-"""
+"""
% {
"FRAME": Naming.frame_cname,
"FRAME_CODE": Naming.frame_code_cname,
%(EMPTY_BYTES)s /*PyObject *lnotab*/
);
-bad:
+bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
-
+
return py_code;
}
self.visitchildren(node)
self.current_scope = oldscope
return node
-
+
def visit_PrimaryCmpNode(self, node):
if node.is_ptr_contains():
-
+
# for t in operand2:
# if operand1 == t:
# res = True
# break
# else:
# res = False
-
+
pos = node.pos
res_handle = UtilNodes.TempHandle(PyrexTypes.c_bint_type)
res = res_handle.ref(pos)
cmp_node = ExprNodes.PrimaryCmpNode(
pos, operator=u'==', operand1=node.operand1, operand2=target)
if_body = Nodes.StatListNode(
- pos,
+ pos,
stats = [Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=1)),
Nodes.BreakStatNode(pos)])
if_node = Nodes.IfStatNode(
for_loop.analyse_expressions(self.current_scope)
for_loop = self(for_loop)
new_node = UtilNodes.TempResultFromStatNode(result_ref, for_loop)
-
+
if node.operator == 'not_in':
new_node = ExprNodes.NotNode(pos, operand=new_node)
return new_node
def visit_ForInStatNode(self, node):
self.visitchildren(node)
return self._optimise_for_loop(node)
-
+
def _optimise_for_loop(self, node):
iterator = node.iterator.sequence
if iterator.type is Builtin.dict_type:
class SwitchTransform(Visitor.VisitorTransform):
"""
- This transformation tries to turn long if statements into C switch statements.
+ This transformation tries to turn long if statements into C switch statements.
The requirement is that every clause be an (or of) var == value, where the var
- is common among all clauses and both var and value are ints.
+ is common among all clauses and both var and value are ints.
"""
NO_MATCH = (None, None, None)
return UtilNodes.TempResultFromStatNode(result_ref, switch_node)
visit_Node = Visitor.VisitorTransform.recurse_to_children
-
+
class FlattenInListTransform(Visitor.VisitorTransform, SkipDeclarations):
"""
This transformation flattens "x in [val1, ..., valn]" into a sequential list
- of comparisons.
+ of comparisons.
"""
-
+
def visit_PrimaryCmpNode(self, node):
self.visitchildren(node)
if node.cascade is not None:
operand2 = arg,
cascade = None)
conds.append(ExprNodes.TypecastNode(
- pos = node.pos,
+ pos = node.pos,
operand = cond,
type = PyrexTypes.c_bint_type))
def concat(left, right):
return ExprNodes.BoolBinopNode(
- pos = node.pos,
+ pos = node.pos,
operator = conjunction,
operand1 = left,
operand2 = right)
if not index_id:
return node
rindices.append(index_id)
-
+
if set(lindices) != set(rindices):
return node
if len(set(lindices)) != len(right_indices):
is_temp = node.is_temp,
utility_code = pop_index_utility_code
)
-
+
return node
_handle_simple_method_list_pop = _handle_simple_method_object_pop
class FinalOptimizePhase(Visitor.CythonTransform):
"""
This visitor handles several commuting optimizations, and is run
- just before the C code generation phase.
-
- The optimizations currently implemented in this class are:
- - eliminate None assignment and refcounting for first assignment.
+ just before the C code generation phase.
+
+ The optimizations currently implemented in this class are:
+ - eliminate None assignment and refcounting for first assignment.
- isinstance -> typecheck for cdef types
- eliminate checks for None and/or types that became redundant after tree changes
"""
pre_import = None
docstrings = True
-# Decref global variables in this module on exit for garbage collection.
+# Decref global variables in this module on exit for garbage collection.
# 0: None, 1+: interned objects, 2+: cdef globals, 3+: types objects
# Mostly for reducing noise for Valgrind, only executes at process exit
-# (when all memory will be reclaimed anyways).
+# (when all memory will be reclaimed anyways).
generate_cleanup_code = 0
annotate = 0
# to keep going and printing further error messages.
fast_fail = False
-# This will convert statements of the form "for i in range(...)"
+# This will convert statements of the form "for i in range(...)"
# to "for i from ..." when i is a cdef'd integer type, and the direction
-# (i.e. sign of step) can be determined.
-# WARNING: This may change the semantics if the range causes assignment to
+# (i.e. sign of step) can be determined.
+# WARNING: This may change the semantics if the range causes assignment to
# i to overflow. Specifically, if this option is set, an error will be
# raised before the loop is entered, wheras without this option the loop
-# will execute until an overflowing value is encountered.
+# will execute until an overflowing value is encountered.
convert_range = 1
-# Enable this to allow one to write your_module.foo = ... to overwrite the
-# definition if the cpdef function foo, at the cost of an extra dictionary
-# lookup on every call.
-# If this is 0 it simply creates a wrapper.
+# Enable this to allow one to write your_module.foo = ... to overwrite the
+# definition if the cpdef function foo, at the cost of an extra dictionary
+# lookup on every call.
+# If this is 0 it simply creates a wrapper.
lookup_module_cpdef = 0
-# This will set local variables to None rather than NULL which may cause
-# surpress what would be an UnboundLocalError in pure Python but eliminates
-# checking for NULL on every use, and can decref rather than xdecref at the end.
+# This will set local variables to None rather than NULL which may cause
+# suppress what would be an UnboundLocalError in pure Python but eliminates
+# checking for NULL on every use, and can decref rather than xdecref at the end.
# WARNING: This is a work in progress, may currently segfault.
init_local_none = 1
-# Append the c file and line number to the traceback for exceptions.
+# Append the c file and line number to the traceback for exceptions.
c_line_in_traceback = 1
-# Whether or not to embed the Python interpreter, for use in making a
-# standalone executable. This will provide a main() method which simply
-# executes the body of this module.
+# Whether or not to embed the Python interpreter, for use in making a
+# standalone executable. This will provide a main() method which simply
+# executes the body of this module.
embed = False
'autotestdict.all': False,
'language_level': 2,
'fast_getattr': False, # Undocumented until we come up with a better way to handle this everywhere.
-
+
'warn': None,
'warn.undeclared': False,
Traceback (most recent call last):
...
ValueError: boundscheck directive must be set to True or False, got 'true'
-
+
"""
type = directive_types.get(name)
if not type: return None
class SkipDeclarations(object):
"""
- Variable and function declarations can often have a deep tree structure,
- and yet most transformations don't need to descend to this depth.
-
- Declaration nodes are removed after AnalyseDeclarationsTransform, so there
- is no need to use this for transformations after that point.
+ Variable and function declarations can often have a deep tree structure,
+ and yet most transformations don't need to descend to this depth.
+
+ Declaration nodes are removed after AnalyseDeclarationsTransform, so there
+ is no need to use this for transformations after that point.
"""
def visit_CTypeDefNode(self, node):
return node
-
+
def visit_CVarDefNode(self, node):
return node
-
+
def visit_CDeclaratorNode(self, node):
return node
-
+
def visit_CBaseTypeNode(self, node):
return node
-
+
def visit_CEnumDefNode(self, node):
return node
def visit_ParallelAssignmentNode(self, node):
return self.visit_StatNode(node, True)
-
+
def visit_CEnumDefNode(self, node):
return self.visit_StatNode(node, True)
return []
def visit_CDeclaratorNode(self, node):
- return node
+ return node
class PostParseError(CompileError): pass
- Default values to cdef assignments are turned into single
assignments following the declaration (everywhere but in class
bodies, where they raise a compile error)
-
+
- Interpret some node structures into Python runtime values.
Some nodes take compile-time arguments (currently:
TemplatedTypeNode[args] and __cythonbufferdefaults__ = {args}),
lhs_list = expr_list[:-1]
rhs = expr_list[-1]
if len(lhs_list) == 1:
- node = Nodes.SingleAssignmentNode(rhs.pos,
+ node = Nodes.SingleAssignmentNode(rhs.pos,
lhs = lhs_list[0], rhs = rhs)
else:
node = Nodes.CascadedAssignmentNode(rhs.pos,
- "def" functions are let through only if they fill the
getbuffer/releasebuffer slots
-
+
- cdef functions are let through only if they are on the
top level and are declared "inline"
"""
if (isinstance(node, Nodes.DefNode) and self.scope_type == 'cclass'
and node.name in ('__getbuffer__', '__releasebuffer__')):
err = None # allow these slots
-
+
if isinstance(node, Nodes.CFuncDefNode):
if u'inline' in node.modifiers and self.scope_type == 'pxd':
node.inline_in_pxd = True
return None
else:
return node
-
+
class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
"""
After parsing, directives can be stored in a number of places:
"""
unop_method_nodes = {
'typeof': ExprNodes.TypeofNode,
-
+
'operator.address': ExprNodes.AmpersandNode,
'operator.dereference': ExprNodes.DereferenceNode,
'operator.preincrement' : ExprNodes.inc_dec_constructor(True, '++'),
'operator.predecrement' : ExprNodes.inc_dec_constructor(True, '--'),
'operator.postincrement': ExprNodes.inc_dec_constructor(False, '++'),
'operator.postdecrement': ExprNodes.inc_dec_constructor(False, '--'),
-
+
# For backwards compatability.
'address': ExprNodes.AmpersandNode,
}
binop_method_nodes = {
'operator.comma' : ExprNodes.c_binop_constructor(','),
}
-
+
special_methods = cython.set(['declare', 'union', 'struct', 'typedef', 'sizeof',
'cast', 'pointer', 'compiled', 'NULL'])
special_methods.update(unop_method_nodes.keys())
return False
else:
return True
-
+
# Set up processing and handle the cython: comments.
def visit_ModuleNode(self, node):
for key, value in node.directive_comments.items():
# want to leave the cimport node sitting in the tree
return None
return node
-
+
def visit_FromCImportStatNode(self, node):
if (node.module_name == u"cython") or \
node.module_name.startswith(u"cython."):
return None
node.imported_names = newimp
return node
-
+
def visit_FromImportStatNode(self, node):
if (node.module.module_name.value == u"cython") or \
node.module.module_name.value.startswith(u"cython."):
def visit_SingleAssignmentNode(self, node):
if (isinstance(node.rhs, ExprNodes.ImportNode) and
node.rhs.module_name.value == u'cython'):
- node = Nodes.CImportStatNode(node.pos,
+ node = Nodes.CImportStatNode(node.pos,
module_name = u'cython',
as_name = node.lhs.name)
self.visit_CImportStatNode(node)
else:
self.visitchildren(node)
return node
-
+
def visit_NameNode(self, node):
if node.name in self.cython_module_names:
node.is_cython_module = True
directives=newdirectives)
self.directives = olddirectives
return directive
-
+
# Handle decorators
def visit_FuncDefNode(self, node):
directives = self._extract_directives(node, 'function')
if EXC:
EXIT(None, None, None)
MGR = EXIT = VALUE = EXC = None
-
+
""", temps=[u'MGR', u'EXC', u"EXIT", u"VALUE"],
pipeline=[NormalizeTree(None)])
# TODO: Cleanup badly needed
TemplateTransform.temp_name_counter += 1
handle = "__tmpvar_%d" % TemplateTransform.temp_name_counter
-
+
self.visitchildren(node, ['body'])
excinfo_temp = ExprNodes.NameNode(node.pos, name=handle)#TempHandle(Builtin.tuple_type)
if node.target is not None:
# node.pos, temps=[excinfo_temp], body=try_except)
return result
-
+
def visit_ExprNode(self, node):
# With statements are never inside expressions.
return node
-
+
class DecoratorTransform(CythonTransform, SkipDeclarations):
self.env_stack = [root.scope]
# needed to determine if a cdef var is declared after it's used.
self.seen_vars_stack = []
- return super(AnalyseDeclarationsTransform, self).__call__(root)
-
+ return super(AnalyseDeclarationsTransform, self).__call__(root)
+
def visit_NameNode(self, node):
self.seen_vars_stack[-1].add(node.name)
return node
self.visitchildren(node)
self.env_stack.pop()
return node
-
+
def visit_CClassDefNode(self, node):
node = self.visit_ClassDefNode(node)
if node.scope and node.scope.implemented:
if stats:
node.body.stats += stats
return node
-
+
def visit_FuncDefNode(self, node):
self.seen_vars_stack.append(cython.set())
lenv = node.local_scope
# necessary to ensure that all CNameDeclaratorNodes are visited.
self.visitchildren(node)
return node
-
+
def visit_CTypeDefNode(self, node):
return node
def visit_CBaseTypeNode(self, node):
return None
-
+
def visit_CEnumDefNode(self, node):
if node.visibility == 'public':
return node
# to ensure all CNameDeclaratorNodes are visited.
self.visitchildren(node)
return None
-
+
def create_Property(self, entry):
if entry.visibility == 'public':
if entry.type.is_pyobject:
template = self.basic_property_ro
property = template.substitute({
u"ATTR": ExprNodes.AttributeNode(pos=entry.pos,
- obj=ExprNodes.NameNode(pos=entry.pos, name="self"),
+ obj=ExprNodes.NameNode(pos=entry.pos, name="self"),
attribute=entry.name),
}, pos=entry.pos).stats[0]
property.name = entry.name
# ---------------------------------------
# XXX This should go to AutoDocTransforms
# ---------------------------------------
- if (Options.docstrings and
+ if (Options.docstrings and
self.current_directives['embedsignature']):
attr_name = entry.name
type_name = entry.type.declaration_code("", for_display=1)
node.body.analyse_expressions(node.scope)
self.visitchildren(node)
return node
-
+
def visit_FuncDefNode(self, node):
node.local_scope.infer_types()
node.body.analyse_expressions(node.local_scope)
return node
class ExpandInplaceOperators(EnvTransform):
-
+
def visit_InPlaceAssignmentNode(self, node):
lhs = node.lhs
rhs = node.rhs
except ValueError:
return node
dup = lhs.__class__(**lhs.__dict__)
- binop = ExprNodes.binop_node(node.pos,
+ binop = ExprNodes.binop_node(node.pos,
operator = node.operator,
operand1 = dup,
operand2 = rhs,
dup.analyse_types(env)
binop.analyse_operation(env)
node = Nodes.SingleAssignmentNode(
- node.pos,
+ node.pos,
lhs = lhs,
rhs=binop.coerce_to(lhs.type, env))
# Use LetRefNode to avoid side effects.
class AlignFunctionDefinitions(CythonTransform):
"""
- This class takes the signatures from a .pxd file and applies them to
- the def methods in a .py file.
+ This class takes the signatures from a .pxd file and applies them to
+ the def methods in a .py file.
"""
-
+
def visit_ModuleNode(self, node):
self.scope = node.scope
self.directives = node.directives
self.visitchildren(node)
return node
-
+
def visit_PyClassDefNode(self, node):
pxd_def = self.scope.lookup(node.name)
if pxd_def:
return None
else:
return node
-
+
def visit_CClassDefNode(self, node, pxd_def=None):
if pxd_def is None:
pxd_def = self.scope.lookup(node.class_name)
if pxd_def:
self.scope = outer_scope
return node
-
+
def visit_DefNode(self, node):
pxd_def = self.scope.lookup(node.name)
if pxd_def:
node = node.as_cfunction(pxd_def)
elif self.scope.is_module_scope and self.directives['auto_cpdef']:
node = node.as_cfunction(scope=self.scope)
- # Enable this when internal def functions are allowed.
+ # Enable this when internal def functions are allowed.
# self.visitchildren(node)
return node
-
+
class MarkClosureVisitor(CythonTransform):
node.needs_closure = self.needs_closure
self.needs_closure = True
return node
-
+
def visit_CFuncDefNode(self, node):
self.visit_FuncDefNode(node)
if node.needs_closure:
else:
self.visitchildren(node)
return node
-
+
def visit_AttributeNode(self, node):
self.visitchildren(node)
return self.visit_cython_attribute(node)
def visit_NameNode(self, node):
return self.visit_cython_attribute(node)
-
+
def visit_cython_attribute(self, node):
attribute = node.as_cython_attribute()
if attribute:
node.function = ExprNodes.NameNode(node.pos, name=EncodedString('set'))
else:
error(node.function.pos, u"'%s' not a valid cython language construct" % function)
-
+
self.visitchildren(node)
return node
Create debug information and all functions' visibility to extern in order
to enable debugging.
"""
-
+
def __init__(self, context, options, result):
super(DebugTransform, self).__init__(context)
self.visited = cython.set()
- # our treebuilder and debug output writer
+ # our treebuilder and debug output writer
# (see Cython.Debugger.debug_output.CythonDebugWriter)
self.tb = self.context.gdb_debug_outputwriter
- #self.c_output_file = options.output_file
+ #self.c_output_file = options.output_file
self.c_output_file = result.c_file
-
+
# tells visit_NameNode whether it should register step-into functions
self.register_stepinto = False
-
+
def visit_ModuleNode(self, node):
self.tb.module_name = node.full_module_name
attrs = dict(
module_name=node.full_module_name,
filename=node.pos[0].filename,
c_filename=self.c_output_file)
-
+
self.tb.start('Module', attrs)
-
+
# serialize functions
self.tb.start('Functions')
self.visitchildren(node)
self.tb.end('Functions')
-
+
# 2.3 compatibility. Serialize global variables
self.tb.start('Globals')
entries = {}
for k, v in node.scope.entries.iteritems():
if (v.qualified_name not in self.visited and not
- v.name.startswith('__pyx_') and not
+ v.name.startswith('__pyx_') and not
v.type.is_cfunction and not
v.type.is_extension_type):
entries[k]= v
-
+
self.serialize_local_variables(entries)
self.tb.end('Globals')
# self.tb.end('Module') # end Module after the line number mapping in
# Cython.Compiler.ModuleNode.ModuleNode._serialize_lineno_map
return node
-
+
def visit_FuncDefNode(self, node):
self.visited.add(node.local_scope.qualified_name)
# node.entry.visibility = 'extern'
pf_cname = ''
else:
pf_cname = node.py_func.entry.func_cname
-
+
attrs = dict(
name=node.entry.name,
cname=node.entry.func_cname,
pf_cname=pf_cname,
qualified_name=node.local_scope.qualified_name,
lineno=str(node.pos[1]))
-
+
self.tb.start('Function', attrs=attrs)
-
+
self.tb.start('Locals')
self.serialize_local_variables(node.local_scope.entries)
self.tb.end('Locals')
return node
def visit_NameNode(self, node):
- if (self.register_stepinto and
- node.type.is_cfunction and
+ if (self.register_stepinto and
+ node.type.is_cfunction and
getattr(node, 'is_called', False) and
node.entry.func_cname is not None):
- # don't check node.entry.in_cinclude, as 'cdef extern: ...'
- # declared functions are not 'in_cinclude'.
- # This means we will list called 'cdef' functions as
- # "step into functions", but this is not an issue as they will be
+ # don't check node.entry.in_cinclude, as 'cdef extern: ...'
+ # declared functions are not 'in_cinclude'.
+ # This means we will list called 'cdef' functions as
+ # "step into functions", but this is not an issue as they will be
# recognized as Cython functions anyway.
attrs = dict(name=node.entry.func_cname)
self.tb.start('StepIntoFunction', attrs=attrs)
self.tb.end('StepIntoFunction')
-
+
self.visitchildren(node)
return node
-
+
def serialize_local_variables(self, entries):
for entry in entries.values():
if entry.type.is_pyobject:
vartype = 'PythonObject'
else:
vartype = 'CObject'
-
+
cname = entry.cname
# if entry.type.is_extension_type:
# cname = entry.type.typeptr_cname
-
+
if not entry.pos:
# this happens for variables that are not in the user's code,
# e.g. for the global __builtins__, __doc__, etc. We can just
lineno = '0'
else:
lineno = str(entry.pos[1])
-
+
attrs = dict(
name=entry.name,
cname=cname,
qualified_name=entry.qualified_name,
type=vartype,
lineno=lineno)
-
+
self.tb.start('LocalVar', attrs)
self.tb.end('LocalVar')
-
+
pos = s.position()
op = p_cmp_op(s)
n2 = p_starred_expr(s)
- n1 = ExprNodes.PrimaryCmpNode(pos,
+ n1 = ExprNodes.PrimaryCmpNode(pos,
operator = op, operand1 = n1, operand2 = n2)
if s.sy in comparison_ops:
n1.cascade = p_cascaded_cmp(s)
pos = s.position()
op = p_cmp_op(s)
n2 = p_starred_expr(s)
- result = ExprNodes.CascadedCmpNode(pos,
+ result = ExprNodes.CascadedCmpNode(pos,
operator = op, operand2 = n2)
if s.sy in comparison_ops:
result.cascade = p_cascaded_cmp(s)
if op == '<>':
op = '!='
return op
-
+
comparison_ops = (
- '<', '>', '==', '>=', '<=', '<>', '!=',
+ '<', '>', '==', '>=', '<=', '<>', '!=',
'in', 'is', 'not'
)
typecheck = 0
s.expect(">")
operand = p_factor(s)
- return ExprNodes.TypecastNode(pos,
- base_type = base_type,
+ return ExprNodes.TypecastNode(pos,
+ base_type = base_type,
declarator = declarator,
operand = operand,
typecheck = typecheck)
s.next()
s.expect('(')
# Here we decide if we are looking at an expression or type
- # If it is actually a type, but parsable as an expression,
- # we treat it as an expression here.
+ # If it is actually a type, but parsable as an expression,
+ # we treat it as an expression here.
if looking_at_expr(s):
operand = p_test(s)
node = ExprNodes.SizeofVarNode(pos, operand = operand)
else:
base_type = p_c_base_type(s)
declarator = p_c_declarator(s, empty = 1)
- node = ExprNodes.SizeofTypeNode(pos,
+ node = ExprNodes.SizeofTypeNode(pos,
base_type = base_type, declarator = declarator)
s.expect(')')
return node
else: # s.sy == '.'
s.next()
name = EncodedString( p_ident(s) )
- return ExprNodes.AttributeNode(pos,
+ return ExprNodes.AttributeNode(pos,
obj = node1, attribute = name)
# arglist: argument (',' argument)* [',']
else:
arg_tuple, keyword_dict = p_call_build_packed_args(
pos, positional_args, keyword_args, star_arg)
- return ExprNodes.GeneralCallNode(pos,
+ return ExprNodes.GeneralCallNode(pos,
function = function,
positional_args = arg_tuple,
keyword_args = keyword_dict,
subscripts = p_subscript_list(s)
if len(subscripts) == 1 and len(subscripts[0]) == 2:
start, stop = subscripts[0]
- result = ExprNodes.SliceIndexNode(pos,
+ result = ExprNodes.SliceIndexNode(pos,
base = base, start = start, stop = stop)
else:
indexes = make_slice_nodes(pos, subscripts)
# comp_iter ::= comp_for | comp_if
# comp_for ::= "for" expression_list "in" testlist [comp_iter]
# comp_if ::= "if" test [comp_iter]
-
+
def p_list_maker(s):
# s.sy == '['
pos = s.position()
exprs = [expr]
s.expect(']')
return ExprNodes.ListNode(pos, args = exprs)
-
+
def p_comp_iter(s, body):
if s.sy == 'for':
return p_comp_for(s, body)
kw = p_for_bounds(s, allow_testlist=False)
kw.update(dict(else_clause = None, body = p_comp_iter(s, body)))
return Nodes.ForStatNode(pos, **kw)
-
+
def p_comp_if(s, body):
# s.sy == 'if'
pos = s.position()
s.next()
test = p_test_nocond(s)
- return Nodes.IfStatNode(pos,
+ return Nodes.IfStatNode(pos,
if_clauses = [Nodes.IfClauseNode(pos, condition = test,
body = p_comp_iter(s, body))],
else_clause = None )
break
s.next()
return exprs
-
+
#testlist: test (',' test)* [',']
rhs = expr_list[-1]
if len(expr_list) == 2:
- return Nodes.SingleAssignmentNode(rhs.pos,
+ return Nodes.SingleAssignmentNode(rhs.pos,
lhs = expr_list[0], rhs = rhs)
else:
return Nodes.CascadedAssignmentNode(rhs.pos,
s.next()
exc_tb = p_test(s)
if exc_type or exc_value or exc_tb:
- return Nodes.RaiseStatNode(pos,
+ return Nodes.RaiseStatNode(pos,
exc_type = exc_type,
exc_value = exc_value,
exc_tb = exc_tb)
for pos, target_name, dotted_name, as_name in items:
dotted_name = EncodedString(dotted_name)
if kind == 'cimport':
- stat = Nodes.CImportStatNode(pos,
+ stat = Nodes.CImportStatNode(pos,
module_name = dotted_name,
as_name = as_name)
else:
else:
name_list = None
stat = Nodes.SingleAssignmentNode(pos,
- lhs = ExprNodes.NameNode(pos,
+ lhs = ExprNodes.NameNode(pos,
name = as_name or target_name),
- rhs = ExprNodes.ImportNode(pos,
+ rhs = ExprNodes.ImportNode(pos,
module_name = ExprNodes.IdentifierStringNode(
pos, value = dotted_name),
name_list = name_list))
ExprNodes.IdentifierStringNode(name_pos, value = encoded_name))
items.append(
(name,
- ExprNodes.NameNode(name_pos,
+ ExprNodes.NameNode(name_pos,
name = as_name or name)))
import_list = ExprNodes.ListNode(
imported_names[0][0], args = imported_name_strings)
test = p_test(s)
body = p_suite(s)
else_clause = p_else_clause(s)
- return Nodes.WhileStatNode(pos,
- condition = test, body = body,
+ return Nodes.WhileStatNode(pos,
+ condition = test, body = body,
else_clause = else_clause)
def p_for_statement(s):
else_clause = p_else_clause(s)
kw.update(dict(body = body, else_clause = else_clause))
return Nodes.ForStatNode(pos, **kw)
-
+
def p_for_bounds(s, allow_testlist=True):
target = p_for_target(s)
if s.sy == 'in':
target = ExprNodes.NameNode(name2_pos, name = name2)
else:
if not target.is_name:
- error(target.pos,
+ error(target.pos,
"Target of for-from statement must be a variable name")
elif name2 != target.name:
error(name2_pos,
if rel1[0] != rel2[0]:
error(rel2_pos,
"Relation directions in for-from do not match")
- return dict(target = target,
- bound1 = bound1,
- relation1 = rel1,
+ return dict(target = target,
+ bound1 = bound1,
+ relation1 = rel1,
relation2 = rel2,
bound2 = bound2,
step = step,
body = p_with_items(s)
else:
body = p_suite(s)
- return Nodes.WithStatNode(pos, manager = manager,
+ return Nodes.WithStatNode(pos, manager = manager,
target = target, body = body)
def p_with_template(s):
else:
base_type = p_c_base_type(s, templates = templates)
declarator = p_c_declarator(s, empty = 1)
- arg = Nodes.CComplexBaseTypeNode(base_type.pos,
+ arg = Nodes.CComplexBaseTypeNode(base_type.pos,
base_type = base_type, declarator = declarator)
parsed_type = True
keyword_node = ExprNodes.IdentifierStringNode(
arg.pos, value = EncodedString(ident))
keyword_args.append((keyword_node, arg))
was_keyword = True
-
+
else:
if looking_at_expr(s):
arg = p_test(s)
else:
base_type = p_c_base_type(s, templates = templates)
declarator = p_c_declarator(s, empty = 1)
- arg = Nodes.CComplexBaseTypeNode(base_type.pos,
+ arg = Nodes.CComplexBaseTypeNode(base_type.pos,
base_type = base_type, declarator = declarator)
parsed_type = True
positional_args.append(arg)
base_type = p_c_base_type(s)
declarator = p_c_declarator(s, empty = 1)
s.expect(')')
- return Nodes.CComplexBaseTypeNode(pos,
+ return Nodes.CComplexBaseTypeNode(pos,
base_type = base_type, declarator = declarator)
def p_c_simple_base_type(s, self_flag, nonempty, templates = None):
name = s.systring
s.next()
if nonempty and s.sy != 'IDENT':
- # Make sure this is not a declaration of a variable or function.
+ # Make sure this is not a declaration of a variable or function.
if s.sy == '(':
s.next()
if s.sy == '*' or s.sy == '**' or s.sy == '&':
s.put_back('IDENT', name)
name = None
- type_node = Nodes.CSimpleBaseTypeNode(pos,
+ type_node = Nodes.CSimpleBaseTypeNode(pos,
name = name, module_path = module_path,
is_basic_c_type = is_basic, signed = signed,
- complex = complex, longness = longness,
+ complex = complex, longness = longness,
is_self_arg = self_flag, templates = templates)
if s.sy == '[':
type_node = p_buffer_or_template(s, type_node, templates)
-
+
if s.sy == '.':
s.next()
name = p_ident(s)
type_node = Nodes.CNestedBaseTypeNode(pos, base_type = type_node, name = name)
-
+
return type_node
def p_buffer_or_template(s, base_type_node, templates):
# s.sy == '['
pos = s.position()
s.next()
- # Note that buffer_positional_options_count=1, so the only positional argument is dtype.
- # For templated types, all parameters are types.
+ # Note that buffer_positional_options_count=1, so the only positional argument is dtype.
+ # For templated types, all parameters are types.
positional_args, keyword_args = (
p_positional_and_keyword_args(s, (']',), templates)
)
keyword_args = keyword_dict,
base_type_node = base_type_node)
return result
-
+
def looking_at_name(s):
return s.sy == 'IDENT' and not s.systring in calling_convention_words
nogil = p_nogil(s)
exc_val, exc_check = p_exception_value_clause(s)
with_gil = p_with_gil(s)
- return Nodes.CFuncDeclaratorNode(pos,
+ return Nodes.CFuncDeclaratorNode(pos,
base = base, args = args, has_varargs = ellipsis,
exception_value = exc_val, exception_check = exc_check,
nogil = nogil or ctx.nogil or with_gil, with_gil = with_gil)
supported_overloaded_operators = cython.set([
- '+', '-', '*', '/', '%',
+ '+', '-', '*', '/', '%',
'++', '--', '~', '|', '&', '^', '<<', '>>', ',',
'==', '!=', '>=', '>', '<=', '<',
'[]', '()',
base = p_c_declarator(s, ctx, empty = empty, is_type = is_type,
cmethod_flag = cmethod_flag,
assignable = assignable, nonempty = nonempty)
- result = Nodes.CPtrDeclaratorNode(pos,
+ result = Nodes.CPtrDeclaratorNode(pos,
base = base)
elif s.sy == '**': # scanner returns this as a single token
s.next()
if s.sy == '=':
s.next()
value = p_test(s)
- items.append(Nodes.CEnumDefItemNode(pos,
+ items.append(Nodes.CEnumDefItemNode(pos,
name = name, cname = cname, value = value))
def p_c_struct_or_union_definition(s, pos, ctx):
s.expect_dedent()
else:
s.expect_newline("Syntax error in struct or union definition")
- return Nodes.CStructOrUnionDefNode(pos,
+ return Nodes.CStructOrUnionDefNode(pos,
name = name, cname = cname, kind = kind, attributes = attributes,
typedef_flag = ctx.typedef_flag, visibility = ctx.visibility,
in_pxd = ctx.level == 'module_pxd', packed = packed)
% (prev_visibility, visibility))
s.next()
return visibility
-
+
def p_c_modifiers(s):
if s.sy == 'IDENT' and s.systring in ('inline',):
modifier = s.systring
result = Nodes.CFuncDefNode(pos,
visibility = ctx.visibility,
base_type = base_type,
- declarator = declarator,
+ declarator = declarator,
body = suite,
doc = doc,
modifiers = modifiers,
assignable = 1, nonempty = 1)
declarators.append(declarator)
s.expect_newline("Syntax error in C variable declaration")
- result = Nodes.CVarDefNode(pos,
+ result = Nodes.CVarDefNode(pos,
visibility = ctx.visibility,
base_type = base_type,
declarators = declarators,
s.next()
return_type_annotation = p_test(s)
doc, body = p_suite(s, Ctx(level = 'function'), with_doc = 1)
- return Nodes.DefNode(pos, name = name, args = args,
+ return Nodes.DefNode(pos, name = name, args = args,
star_arg = star_arg, starstar_arg = starstar_arg,
doc = doc, body = body, decorators = decorators,
return_type_annotation = return_type_annotation)
def cast_code(self, expr_code):
return "((%s)%s)" % (self.declaration_code(""), expr_code)
-
+
def specialization_name(self):
return self.declaration_code("").replace(" ", "__")
-
+
def base_declaration_code(self, base_code, entity_code):
if entity_code:
return "%s %s" % (base_code, entity_code)
# has_attributes boolean Has C dot-selectable attributes
# default_value string Initial value
#
- # declaration_code(entity_code,
+ # declaration_code(entity_code,
# for_display = 0, dll_linkage = None, pyrex = 0)
# Returns a code fragment for the declaration of an entity
# of this type, given a code fragment for the entity.
# Coerces array type into pointer type for use as
# a formal argument type.
#
-
+
is_pyobject = 0
is_unspecified = 0
is_extension_type = 0
is_buffer = 0
has_attributes = 0
default_value = ""
-
+
def resolve(self):
# If a typedef, returns the base type.
return self
-
+
def specialize(self, values):
# TODO(danilo): Override wherever it makes sense.
return self
-
+
def literal_code(self, value):
# Returns a C code fragment representing a literal
# value of this type.
return str(value)
-
+
def __str__(self):
return self.declaration_code("", for_display = 1).strip()
-
+
def same_as(self, other_type, **kwds):
return self.same_as_resolved_type(other_type.resolve(), **kwds)
-
+
def same_as_resolved_type(self, other_type):
return self == other_type or other_type is error_type
-
+
def subtype_of(self, other_type):
return self.subtype_of_resolved_type(other_type.resolve())
-
+
def subtype_of_resolved_type(self, other_type):
return self.same_as(other_type)
-
+
def assignable_from(self, src_type):
return self.assignable_from_resolved_type(src_type.resolve())
-
+
def assignable_from_resolved_type(self, src_type):
return self.same_as(src_type)
-
+
def as_argument_type(self):
return self
-
+
def is_complete(self):
# A type is incomplete if it is an unsized array,
# a struct whose attributes are not defined, etc.
return "%s(%s)" % (dll_linkage, base_code)
else:
return base_code
-
+
def create_typedef_type(name, base_type, cname, is_external=0):
if base_type.is_complex:
if is_external:
# typedef_cname string
# typedef_base_type PyrexType
# typedef_is_external bool
-
+
is_typedef = 1
typedef_is_external = 0
to_py_utility_code = None
from_py_utility_code = None
-
-
+
+
def __init__(self, name, base_type, cname, is_external=0):
assert not base_type.is_complex
self.typedef_name = name
self.typedef_cname = cname
self.typedef_base_type = base_type
self.typedef_is_external = is_external
-
+
def resolve(self):
return self.typedef_base_type.resolve()
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.typedef_name
else:
base_code = public_decl(self.typedef_cname, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
-
+
def as_argument_type(self):
return self
def cast_code(self, expr_code):
# If self is really an array (rather than pointer), we can't cast.
- # For example, the gmp mpz_t.
+ # For example, the gmp mpz_t.
if self.typedef_base_type.is_array:
base_type = self.typedef_base_type.base_type
return CPtrType(base_type).cast_code(expr_code)
def __repr__(self):
return "<CTypedefType %s>" % self.typedef_cname
-
+
def __str__(self):
return self.typedef_name
# Delegates most attribute
# lookups to the base type. ANYTHING NOT DEFINED
# HERE IS DELEGATED!
-
+
# dtype PyrexType
# ndim int
# mode str
self.mode = mode
self.negative_indices = negative_indices
self.cast = cast
-
+
def as_argument_type(self):
return self
buffer_defaults = None
is_extern = False
is_subclassed = False
-
+
def __str__(self):
return "Python object"
-
+
def __repr__(self):
return "<PyObjectType>"
def assignable_from(self, src_type):
# except for pointers, conversion will be attempted
return not src_type.is_ptr or src_type.is_string
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = "object"
self.cname = cname
self.typeptr_cname = "(&%s)" % cname
self.objstruct_cname = objstruct_cname
-
+
def set_scope(self, scope):
self.scope = scope
if scope:
scope.parent_type = self
-
+
def __str__(self):
return "%s object" % self.name
-
+
def __repr__(self):
return "<%s>"% self.cname
-
+
def assignable_from(self, src_type):
if isinstance(src_type, BuiltinObjectType):
return src_type.name == self.name
src_type.name == self.name)
else:
return True
-
+
def typeobj_is_available(self):
return True
-
+
def attributes_known(self):
return True
-
+
def subtype_of(self, type):
return type.is_pyobject and self.assignable_from(type)
error = '(PyErr_Format(PyExc_TypeError, "Expected %s, got %%.200s", Py_TYPE(%s)->tp_name), 0)' % (self.name, arg)
return check + '||' + error
- def declaration_code(self, entity_code,
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.name
# vtabstruct_cname string Name of C method table struct
# vtabptr_cname string Name of pointer to C method table
# vtable_cname string Name of C method table definition
-
+
is_extension_type = 1
has_attributes = 1
-
+
objtypedef_cname = None
-
+
def __init__(self, name, typedef_flag, base_type, is_external=0):
self.name = name
self.scope = None
self.vtabptr_cname = None
self.vtable_cname = None
self.is_external = is_external
-
+
def set_scope(self, scope):
self.scope = scope
if scope:
scope.parent_type = self
-
+
def subtype_of_resolved_type(self, other_type):
if other_type.is_extension_type:
return self is other_type or (
self.base_type and self.base_type.subtype_of(other_type))
else:
return other_type is py_object_type
-
+
def typeobj_is_available(self):
# Do we have a pointer to the type object?
return self.typeptr_cname
-
+
def typeobj_is_imported(self):
# If we don't know the C name of the type object but we do
# know which module it's defined in, it will be imported.
return self.typeobj_cname is None and self.module_name is not None
-
+
def assignable_from(self, src_type):
if self == src_type:
return True
return self.assignable_from(src_type.base_type)
return False
- def declaration_code(self, entity_code,
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0, deref = 0):
if pyrex or for_display:
base_code = self.name
def attributes_known(self):
return self.scope is not None
-
+
def __str__(self):
return self.name
-
+
def __repr__(self):
return "<PyExtensionType %s%s>" % (self.scope.class_name,
("", " typedef")[self.typedef_flag])
-
+
class CType(PyrexType):
#
# to_py_function string C function for converting to Python object
# from_py_function string C function for constructing from Python object
#
-
+
to_py_function = None
from_py_function = None
exception_value = None
def create_to_py_utility_code(self, env):
return self.to_py_function is not None
-
+
def create_from_py_utility_code(self, env):
return self.from_py_function is not None
#
is_void = 1
-
+
def __repr__(self):
return "<CVoidType>"
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = "void"
else:
base_code = public_decl("void", dll_linkage)
return self.base_declaration_code(base_code, entity_code)
-
+
def is_complete(self):
return 0
# rank integer Relative size
# signed integer 0 = unsigned, 1 = unspecified, 2 = explicitly signed
#
-
+
is_numeric = 1
default_value = "0"
-
+
sign_words = ("unsigned ", "", "signed ")
-
+
def __init__(self, rank, signed = 1):
self.rank = rank
self.signed = signed
-
+
def sign_and_name(self):
s = self.sign_words[self.signed]
n = rank_to_type_name[self.rank]
return s + n
-
+
def __repr__(self):
return "<CNumericType %s>" % self.sign_and_name()
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
type_name = self.sign_and_name()
if pyrex or for_display:
} else {
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
- return _PyLong_FromByteArray(bytes, sizeof(%(type)s),
+ return _PyLong_FromByteArray(bytes, sizeof(%(type)s),
little, !is_unsigned);
}
}
from_py_function = "__pyx_PyFloat_AsDouble"
exception_value = -1
-
+
def __init__(self, rank, math_h_modifier = ''):
CNumericType.__init__(self, rank, 1)
self.math_h_modifier = math_h_modifier
-
+
def assignable_from_resolved_type(self, src_type):
return (src_type.is_numeric and not src_type.is_complex) or src_type is error_type
class CComplexType(CNumericType):
-
+
is_complex = 1
to_py_function = "__pyx_PyComplex_FromComplex"
has_attributes = 1
scope = None
-
+
def __init__(self, real_type):
while real_type.is_typedef and not real_type.typedef_is_external:
real_type = real_type.typedef_base_type
self.funcsuffix = real_type.math_h_modifier
else:
self.funcsuffix = "_%s" % real_type.specialization_name()
-
+
self.real_type = real_type
CNumericType.__init__(self, real_type.rank + 0.5, real_type.signed)
self.binops = {}
return self.real_type == other.real_type
else:
return False
-
+
def __ne__(self, other):
if isinstance(self, CComplexType) and isinstance(other, CComplexType):
return self.real_type != other.real_type
def __hash__(self):
return ~hash(self.real_type)
- def declaration_code(self, entity_code,
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
real_code = self.real_type.declaration_code("", for_display, dll_linkage, pyrex)
real_type_name = real_type_name.replace('long__double','long_double')
real_type_name = real_type_name.replace('PY_LONG_LONG','long_long')
return Naming.type_prefix + real_type_name + "_complex"
-
+
def assignable_from(self, src_type):
# Temporary hack/feature disabling, see #441
if (not src_type.is_complex and src_type.is_numeric and src_type.is_typedef
return False
else:
return super(CComplexType, self).assignable_from(src_type)
-
+
def assignable_from_resolved_type(self, src_type):
return (src_type.is_complex and self.real_type.assignable_from_resolved_type(src_type.real_type)
- or src_type.is_numeric and self.real_type.assignable_from_resolved_type(src_type)
+ or src_type.is_numeric and self.real_type.assignable_from_resolved_type(src_type)
or src_type is error_type)
-
+
def attributes_known(self):
if self.scope is None:
import Symtab
complex_arithmetic_utility_code):
env.use_utility_code(
utility_code.specialize(
- self,
+ self,
real_type = self.real_type.declaration_code(''),
m = self.funcsuffix,
is_float = self.real_type.is_float))
complex_from_py_utility_code):
env.use_utility_code(
utility_code.specialize(
- self,
+ self,
real_type = self.real_type.declaration_code(''),
m = self.funcsuffix,
is_float = self.real_type.is_float))
self.from_py_function = "__Pyx_PyComplex_As_" + self.specialization_name()
return True
-
+
def lookup_op(self, nargs, op):
try:
return self.binops[nargs, op]
def unary_op(self, op):
return self.lookup_op(1, op)
-
+
def binary_op(self, op):
return self.lookup_op(2, op)
-
+
complex_ops = {
(1, '-'): 'neg',
(1, 'zero'): 'is_zero',
class CArrayType(CType):
# base_type CType Element type
# size integer or None Number of elements
-
+
is_array = 1
-
+
def __init__(self, base_type, size):
self.base_type = base_type
self.size = size
if base_type is c_char_type:
self.is_string = 1
-
+
def __repr__(self):
return "<CArrayType %s %s>" % (self.size, repr(self.base_type))
-
+
def same_as_resolved_type(self, other_type):
return ((other_type.is_array and
self.base_type.same_as(other_type.base_type))
or other_type is error_type)
-
+
def assignable_from_resolved_type(self, src_type):
# Can't assign to a variable of an array type
return 0
-
+
def element_ptr_type(self):
return c_ptr_type(self.base_type)
- def declaration_code(self, entity_code,
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if self.size is not None:
dimension_code = self.size
return self.base_type.declaration_code(
"%s[%s]" % (entity_code, dimension_code),
for_display, dll_linkage, pyrex)
-
+
def as_argument_type(self):
return c_ptr_type(self.base_type)
-
+
def is_complete(self):
return self.size is not None
class CPtrType(CType):
# base_type CType Referenced type
-
+
is_ptr = 1
default_value = "0"
-
+
def __init__(self, base_type):
self.base_type = base_type
-
+
def __repr__(self):
return "<CPtrType %s>" % repr(self.base_type)
-
+
def same_as_resolved_type(self, other_type):
return ((other_type.is_ptr and
self.base_type.same_as(other_type.base_type))
or other_type is error_type)
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
#print "CPtrType.declaration_code: pointer to", self.base_type ###
return self.base_type.declaration_code(
"*%s" % entity_code,
for_display, dll_linkage, pyrex)
-
+
def assignable_from_resolved_type(self, other_type):
if other_type is error_type:
return 1
return self.base_type.pointer_assignable_from_resolved_type(other_type)
else:
return 0
- if (self.base_type.is_cpp_class and other_type.is_ptr
+ if (self.base_type.is_cpp_class and other_type.is_ptr
and other_type.base_type.is_cpp_class and other_type.base_type.is_subclass(self.base_type)):
return 1
if other_type.is_array or other_type.is_ptr:
return self.base_type.is_void or self.base_type.same_as(other_type.base_type)
return 0
-
+
def specialize(self, values):
base_type = self.base_type.specialize(values)
if base_type == self.base_type:
class CNullPtrType(CPtrType):
is_null_ptr = 1
-
+
class CReferenceType(BaseType):
def __repr__(self):
return "<CReferenceType %s>" % repr(self.ref_base_type)
-
+
def __str__(self):
return "%s &" % self.ref_base_type
def as_argument_type(self):
return self
- def declaration_code(self, entity_code,
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
#print "CReferenceType.declaration_code: pointer to", self.base_type ###
return self.ref_base_type.declaration_code(
"&%s" % entity_code,
for_display, dll_linkage, pyrex)
-
+
def specialize(self, values):
base_type = self.ref_base_type.specialize(values)
if base_type == self.ref_base_type:
# nogil boolean Can be called without gil
# with_gil boolean Acquire gil around function body
# templates [string] or None
-
+
is_cfunction = 1
original_sig = None
-
+
def __init__(self, return_type, args, has_varargs = 0,
exception_value = None, exception_check = 0, calling_convention = "",
nogil = 0, with_gil = 0, is_overridable = 0, optional_arg_count = 0,
self.with_gil = with_gil
self.is_overridable = is_overridable
self.templates = templates
-
+
def __repr__(self):
arg_reprs = map(repr, self.args)
if self.has_varargs:
self.calling_convention_prefix(),
",".join(arg_reprs),
except_clause)
-
+
def calling_convention_prefix(self):
cc = self.calling_convention
if cc:
return cc + " "
else:
return ""
-
+
def same_c_signature_as(self, other_type, as_cmethod = 0):
return self.same_c_signature_as_resolved_type(
other_type.resolve(), as_cmethod)
def compatible_signature_with(self, other_type, as_cmethod = 0):
return self.compatible_signature_with_resolved_type(other_type.resolve(), as_cmethod)
-
+
def compatible_signature_with_resolved_type(self, other_type, as_cmethod):
#print "CFuncType.same_c_signature_as_resolved_type:", \
# self, other_type, "as_cmethod =", as_cmethod ###
if as_cmethod:
self.args[0] = other_type.args[0]
return 1
-
-
+
+
def narrower_c_signature_than(self, other_type, as_cmethod = 0):
return self.narrower_c_signature_than_resolved_type(other_type.resolve(), as_cmethod)
-
+
def narrower_c_signature_than_resolved_type(self, other_type, as_cmethod):
if other_type is error_type:
return 1
sc1 = self.calling_convention == '__stdcall'
sc2 = other.calling_convention == '__stdcall'
return sc1 == sc2
-
+
def same_exception_signature_as(self, other_type):
return self.same_exception_signature_as_resolved_type(
other_type.resolve())
def same_exception_signature_as_resolved_type(self, other_type):
return self.exception_value == other_type.exception_value \
and self.exception_check == other_type.exception_check
-
+
def same_as_resolved_type(self, other_type, as_cmethod = 0):
return self.same_c_signature_as_resolved_type(other_type, as_cmethod) \
and self.same_exception_signature_as_resolved_type(other_type) \
and self.nogil == other_type.nogil
-
+
def pointer_assignable_from_resolved_type(self, other_type):
return self.same_c_signature_as_resolved_type(other_type) \
and self.same_exception_signature_as_resolved_type(other_type) \
and not (self.nogil and not other_type.nogil)
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0,
with_calling_convention = 1):
arg_decl_list = []
return self.return_type.declaration_code(
"%s%s(%s)%s" % (cc, entity_code, arg_decl_code, trailer),
for_display, dll_linkage, pyrex)
-
+
def function_header_code(self, func_name, arg_code):
return "%s%s(%s)" % (self.calling_convention_prefix(),
func_name, arg_code)
def signature_cast_string(self):
s = self.declaration_code("(*)", with_calling_convention=False)
return '(%s)' % s
-
+
def specialize(self, values):
if self.templates is None:
new_templates = None
is_overridable = self.is_overridable,
optional_arg_count = self.optional_arg_count,
templates = new_templates)
-
+
def opt_arg_cname(self, arg_name):
return self.op_arg_struct.base_type.scope.lookup(arg_name).cname
self.type = type
self.pos = pos
self.needs_type_test = False # TODO: should these defaults be set in analyse_types()?
-
+
def __repr__(self):
return "%s:%s" % (self.name, repr(self.type))
-
+
def declaration_code(self, for_display = 0):
return self.type.declaration_code(self.cname, for_display)
-
+
def specialize(self, values):
return CFuncTypeArg(self.name, self.type.specialize(values), self.pos, self.cname)
return isinstance(other, StructUtilityCode) and self.header == other.header
def __hash__(self):
return hash(self.header)
-
+
def put_code(self, output):
code = output['utility_code_def']
proto = output['utility_code_proto']
-
+
code.putln("%s {" % self.header)
code.putln("PyObject* res;")
code.putln("PyObject* member;")
if self.forward_decl:
proto.putln(self.type.declaration_code('') + ';')
proto.putln(self.header + ";")
-
+
class CStructOrUnionType(CType):
# name string
# scope StructOrUnionScope, or None if incomplete
# typedef_flag boolean
# packed boolean
-
+
# entry Entry
-
+
is_struct_or_union = 1
has_attributes = 1
-
+
def __init__(self, name, kind, scope, typedef_flag, cname, packed=False):
self.name = name
self.cname = cname
self.exception_check = True
self._convert_code = None
self.packed = packed
-
+
def create_to_py_utility_code(self, env):
if env.outer_scope is None:
return False
return False
forward_decl = (self.entry.visibility != 'extern')
self._convert_code = StructUtilityCode(self, forward_decl)
-
+
env.use_utility_code(self._convert_code)
return True
-
+
def __repr__(self):
return "<CStructOrUnionType %s %s%s>" % (self.name, self.cname,
("", " typedef")[self.typedef_flag])
- def declaration_code(self, entity_code,
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.name
def is_complete(self):
return self.scope is not None
-
+
def attributes_known(self):
return self.is_complete()
# cname string
# scope CppClassScope
# templates [string] or None
-
+
is_cpp_class = 1
has_attributes = 1
exception_check = True
namespace = None
-
+
def __init__(self, name, scope, cname, base_classes, templates = None, template_type = None):
self.name = name
self.cname = cname
error(pos, "'%s' type is not a template" % self);
return PyrexTypes.error_type
if len(self.templates) != len(template_values):
- error(pos, "%s templated type receives %d arguments, got %d" %
+ error(pos, "%s templated type receives %d arguments, got %d" %
(self.name, len(self.templates), len(template_values)))
return error_type
return self.specialize(dict(zip(self.templates, template_values)))
-
+
def specialize(self, values):
if not self.templates and not self.namespace:
return self
if base_class.is_subclass(other_type):
return 1
return 0
-
+
def same_as_resolved_type(self, other_type):
if other_type.is_cpp_class:
if self == other_type:
if other_type is error_type:
return True
return other_type.is_cpp_class and other_type.is_subclass(self)
-
+
def attributes_known(self):
return self.scope is not None
class TemplatePlaceholderType(CType):
-
+
def __init__(self, name):
self.name = name
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if entity_code:
return self.name + " " + entity_code
else:
return self.name
-
+
def specialize(self, values):
if self in values:
return values[self]
return self.name == other_type.name
else:
return 0
-
+
def __hash__(self):
return hash(self.name)
-
+
def __cmp__(self, other):
if isinstance(other, TemplatePlaceholderType):
return cmp(self.name, other.name)
self.cname = cname
self.values = []
self.typedef_flag = typedef_flag
-
+
def __str__(self):
return self.name
-
+
def __repr__(self):
return "<CEnumType %s %s%s>" % (self.name, self.cname,
("", " typedef")[self.typedef_flag])
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.name
is_string = 1
is_unicode = 0
-
+
to_py_function = "PyBytes_FromString"
from_py_function = "PyBytes_AsString"
exception_value = "NULL"
class CUTF8CharArrayType(CStringType, CArrayType):
# C 'char []' type.
-
+
is_unicode = 1
-
+
to_py_function = "PyUnicode_DecodeUTF8"
exception_value = "NULL"
-
+
def __init__(self, size):
CArrayType.__init__(self, c_char_type, size)
class CCharArrayType(CStringType, CArrayType):
# C 'char []' type.
-
+
def __init__(self, size):
CArrayType.__init__(self, c_char_type, size)
-
+
class CCharPtrType(CStringType, CPtrType):
# C 'char *' type.
-
+
def __init__(self):
CPtrType.__init__(self, c_char_type)
class CUCharPtrType(CStringType, CPtrType):
# C 'unsigned char *' type.
-
+
to_py_function = "__Pyx_PyBytes_FromUString"
from_py_function = "__Pyx_PyBytes_AsUString"
class UnspecifiedType(PyrexType):
# Used as a placeholder until the type can be determined.
-
+
is_unspecified = 1
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
return "<unspecified>"
-
+
def same_as_resolved_type(self, other_type):
return False
-
+
class ErrorType(PyrexType):
# Used to prevent propagation of error messages.
-
+
is_error = 1
exception_value = "0"
exception_check = 0
to_py_function = "dummy"
from_py_function = "dummy"
-
+
def create_to_py_utility_code(self, env):
return True
-
+
def create_from_py_utility_code(self, env):
return True
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
return "<error>"
-
+
def same_as_resolved_type(self, other_type):
return 1
-
+
def error_condition(self, result_code):
return "dummy"
def is_promotion(src_type, dst_type):
# It's hard to find a hard definition of promotion, but empirical
- # evidence suggests that the below is all that's allowed.
+ # evidence suggests that the below is all that's allowed.
if src_type.is_numeric:
if dst_type.same_as(c_int_type):
unsigned = (not src_type.signed)
functions based on how much work must be done to convert the
arguments, with the following priorities:
* identical types or pointers to identical types
- * promotions
+ * promotions
* non-Python types
That is, we prefer functions where no arguments need converted,
and failing that, functions where only promotions are required, and
the same weight, we return None (as there is no best match). If pos
is not None, we also generate an error.
"""
- # TODO: args should be a list of types, not a list of Nodes.
+ # TODO: args should be a list of types, not a list of Nodes.
actual_nargs = len(args)
candidates = []
errors.append((func, error_mesg))
continue
candidates.append((func, func_type))
-
+
# Optimize the most common case of no overloading...
if len(candidates) == 1:
return candidates[0][0]
else:
error(pos, "no suitable method found")
return None
-
+
possibilities = []
bad_types = []
for func, func_type in candidates:
return ntype
widest_type = CComplexType(
widest_numeric_type(
- real_type(type1),
+ real_type(type1),
real_type(type2)))
elif type1.is_enum and type2.is_enum:
widest_type = c_int_type
# Find type descriptor for simple type given name and modifiers.
# Returns None if arguments don't make sense.
return modifiers_and_name_to_type.get((signed, longness, name))
-
+
def parse_basic_type(name):
base = None
if name.startswith('p_'):
if name.startswith('u'):
name = name[1:]
signed = 0
- elif (name.startswith('s') and
+ elif (name.startswith('s') and
not name.startswith('short')):
name = name[1:]
signed = 2
def same_type(type1, type2):
return type1.same_as(type2)
-
+
def assignable_from(type1, type2):
return type1.assignable_from(type2)
if not lexicon:
lexicon = make_lexicon()
return lexicon
-
+
#------------------------------------------------------------------
py_reserved_words = [
def __init__(self, name):
self.name = name
self.__name__ = name # for Plex tracing
-
+
def __call__(self, stream, text):
return getattr(stream, self.name)(text)
def __init__(self, outer = None):
self.entries = {}
self.outer = outer
-
+
def declare(self, name, value):
    """Bind *name* to *value* in this compile-time scope's entry table."""
    table = self.entries
    table[name] = value
-
+
def lookup_here(self, name):
    """Return the value bound to *name* in this scope only.

    Raises KeyError if *name* is not declared here.
    """
    table = self.entries
    return table[name]
-
+
def __contains__(self, name):
    """True if *name* is declared directly in this scope (outer scopes ignored)."""
    found = name in self.entries
    return found
-
+
def lookup(self, name):
try:
return self.lookup_here(name)
self.filename = filename
self.set_file_type_from_name(filename)
self._cmp_name = filename
-
+
def get_lines(self, encoding=None, error_handling=None):
return Utils.open_source_file(
self.filename, encoding=encoding,
error_handling=error_handling,
# newline normalisation is costly before Py2.6
require_normalised_newlines=False)
-
+
def get_description(self):
return self.filename
-
+
def get_filenametable_entry(self):
return self.filename
-
+
def __eq__(self, other):
return isinstance(other, FileSourceDescriptor) and self.filename == other.filename
#self.set_file_type_from_name(name)
self.codelines = [x + "\n" for x in code.split("\n")]
self._cmp_name = name
-
+
def get_lines(self, encoding=None, error_handling=None):
if not encoding:
return self.codelines
# compile_time_eval boolean In a true conditional compilation context
# compile_time_expr boolean In a compile-time expression context
- def __init__(self, file, filename, parent_scanner = None,
+ def __init__(self, file, filename, parent_scanner = None,
scope = None, context = None, source_encoding=None, parse_comments=True, initial_pos=None):
Scanner.__init__(self, get_lexicon(), file, filename, initial_pos)
if parent_scanner:
def commentline(self, text):
if self.parse_comments:
- self.produce('commentline', text)
-
+ self.produce('commentline', text)
+
def current_level(self):
return self.indentation_stack[-1]
if self.bracket_nesting_level == 0:
self.begin('INDENT')
self.produce('NEWLINE', '')
-
+
string_states = {
"'": 'SQ_STRING',
'"': 'DQ_STRING',
"'''": 'TSQ_STRING',
'"""': 'TDQ_STRING'
}
-
+
def begin_string_action(self, text):
if text[:1] in string_prefixes:
text = text[1:]
text = text[1:]
self.begin(self.string_states[text])
self.produce('BEGIN_STRING')
-
+
def end_string_action(self, text):
self.begin('')
self.produce('END_STRING')
-
+
def unclosed_string_action(self, text):
self.end_string_action(text)
self.error("Unclosed string literal")
else:
t = "%s %s" % (self.sy, self.systring)
print("--- %3d %2d %s" % (line, col, t))
-
+
def peek(self):
saved = self.sy, self.systring
self.next()
self.unread(*next)
self.sy, self.systring = saved
return next
-
+
def put_back(self, sy, systring):
    """Push the current token back onto the stream and make (sy, systring) current."""
    # Preserve the active token via the pushback queue before replacing it.
    self.unread(self.sy, self.systring)
    self.sy, self.systring = sy, systring
-
+
def unread(self, token, value):
    """Prepend (token, value) to the token queue so it is read again first."""
    # This method should be added to Plex
    pair = (token, value)
    self.queue.insert(0, pair)
-
+
def error(self, message, pos = None, fatal = True):
if pos is None:
pos = self.position()
err = error(pos, "Possible inconsistent indentation")
err = error(pos, message)
if fatal: raise err
-
+
def expect(self, what, message = None):
    """Consume the current token if it is *what*; otherwise report an error."""
    if self.sy != what:
        self.expected(what, message)
    else:
        self.next()
-
+
def expect_keyword(self, what, message = None):
    """Consume the current token if it is the identifier *what*; otherwise error."""
    is_match = self.sy == IDENT and self.systring == what
    if is_match:
        self.next()
    else:
        self.expected(what, message)
-
+
def expected(self, what, message = None):
if message:
self.error(message)
else:
found = self.sy
self.error("Expected '%s', found '%s'" % (what, found))
-
+
def expect_indent(self):
self.expect('INDENT',
"Expected an increase in indentation level")
# Matcher for C-safe identifiers (letters, digits, underscore only).
# Fixed: the class previously read '0-0', which matched only the digit zero
# and rejected every identifier containing 1-9.
nice_identifier = re.compile('^[a-zA-Z0-9_]+$').match
# Reserved words of ISO C99 (keywords a generated C identifier must avoid).
# Fixed: "'_Complex'', _Imaginary'" was implicit string concatenation, which
# produced the single bogus entry '_Complex, _Imaginary' and silently dropped
# both real keywords '_Complex' and '_Imaginary' from the set.
iso_c99_keywords = set(
['auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do',
 'double', 'else', 'enum', 'extern', 'float', 'for', 'goto', 'if',
 'int', 'long', 'register', 'return', 'short', 'signed', 'sizeof',
 'static', 'struct', 'switch', 'typedef', 'union', 'unsigned', 'void',
 'volatile', 'while',
 '_Bool', '_Complex', '_Imaginary', 'inline', 'restrict'])
class BufferAux(object):
writable_needed = False
-
+
def __init__(self, buffer_info_var, stridevars, shapevars,
suboffsetvars):
self.buffer_info_var = buffer_info_var
self.stridevars = stridevars
self.shapevars = shapevars
self.suboffsetvars = suboffsetvars
-
+
def __repr__(self):
return "<BufferAux %r>" % self.__dict__
self.init = init
self.overloaded_alternatives = []
self.assignments = []
-
+
def __repr__(self):
return "Entry(name=%s, type=%s)" % (self.name, self.type)
-
+
def redeclared(self, pos):
error(pos, "'%s' does not match previous declaration" % self.name)
error(self.pos, "Previous declaration is here")
-
+
def all_alternatives(self):
return [self] + self.overloaded_alternatives
scope_prefix = ""
in_cinclude = 0
nogil = 0
-
+
def __init__(self, name, outer_scope, parent_scope):
# The outer_scope is the next scope in the lookup chain.
# The parent_scope is used to derive the qualified name of this scope.
def start_branching(self, pos):
self.control_flow = self.control_flow.start_branch(pos)
-
+
def next_branch(self, pos):
self.control_flow = self.control_flow.next_branch(pos)
-
+
def finish_branching(self, pos):
self.control_flow = self.control_flow.finish_branch(pos)
-
+
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.qualified_name)
def qualifying_scope(self):
return self.parent_scope
-
+
def mangle(self, prefix, name = None):
if name:
return "%s%s%s" % (prefix, self.scope_prefix, name)
else:
return self.parent_scope.mangle(prefix, self.name)
-
+
def mangle_internal(self, name):
# Mangle an internal name so as not to clash with any
# user-defined name in this scope.
def global_scope(self):
# Return the module-level scope containing this scope.
return self.outer_scope.global_scope()
-
+
def builtin_scope(self):
# Return the module-level scope containing this scope.
return self.outer_scope.builtin_scope()
def declare(self, name, cname, type, pos, visibility):
# Create new entry, and add to dictionary if
- # name is not None. Reports a warning if already
+ # name is not None. Reports a warning if already
# declared.
if type.is_buffer and not isinstance(self, LocalScope):
error(pos, ERR_BUF_LOCALONLY)
if not self.in_cinclude and cname and re.match("^_[_A-Z]+$", cname):
- # See http://www.gnu.org/software/libc/manual/html_node/Reserved-Names.html#Reserved-Names
+ # See http://www.gnu.org/software/libc/manual/html_node/Reserved-Names.html#Reserved-Names
warning(pos, "'%s' is a reserved name in C." % cname, -1)
entries = self.entries
if name and name in entries:
entry.scope = self
entry.visibility = visibility
return entry
-
+
def qualify_name(self, name):
return EncodedString("%s.%s" % (self.qualified_name, name))
entry.is_const = 1
entry.value_node = value
return entry
-
- def declare_type(self, name, type, pos,
+
+ def declare_type(self, name, type, pos,
cname = None, visibility = 'private', defining = 1):
# Add an entry for a type definition.
if not cname:
self.type_entries.append(entry)
# here we would set as_variable to an object representing this type
return entry
-
+
def declare_typedef(self, name, base_type, pos, cname = None,
visibility = 'private'):
if not cname:
else:
cname = self.mangle(Naming.type_prefix, name)
try:
- type = PyrexTypes.create_typedef_type(name, base_type, cname,
+ type = PyrexTypes.create_typedef_type(name, base_type, cname,
(visibility == 'extern'))
except ValueError, e:
error(pos, e.args[0])
entry = self.declare_type(name, type, pos, cname, visibility)
type.qualified_name = entry.qualified_name
return entry
-
- def declare_struct_or_union(self, name, kind, scope,
+
+ def declare_struct_or_union(self, name, kind, scope,
typedef_flag, pos, cname = None, visibility = 'private',
packed = False):
# Add an entry for a struct or union definition.
if not scope and not entry.type.scope:
self.check_for_illegal_incomplete_ctypedef(typedef_flag, pos)
return entry
-
+
def declare_cpp_class(self, name, scope,
pos, cname = None, base_classes = [],
visibility = 'extern', templates = None):
for T in templates:
template_entry = entry.type.scope.declare(T.name, T.name, T, None, 'extern')
template_entry.is_type = 1
-
+
def declare_inherited_attributes(entry, base_classes):
for base_class in base_classes:
declare_inherited_attributes(entry, base_class.base_classes)
- entry.type.scope.declare_inherited_cpp_attributes(base_class.scope)
+ entry.type.scope.declare_inherited_cpp_attributes(base_class.scope)
if entry.type.scope:
declare_inherited_attributes(entry, base_classes)
if self.is_cpp_class_scope:
if typedef_flag != entry.type.typedef_flag:
error(pos, "'%s' previously declared using '%s'" % (
entry.name, ("cdef", "ctypedef")[entry.type.typedef_flag]))
-
+
def check_previous_visibility(self, entry, visibility, pos):
if entry.visibility != visibility:
error(pos, "'%s' previously declared as '%s'" % (
entry.name, entry.visibility))
-
+
def declare_enum(self, name, pos, cname, typedef_flag,
visibility = 'private'):
if name:
visibility = visibility)
entry.enum_values = []
self.sue_entries.append(entry)
- return entry
-
- def declare_var(self, name, type, pos,
+ return entry
+
+ def declare_var(self, name, type, pos,
cname = None, visibility = 'private', is_cdef = 0):
# Add an entry for a variable.
if not cname:
entry.is_variable = 1
self.control_flow.set_state((), (name, 'initialized'), False)
return entry
-
+
def declare_builtin(self, name, pos):
return self.outer_scope.declare_builtin(name, pos)
def register_pyfunction(self, entry):
self.pyfunc_entries.append(entry)
-
- def declare_cfunction(self, name, type, pos,
+
+ def declare_cfunction(self, name, type, pos,
cname = None, visibility = 'private', defining = 0,
api = 0, in_pxd = 0, modifiers = (), utility_code = None):
# Add an entry for a C function.
entry.func_modifiers = modifiers
entry.utility_code = utility_code
return entry
-
+
def add_cfunction(self, name, type, pos, cname, visibility, modifiers):
# Add a C function entry without giving it a func_cname.
entry = self.declare(name, cname, type, pos, visibility)
entry.func_modifiers = modifiers
self.cfunc_entries.append(entry)
return entry
-
+
def find(self, name, pos):
# Look up name, report error if not found.
entry = self.lookup(name)
return entry
else:
error(pos, "'%s' is not declared" % name)
-
+
def find_imported_module(self, path, pos):
# Look up qualified name, must be a module, report error if not found.
# Path is a list of names.
error(pos, "'%s' is not a cimported module" % '.'.join(path))
return None
return scope
-
+
def lookup(self, name):
# Look up name in this scope or an enclosing one.
# Return None if not found.
def lookup_here(self, name):
# Look up in this scope only, return None if not found.
return self.entries.get(name, None)
-
+
def lookup_target(self, name):
# Look up name in this scope only. Declare as Python
# variable if not found.
if not entry:
entry = self.declare_var(name, py_object_type, None)
return entry
-
+
def lookup_type(self, name):
entry = self.lookup(name)
if entry and entry.is_type:
return entry.type
-
+
def lookup_operator(self, operator, operands):
if operands[0].type.is_cpp_class:
obj_type = operands[0].type
def generate_library_function_declarations(self, code):
# Generate extern decls for C library funcs used.
pass
-
+
def defines_any(self, names):
# Test whether any of the given names are
# defined in this scope.
for name in names:
- if name in self.entries:
+ if name in self.entries:
return 1
return 0
-
+
def infer_types(self):
from TypeInference import get_type_inferer
get_type_inferer().infer_types(self)
-
+
def is_cpp(self):
outer = self.outer_scope
if outer is None:
def __init__(self):
Scope.__init__(self, Options.pre_import, None, None)
-
+
def declare_builtin(self, name, pos):
entry = self.declare(name, name, py_object_type, pos, 'private')
entry.is_variable = True
class BuiltinScope(Scope):
# The builtin namespace.
-
+
def __init__(self):
if Options.pre_import is None:
Scope.__init__(self, "__builtin__", None, None)
else:
Scope.__init__(self, "__builtin__", PreImportScope(), None)
self.type_names = {}
-
+
for name, definition in self.builtin_entries.iteritems():
cname, type = definition
self.declare_var(name, type, None, cname)
-
+
def declare_builtin(self, name, pos):
if not hasattr(builtins, name):
if self.outer_scope is not None:
return self.outer_scope.declare_builtin(name, pos)
else:
error(pos, "undeclared name not builtin: %s"%name)
-
+
def declare_builtin_cfunction(self, name, type, cname, python_equiv = None,
utility_code = None):
# If python_equiv == "*", the Python equivalent has the same name
var_entry.utility_code = utility_code
entry.as_variable = var_entry
return entry
-
+
def declare_builtin_type(self, name, cname, utility_code = None, objstruct_cname = None):
name = EncodedString(name)
type = PyrexTypes.BuiltinObjectType(name, cname, objstruct_cname)
# types_imported {PyrexType : 1} Set of types for which import code generated
# has_import_star boolean Module contains import *
# cpp boolean Compiling a C++ file
-
+
is_module_scope = 1
has_import_star = 0
self.namespace_cname = self.module_cname
for name in ['__builtins__', '__name__', '__file__', '__doc__']:
self.declare_var(EncodedString(name), py_object_type, None)
-
+
def qualifying_scope(self):
return self.parent_module
-
+
def global_scope(self):
return self
-
+
def declare_builtin(self, name, pos):
if not hasattr(builtins, name) and name != 'xrange':
# 'xrange' is special cased in Code.py
# has not been referenced before.
return self.global_scope().context.find_module(
module_name, relative_to = self.parent_module, pos = pos)
-
+
def find_submodule(self, name):
# Find and return scope for a submodule of this module,
# creating a new empty one if necessary. Doesn't parse .pxd.
scope = self.lookup_submodule(name)
if not scope:
- scope = ModuleScope(name,
+ scope = ModuleScope(name,
parent_module = self, context = self.context)
self.module_entries[name] = scope
return scope
-
+
def lookup_submodule(self, name):
# Return scope for submodule of this module, or None.
return self.module_entries.get(name, None)
-
+
def add_include_file(self, filename):
if filename not in self.python_include_files \
and filename not in self.include_files:
self.include_files.append(filename)
-
+
def add_imported_module(self, scope):
if scope not in self.cimported_modules:
for filename in scope.include_files:
self.cimported_modules.append(scope)
for m in scope.cimported_modules:
self.add_imported_module(m)
-
+
def add_imported_entry(self, name, entry, pos):
if entry not in self.entries:
self.entries[name] = entry
else:
warning(pos, "'%s' redeclared " % name, 0)
-
+
def declare_module(self, name, scope, pos):
# Declare a cimported module. This is represented as a
# Python module-level variable entry with a module
# every module appearing in an import list.
# It shouldn't be an error for a module
# name to appear again, and indeed the generated
- # code compiles fine.
+ # code compiles fine.
return entry
warning(pos, "'%s' redeclared " % name, 0)
return None
entry.as_module = scope
self.add_imported_module(scope)
return entry
-
- def declare_var(self, name, type, pos,
+
+ def declare_var(self, name, type, pos,
cname = None, visibility = 'private', is_cdef = 0):
# Add an entry for a global variable. If it is a Python
- # object type, and not declared with cdef, it will live
- # in the module dictionary, otherwise it will be a C
+ # object type, and not declared with cdef, it will live
+ # in the module dictionary, otherwise it will be a C
# global variable.
- entry = Scope.declare_var(self, name, type, pos,
+ entry = Scope.declare_var(self, name, type, pos,
cname, visibility, is_cdef)
if not visibility in ('private', 'public', 'extern'):
error(pos, "Module-level variable cannot be declared %s" % visibility)
entry.init = 0
self.var_entries.append(entry)
return entry
-
+
def declare_global(self, name, pos):
    """Ensure *name* exists in this module scope as a Python object variable."""
    existing = self.lookup_here(name)
    if not existing:
        self.declare_var(name, py_object_type, pos)
-
+
def use_utility_code(self, new_code):
if new_code is not None:
self.utility_code_list.append(new_code)
buffer_defaults = None):
# If this is a non-extern typedef class, expose the typedef, but use
# the non-typedef struct internally to avoid needing forward
- # declarations for anonymous structs.
+ # declarations for anonymous structs.
if typedef_flag and visibility != 'extern':
if visibility != 'public':
warning(pos, "ctypedef only valid for public and extern classes", 2)
elif not entry.in_cinclude:
type.objstruct_cname = self.mangle(Naming.objstruct_prefix, name)
else:
- error(entry.pos,
+ error(entry.pos,
"Object name required for 'public' or 'extern' C class")
self.attach_var_entry_to_c_class(entry)
self.c_class_entries.append(entry)
if objstruct_cname:
if type.objstruct_cname and type.objstruct_cname != objstruct_cname:
error(pos, "Object struct name differs from previous declaration")
- type.objstruct_cname = objstruct_cname
+ type.objstruct_cname = objstruct_cname
if typeobj_cname:
if type.typeobj_cname and type.typeobj_cname != typeobj_cname:
error(pos, "Type object name differs from previous declaration")
type.typeobj_cname = typeobj_cname
#
- # Return new or existing entry
+ # Return new or existing entry
#
return entry
-
+
def check_for_illegal_incomplete_ctypedef(self, typedef_flag, pos):
if typedef_flag and not self.in_cinclude:
error(pos, "Forward-referenced type must use 'cdef', not 'ctypedef'")
-
+
def allocate_vtable_names(self, entry):
# If extension type has a vtable, allocate vtable struct and
# slot names for it.
self.check_c_class(entry)
def check_c_functions(self):
- # Performs post-analysis checking making sure all
+ # Performs post-analysis checking making sure all
# defined c functions are actually implemented.
for name, entry in self.entries.items():
if entry.is_cfunction:
- if (entry.defined_in_pxd
+ if (entry.defined_in_pxd
and entry.scope is self
and entry.visibility != 'extern'
- and not entry.in_cinclude
+ and not entry.in_cinclude
and not entry.is_implemented):
error(entry.pos, "Non-extern C function '%s' declared but not defined" % name)
-
+
def attach_var_entry_to_c_class(self, entry):
# The name of an extension class has to serve as both a type
# name and a variable name holding the type object. It is
var_entry.is_cglobal = 1
var_entry.is_readonly = 1
entry.as_variable = var_entry
-
+
def is_cpp(self):
return self.cpp
-
+
def infer_types(self):
from TypeInference import PyObjectTypeInferer
PyObjectTypeInferer().infer_types(self)
-
+
class LocalScope(Scope):
def __init__(self, name, outer_scope, parent_scope = None):
if parent_scope is None:
parent_scope = outer_scope
Scope.__init__(self, name, outer_scope, parent_scope)
-
+
def mangle(self, prefix, name):
return prefix + name
self.arg_entries.append(entry)
self.control_flow.set_state((), (name, 'source'), 'arg')
return entry
-
- def declare_var(self, name, type, pos,
+
+ def declare_var(self, name, type, pos,
cname = None, visibility = 'private', is_cdef = 0):
# Add an entry for a local variable.
if visibility in ('public', 'readonly'):
error(pos, "Local variable cannot be declared %s" % visibility)
- entry = Scope.declare_var(self, name, type, pos,
+ entry = Scope.declare_var(self, name, type, pos,
cname, visibility, is_cdef)
if type.is_pyobject and not Options.init_local_none:
entry.init = "0"
entry.is_local = 1
self.var_entries.append(entry)
return entry
-
+
def declare_global(self, name, pos):
# Pull entry from global scope into local scope.
if self.lookup_here(name):
else:
entry = self.global_scope().lookup_target(name)
self.entries[name] = entry
-
+
def lookup(self, name):
# Look up name in this scope or an enclosing one.
# Return None if not found.
if entry.scope is not self and entry.scope.is_closure_scope:
if hasattr(entry.scope, "scope_class"):
raise InternalError, "lookup() after scope class created."
- # The actual c fragment for the different scopes differs
+ # The actual c fragment for the different scopes differs
# on the outside and inside, so we make a new entry
entry.in_closure = True
# Would it be better to declare_var here?
self.entries[name] = inner_entry
return inner_entry
return entry
-
+
def mangle_closure_cnames(self, outer_scope_cname):
for entry in self.entries.values():
if entry.from_closure:
# for entry in self.entries.values() + self.temp_entries:
# entry.in_closure = 1
# LocalScope.mangle_closure_cnames(self, scope_var)
-
+
# def mangle(self, prefix, name):
# return "%s->%s" % (self.cur_scope_cname, name)
# return "%s->%s" % (self.closure_cname, name)
class StructOrUnionScope(Scope):
# Namespace of a C struct or union.
-
+
def __init__(self, name="?"):
Scope.__init__(self, name, None, None)
- def declare_var(self, name, type, pos,
+ def declare_var(self, name, type, pos,
cname = None, visibility = 'private', is_cdef = 0, allow_pyobject = 0):
# Add an entry for an attribute.
if not cname:
"C struct/union member cannot be declared %s" % visibility)
return entry
- def declare_cfunction(self, name, type, pos,
+ def declare_cfunction(self, name, type, pos,
cname = None, visibility = 'private', defining = 0,
api = 0, in_pxd = 0, modifiers = ()): # currently no utility code ...
return self.declare_var(name, type, pos, cname, visibility)
if entry:
return entry
if name == "classmethod":
- # We don't want to use the builtin classmethod here 'cause it won't do the
- # right thing in this scope (as the class memebers aren't still functions).
- # Don't want to add a cfunction to this scope 'cause that would mess with
- # the type definition, so we just return the right entry.
+ # We don't want to use the builtin classmethod here 'cause it won't do the
+ # right thing in this scope (as the class memebers aren't still functions).
+ # Don't want to add a cfunction to this scope 'cause that would mess with
+ # the type definition, so we just return the right entry.
self.use_utility_code(classmethod_utility_code)
entry = Entry(
- "classmethod",
- "__Pyx_Method_ClassMethod",
+ "classmethod",
+ "__Pyx_Method_ClassMethod",
PyrexTypes.CFuncType(
py_object_type,
[PyrexTypes.CFuncTypeArg("", py_object_type, None)], 0, 0))
entry.is_cfunction = 1
return entry
-
+
class PyClassScope(ClassScope):
# Namespace of a Python class.
# class_obj_cname string C variable holding class object
is_py_class_scope = 1
-
- def declare_var(self, name, type, pos,
+
+ def declare_var(self, name, type, pos,
cname = None, visibility = 'private', is_cdef = 0):
if type is unspecified_type:
type = py_object_type
# Add an entry for a class attribute.
- entry = Scope.declare_var(self, name, type, pos,
+ entry = Scope.declare_var(self, name, type, pos,
cname, visibility, is_cdef)
entry.is_pyglobal = 1
entry.is_pyclass_attr = 1
# defined boolean Defined in .pxd file
# implemented boolean Defined in .pyx file
# inherited_var_entries [Entry] Adapted var entries from base class
-
+
is_c_class_scope = 1
-
+
def __init__(self, name, outer_scope, visibility):
ClassScope.__init__(self, name, outer_scope)
if visibility != 'extern':
self.inherited_var_entries = []
self.defined = 0
self.implemented = 0
-
+
def needs_gc(self):
# If the type or any of its base types have Python-valued
# C attributes, then it needs to participate in GC.
self.parent_type.base_type.scope is not None and
self.parent_type.base_type.scope.needs_gc())
- def declare_var(self, name, type, pos,
+ def declare_var(self, name, type, pos,
cname = None, visibility = 'private', is_cdef = 0):
if is_cdef:
# Add an entry for an attribute.
"C attributes cannot be added in implementation part of"
" extension type defined in a pxd")
if get_special_method_signature(name):
- error(pos,
+ error(pos,
"The name '%s' is reserved for a special method."
% name)
if not cname:
if type is unspecified_type:
type = py_object_type
# Add an entry for a class attribute.
- entry = Scope.declare_var(self, name, type, pos,
+ entry = Scope.declare_var(self, name, type, pos,
cname, visibility, is_cdef)
entry.is_member = 1
entry.is_pyglobal = 1 # xxx: is_pyglobal changes behaviour in so many places that
self.pyfunc_entries.append(entry)
return entry
-
+
def lookup_here(self, name):
if name == "__new__":
name = EncodedString("__cinit__")
return ClassScope.lookup_here(self, name)
-
+
def declare_cfunction(self, name, type, pos,
cname = None, visibility = 'private',
defining = 0, api = 0, in_pxd = 0, modifiers = (),
entry.func_cname = self.mangle(Naming.func_prefix, name)
entry.utility_code = utility_code
return entry
-
+
def add_cfunction(self, name, type, pos, cname, visibility, modifiers):
# Add a cfunction entry without giving it a func_cname.
prev_entry = self.lookup_here(name)
entry = self.declare(name, name, py_object_type, pos, 'private')
entry.is_property = 1
entry.doc = doc
- entry.scope = PropertyScope(name,
+ entry.scope = PropertyScope(name,
outer_scope = self.global_scope(), parent_scope = self)
entry.scope.parent_type = self.parent_type
self.property_entries.append(entry)
return entry
-
+
def declare_inherited_c_attributes(self, base_scope):
# Declare entries for all the C attributes of an
# inherited type, with cnames modified appropriately
return "%s.%s" % (Naming.obj_base_cname, base_entry.cname)
for base_entry in \
base_scope.inherited_var_entries + base_scope.var_entries:
- entry = self.declare(base_entry.name, adapt(base_entry.cname),
+ entry = self.declare(base_entry.name, adapt(base_entry.cname),
base_entry.type, None, 'private')
entry.is_variable = 1
self.inherited_var_entries.append(entry)
base_entry.pos, adapt(base_entry.cname),
base_entry.visibility, base_entry.func_modifiers)
entry.is_inherited = 1
-
-
+
+
class CppClassScope(Scope):
# Namespace of a C++ class.
-
+
is_cpp_class_scope = 1
-
+
default_constructor = None
-
+
def __init__(self, name, outer_scope):
Scope.__init__(self, name, outer_scope, None)
self.directives = outer_scope.directives
self.inherited_var_entries = []
- def declare_var(self, name, type, pos,
+ def declare_var(self, name, type, pos,
cname = None, visibility = 'extern', is_cdef = 0, allow_pyobject = 0):
# Add an entry for an attribute.
if not cname:
error(pos,
"C++ class member cannot be a Python object")
return entry
-
+
def check_base_default_constructor(self, pos):
# Look for default constructors in all base classes.
if self.default_constructor is None:
#print base_entry.name, self.entries
if base_entry.name in self.entries:
base_entry.name
- entry = self.declare(base_entry.name, base_entry.cname,
+ entry = self.declare(base_entry.name, base_entry.cname,
base_entry.type, None, 'extern')
entry.is_variable = 1
self.inherited_var_entries.append(entry)
base_entry.visibility, base_entry.func_modifiers,
utility_code = base_entry.utility_code)
entry.is_inherited = 1
-
+
def specialize(self, values):
scope = CppClassScope(self.name, self.outer_scope)
for entry in self.entries.values():
return scope
def add_include_file(self, filename):
- self.outer_scope.add_include_file(filename)
-
+ self.outer_scope.add_include_file(filename)
+
class PropertyScope(Scope):
# Scope holding the __get__, __set__ and __del__ methods for
# a property of an extension type.
# parent_type PyExtensionType The type to which the property belongs
is_property_scope = 1
-
+
def declare_pyfunction(self, name, pos, allow_redefine=False):
# Add an entry for a method.
signature = get_property_accessor_signature(name)
def not_parseable(self, expected_error, s):
e = self.should_fail(lambda: self.fragment(s), Errors.CompileError)
self.assertEqual(expected_error, e.message_only)
-
+
def test_basic(self):
t = self.parse(u"cdef object[float, 4, ndim=2, foo=foo] x")
bufnode = t.stats[0].base_type
self.assertEqual(2, len(bufnode.positional_args))
# print bufnode.dump()
# should put more here...
-
+
def test_type_pos(self):
self.parse(u"cdef object[short unsigned int, 3] x")
self.parse_opts(opts, expect_error=True)
# e = self.should_fail(lambda: self.parse_opts(opts))
self.assertEqual(expected_err, self.error.message_only)
-
+
def __test_basic(self):
buf = self.parse_opts(u"unsigned short int, 3")
self.assert_(isinstance(buf.dtype_node, CSimpleBaseTypeNode))
self.assert_(isinstance(buf.dtype_node, CSimpleBaseTypeNode))
self.assert_(buf.dtype_node.signed == 0 and buf.dtype_node.longness == -1)
self.assertEqual(3, buf.ndim)
-
+
def __test_ndim(self):
self.parse_opts(u"int, 2")
self.non_parse(ERR_BUF_NDIM, u"int, 'a'")
def decorated():
pass
""")
-
+
self.assertCode(u"""
def decorator(fun):
return fun
body: ExprStatNode
expr: NameNode
""", self.treetypes(t))
-
+
def test_wrap_singlestat(self):
t = self.run_pipeline([NormalizeTree(None)], u"if x: y")
self.assertLines(u"""
stats[0]: ExprStatNode
expr: NameNode
""", self.treetypes(t))
-
+
def test_pass_eliminated(self):
t = self.run_pipeline([NormalizeTree(None)], u"pass")
$0_2(None, None, None)
""", t)
-
+
# TODO: Re-enable once they're more robust.
if sys.version_info[:2] >= (2, 5) and False:
DebuggerTestCase = object
class TestDebugTransform(DebuggerTestCase):
-
+
def elem_hasattrs(self, elem, attrs):
# we shall supporteth python 2.3 !
return all([attr in elem.attrib for attr in attrs])
-
+
def test_debug_info(self):
try:
assert os.path.exists(self.debug_dest)
-
+
t = DebugWriter.etree.parse(self.debug_dest)
# the xpath of the standard ElementTree is primitive, don't use
# anything fancy
xml_globals = dict(
[(e.attrib['name'], e.attrib['type']) for e in L])
self.assertEqual(len(L), len(xml_globals))
-
+
L = list(t.find('/Module/Functions'))
assert L
xml_funcs = dict([(e.attrib['qualified_name'], e) for e in L])
self.assertEqual(len(L), len(xml_funcs))
-
+
# test globals
self.assertEqual('CObject', xml_globals.get('c_var'))
self.assertEqual('PythonObject', xml_globals.get('python_var'))
-
+
# test functions
funcnames = 'codefile.spam', 'codefile.ham', 'codefile.eggs'
required_xml_attrs = 'name', 'cname', 'qualified_name'
assert all([f in xml_funcs for f in funcnames])
spam, ham, eggs = [xml_funcs[funcname] for funcname in funcnames]
-
+
self.assertEqual(spam.attrib['name'], 'spam')
self.assertNotEqual('spam', spam.attrib['cname'])
assert self.elem_hasattrs(spam, required_xml_attrs)
names = [e.attrib['name'] for e in spam_locals]
self.assertEqual(list('abcd'), names)
assert self.elem_hasattrs(spam_locals[0], required_xml_attrs)
-
+
# test arguments of functions
spam_arguments = list(spam.find('Arguments'))
assert spam_arguments
self.assertEqual(1, len(list(spam_arguments)))
-
+
# test step-into functions
step_into = spam.find('StepIntoFunctions')
spam_stepinto = [x.attrib['name'] for x in step_into]
except:
print open(self.debug_dest).read()
raise
-
-
+
+
if __name__ == "__main__":
import unittest
import Cython.Compiler.Naming as Naming
class TestTreeFragments(CythonTest):
-
+
def test_basic(self):
F = self.fragment(u"x = 4")
T = F.copy()
self.assertCode(u"x = 4", T)
-
+
def test_copy_is_taken(self):
F = self.fragment(u"if True: x = 4")
T1 = F.root
v = F.root.stats[1].rhs.operand2.operand1
a = T.stats[1].rhs.operand2.operand1
self.assertEquals(v.pos, a.pos)
-
+
def test_temps(self):
TemplateTransform.temp_name_counter = 0
F = self.fragment(u"""
def __init__(self, include_directories, name):
Main.Context.__init__(self, include_directories, {})
self.module_name = name
-
+
def find_module(self, module_name, relative_to = None, pos = None, need_pxd = 1):
if module_name != self.module_name:
raise AssertionError("Not yet supporting any cimports/includes from string code snippets")
return ModuleScope(module_name, parent_module = None, context = self)
-
+
def parse_from_strings(name, code, pxds={}, level=None, initial_pos=None):
"""
Utility method to parse a (unicode) string of code. This is mostly
used for internal Cython compiler purposes (creating code snippets
that transforms should emit, as well as unit testing).
-
+
code - a unicode string containing Cython (module-level) code
name - a descriptive name for the code source (to use in error messages etc.)
"""
def __init__(self, pos):
super(ApplyPositionAndCopy, self).__init__()
self.pos = pos
-
+
def visit_Node(self, node):
copy = super(ApplyPositionAndCopy, self).visit_Node(node)
copy.pos = self.pos
class TemplateTransform(VisitorTransform):
"""
Makes a copy of a template tree while doing substitutions.
-
+
A dictionary "substitutions" should be passed in when calling
the transform; mapping names to replacement nodes. Then replacement
happens like this:
Also a list "temps" should be passed. Any names listed will
be transformed into anonymous, temporary names.
-
+
Currently supported for tempnames is:
NameNode
(various function and class definition nodes etc. should be added to this)
-
+
Each replacement node gets the position of the substituted node
recursively applied to every member node.
"""
c.pos = self.pos
self.visitchildren(c)
return c
-
+
def try_substitution(self, node, key):
sub = self.substitutions.get(key)
if sub is not None:
return ApplyPositionAndCopy(pos)(sub)
else:
return self.visit_Node(node) # make copy as usual
-
+
def visit_NameNode(self, node):
temphandle = self.tempmap.get(node.name)
if temphandle:
return self.try_substitution(node, node.expr.name)
else:
return self.visit_Node(node)
-
+
def copy_code_tree(node):
return TreeCopier()(node)
minindent = min([len(INDENT_RE.match(x).group(0)) for x in lines])
lines = [x[minindent:] for x in lines]
return lines
-
+
class TreeFragment(object):
def __init__(self, code, name="(tree fragment)", pxds={}, temps=[], pipeline=[], level=None, initial_pos=None):
if isinstance(code, unicode):
- def fmt(x): return u"\n".join(strip_common_indent(x.split(u"\n")))
-
+ def fmt(x): return u"\n".join(strip_common_indent(x.split(u"\n")))
+
fmt_code = fmt(code)
fmt_pxds = {}
for key, value in pxds.iteritems():
for node in result:
for child in iter_recursive(node):
yield child
-
+
return select
def handle_attribute(next, token):
for result_node in rhs_select(subresult):
yield node
return select
-
+
operations = {
"@": handle_attribute,
object_expr = TypedExprNode(py_object_type)
class MarkAssignments(CythonTransform):
-
+
def mark_assignment(self, lhs, rhs):
if isinstance(lhs, (ExprNodes.NameNode, Nodes.PyArgDeclNode)):
if lhs.entry is None:
else:
# Could use this info to infer cdef class attributes...
pass
-
+
def visit_SingleAssignmentNode(self, node):
self.mark_assignment(node.lhs, node.rhs)
self.visitchildren(node)
self.mark_assignment(lhs, node.rhs)
self.visitchildren(node)
return node
-
+
def visit_InPlaceAssignmentNode(self, node):
self.mark_assignment(node.lhs, node.create_binop_node())
self.visitchildren(node)
self.mark_assignment(node.target, arg)
if len(sequence.args) > 2:
self.mark_assignment(
- node.target,
+ node.target,
ExprNodes.binop_node(node.pos,
'+',
sequence.args[0],
self.mark_assignment(node.target, node.bound1)
if node.step is not None:
self.mark_assignment(node.target,
- ExprNodes.binop_node(node.pos,
- '+',
- node.bound1,
+ ExprNodes.binop_node(node.pos,
+ '+',
+ node.bound1,
node.step))
self.visitchildren(node)
return node
self.mark_assignment(node.target, object_expr)
self.visitchildren(node)
return node
-
+
def visit_FromCImportStatNode(self, node):
pass # Can't be assigned to...
def __call__(self, root):
self.env_stack = []
self.env = root.scope
- return super(MarkOverflowingArithmetic, self).__call__(root)
+ return super(MarkOverflowingArithmetic, self).__call__(root)
def visit_safe_node(self, node):
self.might_overflow, saved = False, self.might_overflow
self.visitchildren(node)
self.might_overflow = saved
return node
-
+
def visit_FuncDefNode(self, node):
self.env_stack.append(self.env)
self.env = node.local_scope
if entry:
entry.might_overflow = True
return node
-
+
def visit_BinopNode(self, node):
if node.operator in '&|^':
return self.visit_neutral_node(node)
else:
return self.visit_dangerous_node(node)
-
+
visit_UnopNode = visit_neutral_node
-
+
visit_UnaryMinusNode = visit_dangerous_node
-
+
visit_InPlaceAssignmentNode = visit_dangerous_node
-
+
visit_Node = visit_safe_node
-
+
def visit_assignment(self, lhs, rhs):
- if (isinstance(rhs, ExprNodes.IntNode)
+ if (isinstance(rhs, ExprNodes.IntNode)
and isinstance(lhs, ExprNodes.NameNode)
and Utils.long_literal(rhs.value)):
entry = lhs.entry or self.env.lookup(lhs.name)
if entry:
entry.might_overflow = True
-
+
def visit_SingleAssignmentNode(self, node):
self.visit_assignment(node.lhs, node.rhs)
self.visitchildren(node)
return node
-
+
def visit_CascadedAssignmentNode(self, node):
for lhs in node.lhs_list:
self.visit_assignment(lhs, node.rhs)
break
if not ready_to_infer:
break
-
+
# We can't figure out the rest with this algorithm, let them be objects.
for entry in dependancies_by_entry:
entry.type = py_object_type
# operations without other int types, so this is safe, too
return result_type
elif result_type.is_ptr and not (result_type.is_int and result_type.rank == 0):
- # Any pointer except (signed|unsigned|) char* can't implicitly
+ # Any pointer except (signed|unsigned|) char* can't implicitly
# become a PyObject.
return result_type
elif result_type.is_cpp_class:
# used, won't arise in pure Python, and there shouldn't be side
# effects, so I'm declaring this safe.
return result_type
- # TODO: double complex should be OK as well, but we need
+ # TODO: double complex should be OK as well, but we need
# to make sure everything is supported.
elif result_type.is_int and not might_overflow:
return result_type
import StringEncoding
import sys
-invisible = ['__cinit__', '__dealloc__', '__richcmp__',
+invisible = ['__cinit__', '__dealloc__', '__richcmp__',
'__nonzero__', '__bool__']
class Signature(object):
# '*' rest of args passed as generic Python
# arg tuple and kw dict (must be last
# char in format string)
-
+
format_map = {
'O': PyrexTypes.py_object_type,
'v': PyrexTypes.c_void_type,
# 'T', '-' and '*' are handled otherwise
# and are not looked up in here
}
-
+
error_value_map = {
'O': "NULL",
'T': "NULL",
'r': "-1",
'z': "-1",
}
-
+
def __init__(self, arg_format, ret_format):
self.has_dummy_arg = 0
self.has_generic_args = 0
self.fixed_arg_format = arg_format
self.ret_format = ret_format
self.error_value = self.error_value_map.get(ret_format, None)
-
+
def num_fixed_args(self):
return len(self.fixed_arg_format)
-
+
def is_self_arg(self, i):
# argument is 'self' for methods or 'class' for classmethods
return self.fixed_arg_format[i] == 'T'
-
+
def returns_self_type(self):
# return type is same as 'self' argument type
return self.ret_format == 'T'
-
+
def fixed_arg_type(self, i):
return self.format_map[self.fixed_arg_format[i]]
-
+
def return_type(self):
return self.format_map[self.ret_format]
def exception_value(self):
return self.error_value_map.get(self.ret_format)
-
+
def function_type(self, self_arg_override=None):
# Construct a C function type descriptor for this signature
args = []
if preprocessor_guard:
code.putln("#endif")
- # Some C implementations have trouble statically
- # initialising a global with a pointer to an extern
+ # Some C implementations have trouble statically
+ # initialising a global with a pointer to an extern
# function, so we initialise some of the type slots
# in the module init function instead.
value = self.slot_code(scope)
if value != "0":
code.putln("%s.%s = %s;" % (
- scope.parent_type.typeobj_cname,
- self.slot_name,
+ scope.parent_type.typeobj_cname,
+ self.slot_name,
value
)
)
# Descriptor for a type slot with a fixed value.
#
# value string
-
+
def __init__(self, slot_name, value, py3=True, py2=True, ifdef=None):
SlotDescriptor.__init__(self, slot_name, py3=py3, py2=py2, ifdef=ifdef)
self.value = value
-
+
def slot_code(self, scope):
return self.value
class EmptySlot(FixedSlot):
# Descriptor for a type slot whose value is always 0.
-
+
def __init__(self, slot_name, py3=True, py2=True, ifdef=None):
FixedSlot.__init__(self, slot_name, "0", py3=py3, py2=py2, ifdef=ifdef)
# signature Signature
# method_name string The __xxx__ name of the method
# alternatives [string] Alternative list of __xxx__ names for the method
-
- def __init__(self, signature, slot_name, method_name, fallback=None,
+
+ def __init__(self, signature, slot_name, method_name, fallback=None,
py3=True, py2=True, ifdef=None):
SlotDescriptor.__init__(self, slot_name, py3=py3, py2=py2, ifdef=ifdef)
self.signature = signature
class GCDependentSlot(InternalMethodSlot):
# Descriptor for a slot whose value depends on whether
# the type participates in GC.
-
+
def __init__(self, slot_name, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
-
+
def slot_code(self, scope):
if not scope.needs_gc():
return "0"
if entry.visibility != 'extern':
return self.slot_code(parent_type_scope)
return InternalMethodSlot.slot_code(self, scope)
-
-
+
+
class ConstructorSlot(InternalMethodSlot):
# Descriptor for tp_new and tp_dealloc.
-
+
def __init__(self, slot_name, method, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
self.method = method
-
+
def slot_code(self, scope):
if scope.parent_type.base_type \
and not scope.has_pyobject_attrs \
# defined, the method will not be synthesized and an
# alternative default value will be placed in the type
# slot.
-
+
def __init__(self, slot_name, user_methods, default_value, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
self.user_methods = user_methods
self.default_value = default_value
-
+
def slot_code(self, scope):
if scope.defines_any(self.user_methods):
return InternalMethodSlot.slot_code(self, scope)
class TypeFlagsSlot(SlotDescriptor):
# Descriptor for the type flags slot.
-
+
def slot_code(self, scope):
value = "Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER"
if not scope.directives['final']:
class DocStringSlot(SlotDescriptor):
# Descriptor for the docstring slot.
-
+
def slot_code(self, scope):
if scope.doc is not None:
if scope.doc.is_unicode:
# Descriptor for a substructure of the type object.
#
# sub_slots [SlotDescriptor]
-
+
def __init__(self, sub_slots, slot_type, slot_name):
SlotDescriptor.__init__(self, slot_name)
self.sub_slots = sub_slots
self.slot_type = slot_type
substructures.append(self)
-
+
def substructure_cname(self, scope):
return "%s%s_%s" % (Naming.pyrex_prefix, self.slot_name, scope.class_name)
-
+
def slot_code(self, scope):
return "&%s" % self.substructure_cname(scope)
-
+
def generate_substructure(self, scope, code):
code.putln("")
code.putln(
class MethodTableSlot(SlotDescriptor):
# Slot descriptor for the method table.
-
+
def slot_code(self, scope):
return scope.method_table_cname
class MemberTableSlot(SlotDescriptor):
# Slot descriptor for the table of Python-accessible attributes.
-
+
def slot_code(self, scope):
return "0"
class GetSetSlot(SlotDescriptor):
# Slot descriptor for the table of attribute get & set methods.
-
+
def slot_code(self, scope):
if scope.property_entries:
return scope.getset_table_cname
def __init__(self, name):
SlotDescriptor.__init__(self, name, dynamic = 1)
-
+
def generate_dynamic_init_code(self, scope, code):
base_type = scope.parent_type.base_type
if base_type:
code.putln("%s.%s = %s;" % (
- scope.parent_type.typeobj_cname,
+ scope.parent_type.typeobj_cname,
self.slot_name,
base_type.typeptr_cname))
-
+
# The following dictionary maps __xxx__ method names to slot descriptors.
method_name_to_slot = {}
# Return signature of accessor for an extension type
# property, else None.
return property_accessor_signatures.get(name)
-
+
def get_base_slot_function(scope, slot):
- # Returns the function implementing this slot in the baseclass.
+ # Returns the function implementing this slot in the baseclass.
# This is useful for enabling the compiler to optimize calls
- # that recursively climb the class hierarchy.
+ # that recursively climb the class hierarchy.
base_type = scope.parent_type.base_type
if scope.parent_scope is base_type.scope.parent_scope:
parent_slot = slot.slot_code(base_type.scope)
MethodSlot(unaryfunc, "nb_float", "__float__"),
MethodSlot(unaryfunc, "nb_oct", "__oct__", py3 = False),
MethodSlot(unaryfunc, "nb_hex", "__hex__", py3 = False),
-
+
# Added in release 2.0
MethodSlot(ibinaryfunc, "nb_inplace_add", "__iadd__"),
MethodSlot(ibinaryfunc, "nb_inplace_subtract", "__isub__"),
MethodSlot(ibinaryfunc, "nb_inplace_and", "__iand__"),
MethodSlot(ibinaryfunc, "nb_inplace_xor", "__ixor__"),
MethodSlot(ibinaryfunc, "nb_inplace_or", "__ior__"),
-
+
# Added in release 2.2
# The following require the Py_TPFLAGS_HAVE_CLASS flag
MethodSlot(binaryfunc, "nb_floor_divide", "__floordiv__"),
EmptySlot("tp_setattr"),
MethodSlot(cmpfunc, "tp_compare", "__cmp__", py3 = '<RESERVED>'),
MethodSlot(reprfunc, "tp_repr", "__repr__"),
-
+
SuiteSlot(PyNumberMethods, "PyNumberMethods", "tp_as_number"),
SuiteSlot(PySequenceMethods, "PySequenceMethods", "tp_as_sequence"),
SuiteSlot(PyMappingMethods, "PyMappingMethods", "tp_as_mapping"),
MethodSlot(hashfunc, "tp_hash", "__hash__"),
MethodSlot(callfunc, "tp_call", "__call__"),
MethodSlot(reprfunc, "tp_str", "__str__"),
-
+
SyntheticSlot("tp_getattro", ["__getattr__","__getattribute__"], "0"), #"PyObject_GenericGetAttr"),
SyntheticSlot("tp_setattro", ["__setattr__", "__delattr__"], "0"), #"PyObject_GenericSetAttr"),
SuiteSlot(PyBufferProcs, "PyBufferProcs", "tp_as_buffer"),
-
+
TypeFlagsSlot("tp_flags"),
DocStringSlot("tp_doc"),
MethodTableSlot("tp_methods"),
MemberTableSlot("tp_members"),
GetSetSlot("tp_getset"),
-
+
BaseClassSlot("tp_base"), #EmptySlot("tp_base"),
EmptySlot("tp_dict"),
-
+
SyntheticSlot("tp_descr_get", ["__get__"], "0"),
SyntheticSlot("tp_descr_set", ["__set__", "__delete__"], "0"),
-
+
EmptySlot("tp_dictoffset"),
-
+
MethodSlot(initproc, "tp_init", "__init__"),
EmptySlot("tp_alloc"), #FixedSlot("tp_alloc", "PyType_GenericAlloc"),
InternalMethodSlot("tp_new"),
EmptySlot("tp_free"),
-
+
EmptySlot("tp_is_gc"),
EmptySlot("tp_bases"),
EmptySlot("tp_mro"),
MethodSlot(descrdelfunc, "", "__delete__")
-# Method flags for python-exposed methods.
+# Method flags for python-exposed methods.
method_noargs = "METH_NOARGS"
method_onearg = "METH_O"
def analyse_types(self, env):
assert self.type == self.handle.type
-
+
def analyse_target_types(self, env):
assert self.type == self.handle.type
class TempsBlockNode(Node):
# THIS IS DEPRECATED, USE LetNode instead
-
+
"""
Creates a block which allocates temporary variables.
This is used by transforms to output constructs that need
to make use of a temporary variable. Simply pass the types
of the needed temporaries to the constructor.
-
+
The variables can be referred to using a TempRefNode
(which can be constructed by calling get_ref_node).
"""
# temps [TempHandle]
# body StatNode
-
+
child_attrs = ["body"]
def generate_execution_code(self, code):
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
-
+
def analyse_expressions(self, env):
self.body.analyse_expressions(env)
-
+
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
-
+
def annotate(self, code):
self.body.annotate(code)
def generate_result_code(self, code):
pass
-
+
def generate_disposal_code(self, code):
pass
-
+
def generate_assignment_code(self, rhs, code):
if self.type.is_pyobject:
rhs.make_owned_reference(code)
def allocate_temps(self, env):
pass
-
+
def release_temp(self, env):
pass
self.setup_temp_expr(code)
self.subexpression.generate_evaluation_code(code)
self.teardown_temp_expr(code)
-
+
LetRefNode = ResultRefNode
class LetNode(Nodes.StatNode, LetNodeMixin):
def analyse_declarations(self, env):
self.temp_expression.analyse_declarations(env)
self.body.analyse_declarations(env)
-
+
def analyse_expressions(self, env):
self.temp_expression.analyse_expressions(env)
self.body.analyse_expressions(env)
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
-
+
def analyse_types(self, env):
self.body.analyse_expressions(env)
containing child nodes or lists of child nodes. Lists are not considered
part of the tree structure (i.e. contained nodes are considered direct
children of the parent node).
-
+
visit_children visits each of the children of a given node (see the visit_children
documentation). When recursing the tree using visit_children, an attribute
access_path is maintained which gives information about the current location
in the tree as a stack of tuples: (parent_node, attrname, index), representing
the node, attribute and optional list index that was taken in each step in the path to
the current node.
-
+
Example:
-
+
>>> class SampleNode(object):
... child_attrs = ["head", "body"]
... def __init__(self, value, head=None, body=None):
def dump_node(self, node, indent=0):
ignored = list(node.child_attrs) + [u'child_attrs', u'pos',
- u'gil_message', u'cpp_message',
+ u'gil_message', u'cpp_message',
u'subexprs']
values = []
pos = node.pos
"""
Visits the children of the given parent. If parent is None, returns
immediately (returning None).
-
+
The return value is a dictionary giving the results for each
child (mapping the attribute name to either the return value
or a list of return values (in the case of multiple children
"""
A tree transform is a base class for visitors that wants to do stream
processing of the structure (rather than attributes etc.) of a tree.
-
+
It implements __call__ to simply visit the argument node.
-
+
It requires the visitor methods to return the nodes which should take
the place of the visited node in the result tree (which can be the same
or one or more replacement). Specifically, if the return value from
a visitor method is:
-
+
- [] or None; the visited node will be removed (set to None if an attribute and
removed if in a list)
- A single node; the visited node will be replaced by the returned node.
else:
newlist.append(x)
setattr(parent, attr, newlist)
- return result
+ return result
def recurse_to_children(self, node):
self.visitchildren(node)
return node
-
+
def __call__(self, root):
return self._visit(root)
# Keeps track of type of scopes
#scope_type: can be either of 'module', 'function', 'cclass', 'pyclass', 'struct'
#scope_node: the node that owns the current scope
-
+
def visit_ModuleNode(self, node):
self.scope_type = 'module'
self.scope_node = node
self.visitchildren(node)
self.scope_type, self.scope_node = prev
return node
-
+
def visit_CClassDefNode(self, node):
return self.visit_scope(node, 'cclass')
class EnvTransform(CythonTransform):
"""
- This transformation keeps a stack of the environments.
+ This transformation keeps a stack of the environments.
"""
def __call__(self, root):
self.env_stack = [root.scope]
- return super(EnvTransform, self).__call__(root)
+ return super(EnvTransform, self).__call__(root)
def current_env(self):
return self.env_stack[-1]
path to the cython project directory should be given (the parent directory of
cython_debug).
-Additional gdb args can be provided only if a path to the project directory is
+Additional gdb args can be provided only if a path to the project directory is
given.
"""
def make_command_file(path_to_debug_info, prefix_code='', no_import=False):
if not no_import:
- pattern = os.path.join(path_to_debug_info,
+ pattern = os.path.join(path_to_debug_info,
'cython_debug',
'cython_debug_info_*')
debug_files = glob.glob(pattern)
if not debug_files:
sys.exit('%s.\nNo debug files were found in %s. Aborting.' % (
usage, os.path.abspath(path_to_debug_info)))
-
+
fd, tempfilename = tempfile.mkstemp()
f = os.fdopen(fd, 'w')
f.write(prefix_code)
f.write('set breakpoint pending on\n')
f.write("set print pretty on\n")
f.write('python from Cython.Debugger import libcython, libpython\n')
-
+
if no_import:
# don't do this, this overrides file command in .gdbinit
# f.write("file %s\n" % sys.executable)
'stripped). Some functionality may not work (properly).\\n')
end
'''))
-
+
f.close()
-
+
return tempfilename
def main(path_to_debug_info=None, gdb_argv=None, no_import=False):
"""
Start the Cython debugger. This tells gdb to import the Cython and Python
- extensions (libcython.py and libpython.py) and it enables gdb's pending
+ extensions (libcython.py and libpython.py) and it enables gdb's pending
breakpoints.
-
+
path_to_debug_info is the path to the Cython build directory
gdb_argv is the list of options to gdb
no_import tells cygdb whether it should import debug information
path_to_debug_info = sys.argv[1]
else:
path_to_debug_info = os.curdir
-
+
if gdb_argv is None:
gdb_argv = sys.argv[2:]
-
+
if path_to_debug_info == '--':
no_import = True
-
+
tempfilename = make_command_file(path_to_debug_info, no_import=no_import)
p = subprocess.Popen(['gdb', '-command', tempfilename] + gdb_argv)
while True:
class CythonDebugWriter(object):
"""
Class to output debugging information for cygdb
-
+
It writes debug information to cython_debug/cython_debug_info_<modulename>
in the build directory.
"""
-
+
def __init__(self, output_dir):
if etree is None:
raise Errors.NoElementTreeInstalledException()
-
+
self.output_dir = os.path.join(output_dir, 'cython_debug')
self.tb = etree.TreeBuilder()
# set by Cython.Compiler.ParseTreeTransforms.DebugTransform
self.module_name = None
self.start('cython_debug', attrs=dict(version='1.0'))
-
+
def start(self, name, attrs=None):
self.tb.start(name, attrs or {})
-
+
def end(self, name):
self.tb.end(name)
kw = {}
if have_lxml:
kw['pretty_print'] = True
-
+
fn = "cython_debug_info_" + self.module_name
et.write(os.path.join(self.output_dir, fn), encoding="UTF-8", **kw)
-
+
interpreter_path = os.path.join(self.output_dir, 'interpreter')
with open(interpreter_path, 'w') as f:
f.write(sys.executable)
source_to_lineno = dict((line.strip(), i + 1) for i, line in enumerate(f))
class DebuggerTestCase(unittest.TestCase):
-
+
def setUp(self):
"""
Run gdb and have cygdb import the debug information from the code
"""
self.tempdir = tempfile.mkdtemp()
self.destfile = os.path.join(self.tempdir, 'codefile.pyx')
- self.debug_dest = os.path.join(self.tempdir,
- 'cython_debug',
+ self.debug_dest = os.path.join(self.tempdir,
+ 'cython_debug',
'cython_debug_info_codefile')
self.cfuncs_destfile = os.path.join(self.tempdir, 'cfuncs')
-
+
self.cwd = os.getcwd()
os.chdir(self.tempdir)
-
+
shutil.copy(codefile, self.destfile)
shutil.copy(cfuncs_file, self.cfuncs_destfile + '.c')
-
+
compiler = ccompiler.new_compiler()
compiler.compile(['cfuncs.c'], debug=True, extra_postargs=['-fPIC'])
-
+
opts = dict(
test_directory=self.tempdir,
module='codefile',
)
-
+
cython_compile_testcase = runtests.CythonCompileTestCase(
workdir=self.tempdir,
# we clean up everything (not only compiled files)
cleanup_workdir=False,
**opts
)
-
+
cython_compile_testcase.run_cython(
targetdir=self.tempdir,
incdir=None,
},
**opts
)
-
+
cython_compile_testcase.run_distutils(
incdir=None,
workdir=self.tempdir,
extra_extension_args={'extra_objects':['cfuncs.o']},
**opts
)
-
+
# ext = Cython.Distutils.extension.Extension(
# 'codefile',
- # ['codefile.pyx'],
+ # ['codefile.pyx'],
# pyrex_gdb=True,
# extra_objects=['cfuncs.o'])
- #
+ #
# distutils.core.setup(
# script_args=['build_ext', '--inplace'],
# ext_modules=[ext],
# cmdclass=dict(build_ext=Cython.Distutils.build_ext)
# )
-
+
def tearDown(self):
os.chdir(self.cwd)
shutil.rmtree(self.tempdir)
class GdbDebuggerTestCase(DebuggerTestCase):
def setUp(self):
super(GdbDebuggerTestCase, self).setUp()
-
+
prefix_code = textwrap.dedent('''\
python
-
+
import os
import sys
import traceback
-
+
def excepthook(type, value, tb):
traceback.print_exception(type, value, tb)
os._exit(1)
-
+
sys.excepthook = excepthook
-
+
# Have tracebacks end up on sys.stderr (gdb replaces sys.stderr
# with an object that calls gdb.write())
sys.stderr = sys.__stderr__
-
+
end
''')
-
+
code = textwrap.dedent('''\
python
-
+
from Cython.Debugger.Tests import test_libcython_in_gdb
test_libcython_in_gdb.main(version=%r)
-
+
end
''' % (sys.version_info[:2],))
-
- self.gdb_command_file = cygdb.make_command_file(self.tempdir,
+
+ self.gdb_command_file = cygdb.make_command_file(self.tempdir,
prefix_code)
-
+
with open(self.gdb_command_file, 'a') as f:
f.write(code)
-
+
args = ['gdb', '-batch', '-x', self.gdb_command_file, '-n', '--args',
sys.executable, '-c', 'import codefile']
-
+
paths = []
path = os.environ.get('PYTHONPATH')
if path:
paths.append(os.path.dirname(os.path.dirname(
os.path.abspath(Cython.__file__))))
env = dict(os.environ, PYTHONPATH=os.pathsep.join(paths))
-
+
try:
p = subprocess.Popen(['gdb', '-v'], stdout=subprocess.PIPE)
have_gdb = True
gdb_version = p.stdout.read().decode('ascii')
p.wait()
p.stdout.close()
-
+
if have_gdb:
# Based on Lib/test/test_gdb.py
regex = "^GNU gdb [^\d]*(\d+)\.(\d+)"
stdout=open(os.devnull, 'w'),
stderr=subprocess.PIPE,
env=env)
-
+
def tearDown(self):
super(GdbDebuggerTestCase, self).tearDown()
if self.p:
self.p.stderr.close()
self.p.wait()
os.remove(self.gdb_command_file)
-
-
+
+
class TestAll(GdbDebuggerTestCase):
-
+
def test_all(self):
if self.p is None:
return
-
+
out, err = self.p.communicate()
border = '*' * 30
start = '%s v INSIDE GDB v %s' % (border, border)
some_c_function(void)
{
int a, b, c;
-
+
a = 1;
b = 2;
}
class DebugTestCase(unittest.TestCase):
"""
- Base class for test cases. On teardown it kills the inferior and unsets
+ Base class for test cases. On teardown it kills the inferior and unsets
all breakpoints.
"""
-
+
def __init__(self, name):
super(DebugTestCase, self).__init__(name)
self.cy = libcython.cy
'codefile.ham']
self.eggs_func = libcython.cy.functions_by_qualified_name[
'codefile.eggs']
-
+
def read_var(self, varname, cast_to=None):
result = gdb.parse_and_eval('$cy_cvalue("%s")' % varname)
if cast_to:
result = cast_to(result)
-
+
return result
-
+
def local_info(self):
return gdb.execute('info locals', to_string=True)
-
+
def lineno_equals(self, source_line=None, lineno=None):
if source_line is not None:
lineno = test_libcython.source_to_lineno[source_line]
gdb.execute('kill inferior 1', to_string=True)
except RuntimeError:
pass
-
+
gdb.execute('set args -c "import codefile"')
-
+
libcython.cy.step.static_breakpoints.clear()
libcython.cy.step.runtime_breakpoints.clear()
libcython.cy.step.init_breakpoints()
class TestDebugInformationClasses(DebugTestCase):
-
+
def test_CythonModule(self):
"test that debug information was parsed properly into data structures"
self.assertEqual(self.module.name, 'codefile')
- global_vars = ('c_var', 'python_var', '__name__',
+ global_vars = ('c_var', 'python_var', '__name__',
'__builtins__', '__doc__', '__file__')
assert set(global_vars).issubset(self.module.globals)
-
+
def test_CythonVariable(self):
module_globals = self.module.globals
c_var = module_globals['c_var']
self.assertEqual(c_var.type, libcython.CObject)
self.assertEqual(python_var.type, libcython.PythonObject)
self.assertEqual(c_var.qualified_name, 'codefile.c_var')
-
+
def test_CythonFunction(self):
self.assertEqual(self.spam_func.qualified_name, 'codefile.spam')
- self.assertEqual(self.spam_meth.qualified_name,
+ self.assertEqual(self.spam_meth.qualified_name,
'codefile.SomeClass.spam')
self.assertEqual(self.spam_func.module, self.module)
-
+
assert self.eggs_func.pf_cname
assert not self.ham_func.pf_cname
assert not self.spam_func.pf_cname
assert not self.spam_meth.pf_cname
-
+
self.assertEqual(self.spam_func.type, libcython.CObject)
self.assertEqual(self.ham_func.type, libcython.CObject)
-
+
self.assertEqual(self.spam_func.arguments, ['a'])
- self.assertEqual(self.spam_func.step_into_functions,
+ self.assertEqual(self.spam_func.step_into_functions,
set(['puts', 'some_c_function']))
-
+
expected_lineno = test_libcython.source_to_lineno['def spam(a=0):']
self.assertEqual(self.spam_func.lineno, expected_lineno)
self.assertEqual(sorted(self.spam_func.locals), list('abcd'))
class TestParameters(unittest.TestCase):
-
+
def test_parameters(self):
gdb.execute('set cy_colorize_code on')
assert libcython.parameters.colorize_code
def test_break(self):
breakpoint_amount = len(gdb.breakpoints())
gdb.execute('cy break codefile.spam')
-
+
self.assertEqual(len(gdb.breakpoints()), breakpoint_amount + 1)
bp = gdb.breakpoints()[-1]
self.assertEqual(bp.type, gdb.BP_BREAKPOINT)
assert self.spam_func.cname in bp.location
assert bp.enabled
-
+
def test_python_break(self):
gdb.execute('cy break -p join')
assert 'def join(' in gdb.execute('cy run', to_string=True)
class TestKilled(DebugTestCase):
-
+
def test_abort(self):
gdb.execute("set args -c 'import os; os.abort()'")
output = gdb.execute('cy run', to_string=True)
assert 'abort' in output.lower()
class DebugStepperTestCase(DebugTestCase):
-
+
def step(self, varnames_and_values, source_line=None, lineno=None):
gdb.execute(self.command)
for varname, value in varnames_and_values:
self.assertEqual(self.read_var(varname), value, self.local_info())
-
+
self.lineno_equals(source_line, lineno)
class TestStep(DebugStepperTestCase):
"""
- Test stepping. Stepping happens in the code found in
+ Test stepping. Stepping happens in the code found in
Cython/Debugger/Tests/codefile.
"""
-
+
def test_cython_step(self):
gdb.execute('cy break codefile.spam')
-
+
gdb.execute('run', to_string=True)
self.lineno_equals('def spam(a=0):')
-
+
gdb.execute('cy step', to_string=True)
self.lineno_equals('b = c = d = 0')
-
+
self.command = 'cy step'
self.step([('b', 0)], source_line='b = 1')
self.step([('b', 1), ('c', 0)], source_line='c = 2')
self.step([('c', 2)], source_line='int(10)')
self.step([], source_line='puts("spam")')
-
+
gdb.execute('cont', to_string=True)
self.assertEqual(len(gdb.inferiors()), 1)
self.assertEqual(gdb.inferiors()[0].pid, 0)
-
+
def test_c_step(self):
self.break_and_run('some_c_function()')
gdb.execute('cy step', to_string=True)
self.assertEqual(gdb.selected_frame().name(), 'some_c_function')
-
+
def test_python_step(self):
self.break_and_run('os.path.join("foo", "bar")')
-
+
result = gdb.execute('cy step', to_string=True)
-
+
curframe = gdb.selected_frame()
self.assertEqual(curframe.name(), 'PyEval_EvalFrameEx')
-
+
pyframe = libpython.Frame(curframe).get_pyop()
self.assertEqual(str(pyframe.co_name), 'join')
assert re.match(r'\d+ def join\(', result), result
class TestNext(DebugStepperTestCase):
-
+
def test_cython_next(self):
self.break_and_run('c = 2')
class TestLocalsGlobals(DebugTestCase):
-
+
def test_locals(self):
self.break_and_run('int(10)')
-
+
result = gdb.execute('cy locals', to_string=True)
assert 'a = 0', repr(result)
assert 'b = (int) 1', result
assert 'c = (int) 2' in result, repr(result)
-
+
def test_globals(self):
self.break_and_run('int(10)')
-
+
result = gdb.execute('cy globals', to_string=True)
assert '__name__ ' in result, repr(result)
assert '__doc__ ' in result, repr(result)
class TestBacktrace(DebugTestCase):
-
+
def test_backtrace(self):
libcython.parameters.colorize_code.value = False
-
+
self.break_and_run('os.path.join("foo", "bar")')
result = gdb.execute('cy bt', to_string=True)
-
+
_debug(libpython.execute, libpython._execute, gdb.execute)
_debug(gdb.execute('cy list', to_string=True))
_debug(repr(result))
-
- assert re.search(r'\#\d+ *0x.* in spam\(\) at .*codefile\.pyx:22',
+
+ assert re.search(r'\#\d+ *0x.* in spam\(\) at .*codefile\.pyx:22',
result), result
assert 'os.path.join("foo", "bar")' in result, result
-
+
gdb.execute("cy step")
-
+
gdb.execute('cy bt')
result = gdb.execute('cy bt -a', to_string=True)
assert re.search(r'\#0 *0x.* in main\(\) at', result), result
class TestFunctions(DebugTestCase):
-
+
def test_functions(self):
self.break_and_run('c = 2')
result = gdb.execute('print $cy_cname("b")', to_string=True)
assert re.search('__pyx_.*b', result), result
-
+
result = gdb.execute('print $cy_lineno()', to_string=True)
supposed_lineno = test_libcython.source_to_lineno['c = 2']
assert str(supposed_lineno) in result, (supposed_lineno, result)
-
+
result = gdb.execute('print $cy_cvalue("b")', to_string=True)
assert '= 1' in result
-
+
class TestPrint(DebugTestCase):
-
+
def test_print(self):
self.break_and_run('c = 2')
result = gdb.execute('cy print b', to_string=True)
self.break_and_run('os.path.join("foo", "bar")')
gdb.execute('cy step')
self.assertRaises(RuntimeError, gdb.execute, 'cy down')
-
+
result = gdb.execute('cy up', to_string=True)
assert 'spam()' in result
assert 'os.path.join("foo", "bar")' in result
class TestExec(DebugTestCase):
-
+
def setUp(self):
super(TestExec, self).setUp()
self.fd, self.tmpfilename = tempfile.mkstemp()
self.tmpfile = os.fdopen(self.fd, 'r+')
-
+
def tearDown(self):
super(TestExec, self).tearDown()
-
+
try:
self.tmpfile.close()
finally:
os.remove(self.tmpfilename)
-
+
def eval_command(self, command):
- gdb.execute('cy exec open(%r, "w").write(str(%s))' %
+ gdb.execute('cy exec open(%r, "w").write(str(%s))' %
(self.tmpfilename, command))
return self.tmpfile.read().strip()
-
+
def test_cython_exec(self):
self.break_and_run('os.path.join("foo", "bar")')
-
+
# test normal behaviour
self.assertEqual("[0]", self.eval_command('[a]'))
-
+
# test multiline code
result = gdb.execute(textwrap.dedent('''\
cy exec
pass
-
+
"nothing"
end
'''))
result = self.tmpfile.read().rstrip()
self.assertEqual('', result)
-
+
def test_python_exec(self):
self.break_and_run('os.path.join("foo", "bar")')
gdb.execute('cy step')
-
+
gdb.execute('cy exec some_random_var = 14')
self.assertEqual('14', self.eval_command('some_random_var'))
def _debug(*messages):
if _do_debug:
- messages = itertools.chain([sys._getframe(1).f_code.co_name, ':'],
+ messages = itertools.chain([sys._getframe(1).f_code.co_name, ':'],
messages)
_debug_file.write(' '.join(str(msg) for msg in messages) + '\n')
else:
m = __import__(modulename, fromlist=[''])
tests = inspect.getmembers(m, inspect.isclass)
-
+
# test_support.run_unittest(tests)
-
+
test_loader = unittest.TestLoader()
suite = unittest.TestSuite(
[test_loader.loadTestsFromTestCase(cls) for name, cls in tests])
-
+
result = unittest.TextTestRunner(verbosity=1).run(suite)
return result.wasSuccessful()
returned to the parent test process.
"""
from Cython.Debugger.Tests import test_libpython_in_gdb
-
+
success_libcython = run_unittest_in_module(__name__)
success_libpython = run_unittest_in_module(test_libpython_in_gdb.__name__)
-
+
if not success_libcython or not success_libpython:
sys.exit(1)
-
+
def main(version, trace_code=False):
- global inferior_python_version
-
+ global inferior_python_version
+
inferior_python_version = version
-
+
if trace_code:
tracer = trace.Trace(count=False, trace=True, outfile=sys.stderr,
ignoredirs=[sys.prefix, sys.exec_prefix])
# -*- coding: UTF-8 -*-
"""
-Test libpython.py. This is already partly tested by test_libcython_in_gdb and
+Test libpython.py. This is already partly tested by test_libcython_in_gdb and
Lib/test/test_gdb.py in the Python source. These tests are run in gdb and
called from test_libcython_in_gdb.main()
"""
"""
Test whether types of Python objects are correctly inferred and that
the right libpython.PySomeTypeObjectPtr classes are instantiated.
-
- Also test whether values are appropriately formatted (don't be too
+
+ Also test whether values are appropriately formatted (don't be too
laborious as Lib/test/test_gdb.py already covers this extensively).
-
- Don't take care of decreffing newly allocated objects as a new
+
+ Don't take care of decreffing newly allocated objects as a new
interpreter is started for every test anyway.
"""
-
+
def setUp(self):
super(TestPrettyPrinters, self).setUp()
self.break_and_run('b = c = d = 0')
-
+
def get_pyobject(self, code):
value = gdb.parse_and_eval(code)
assert libpython.pointervalue(value) != 0
return value
-
+
def pyobject_fromcode(self, code, gdbvar=None):
if gdbvar is not None:
d = {'varname':gdbvar, 'code':code}
gdb.execute('set $%(varname)s = %(code)s' % d)
code = '$' + gdbvar
-
+
return libpython.PyObjectPtr.from_pyobject_ptr(self.get_pyobject(code))
-
+
def get_repr(self, pyobject):
return pyobject.get_truncated_repr(libpython.MAX_OUTPUT_LEN)
-
+
def alloc_bytestring(self, string, gdbvar=None):
if inferior_python_version < (3, 0):
funcname = 'PyString_FromString'
else:
funcname = 'PyBytes_FromString'
-
+
assert '"' not in string
-
+
# ensure double quotes
code = '(PyObject *) %s("%s")' % (funcname, string)
return self.pyobject_fromcode(code, gdbvar=gdbvar)
-
+
def alloc_unicodestring(self, string, gdbvar=None):
self.alloc_bytestring(string.encode('UTF-8'), gdbvar='_temp')
-
+
postfix = libpython.get_inferior_unicode_postfix()
funcname = 'PyUnicode%s_FromEncodedObject' % (postfix,)
-
+
return self.pyobject_fromcode(
- '(PyObject *) %s($_temp, "UTF-8", "strict")' % funcname,
+ '(PyObject *) %s($_temp, "UTF-8", "strict")' % funcname,
gdbvar=gdbvar)
-
+
def test_bytestring(self):
bytestring = self.alloc_bytestring("spam")
-
+
if inferior_python_version < (3, 0):
bytestring_class = libpython.PyStringObjectPtr
expected = repr("spam")
else:
bytestring_class = libpython.PyBytesObjectPtr
expected = "b'spam'"
-
+
self.assertEqual(type(bytestring), bytestring_class)
self.assertEqual(self.get_repr(bytestring), expected)
-
+
def test_unicode(self):
unicode_string = self.alloc_unicodestring(u"spam ἄλφα")
-
+
expected = "'spam ἄλφα'"
if inferior_python_version < (3, 0):
expected = 'u' + expected
-
+
self.assertEqual(type(unicode_string), libpython.PyUnicodeObjectPtr)
self.assertEqual(self.get_repr(unicode_string), expected)
-
+
def test_int(self):
if inferior_python_version < (3, 0):
intval = self.pyobject_fromcode('PyInt_FromLong(100)')
self.assertEqual(type(intval), libpython.PyIntObjectPtr)
self.assertEqual(self.get_repr(intval), '100')
-
+
def test_long(self):
- longval = self.pyobject_fromcode('PyLong_FromLong(200)',
+ longval = self.pyobject_fromcode('PyLong_FromLong(200)',
gdbvar='longval')
assert gdb.parse_and_eval('$longval->ob_type == &PyLong_Type')
-
+
self.assertEqual(type(longval), libpython.PyLongObjectPtr)
self.assertEqual(self.get_repr(longval), '200')
-
+
def test_frame_type(self):
frame = self.pyobject_fromcode('PyEval_GetFrame()')
-
+
self.assertEqual(type(frame), libpython.PyFrameObjectPtr)
-
+
except Exception:
traceback.print_exc()
raise
-
+
return wrapper
def default_selected_gdb_frame(err=True):
frame = frame or gdb.selected_frame()
except RuntimeError:
raise gdb.GdbError("No frame is currently selected.")
-
+
if err and frame.name() is None:
raise NoFunctionNameInFrameError()
-
+
return function(self, frame, *args, **kwargs)
return wrapper
return decorator
raise gdb.GdbError('Selected frame does not correspond with a '
'Cython function we know about.')
return function(self, *args, **kwargs)
- return wrapper
+ return wrapper
def dispatch_on_frame(c_command, python_command=None):
def decorator(function):
def wrapper(self, *args, **kwargs):
is_cy = self.is_cython_function()
is_py = self.is_python_function()
-
+
if is_cy or (is_py and not python_command):
function(self, *args, **kwargs)
elif is_py:
else:
raise gdb.GdbError("Not a function cygdb knows about. "
"Use the normal GDB commands instead.")
-
+
return wrapper
return decorator
gdb.selected_frame()
except RuntimeError:
raise gdb.GdbError("No frame is currently selected.")
-
+
return function(*args, **kwargs)
return wrapper
-
+
def gdb_function_value_to_unicode(function):
@functools.wraps(function)
# {c_lineno: cython_lineno}
self.lineno_c2cy = {}
self.functions = {}
-
+
def qualified_name(self, varname):
return '.'.join(self.name, varname)
self.lineno = int(lineno)
class CythonFunction(CythonVariable):
- def __init__(self,
- module,
- name,
- cname,
+ def __init__(self,
+ module,
+ name,
+ cname,
pf_cname,
- qualified_name,
- lineno,
+ qualified_name,
+ lineno,
type=CObject):
- super(CythonFunction, self).__init__(name,
- cname,
- qualified_name,
+ super(CythonFunction, self).__init__(name,
+ cname,
+ qualified_name,
type,
lineno)
self.module = module
# General purpose classes
class CythonBase(object):
-
+
@default_selected_gdb_frame(err=False)
def is_cython_function(self, frame):
return frame.name() in self.cy.functions_by_cname
pyframe = libpython.Frame(frame).get_pyop()
return pyframe and not pyframe.is_optimized_out()
return False
-
+
@default_selected_gdb_frame()
def get_c_function_name(self, frame):
return frame.name()
@default_selected_gdb_frame()
def get_c_lineno(self, frame):
return frame.find_sal().line
-
+
@default_selected_gdb_frame()
def get_cython_function(self, frame):
result = self.cy.functions_by_cname.get(frame.name())
if result is None:
raise NoCythonFunctionInFrameError()
-
+
return result
-
+
@default_selected_gdb_frame()
def get_cython_lineno(self, frame):
"""
- Get the current Cython line number. Returns 0 if there is no
+ Get the current Cython line number. Returns 0 if there is no
correspondence between the C and Cython code.
"""
cyfunc = self.get_cython_function(frame)
return cyfunc.module.lineno_c2cy.get(self.get_c_lineno(frame), 0)
-
+
@default_selected_gdb_frame()
def get_source_desc(self, frame):
filename = lineno = lexer = None
filename = pyframeobject.filename()
lineno = pyframeobject.current_line_num()
-
+
if pygments:
lexer = pygments.lexers.PythonLexer(stripall=False)
else:
lineno = symbol_and_line_obj.line
if pygments:
lexer = pygments.lexers.CLexer(stripall=False)
-
+
return SourceFileDescriptor(filename, lexer), lineno
@default_selected_gdb_frame()
def get_source_line(self, frame):
source_desc, lineno = self.get_source_desc()
return source_desc.get_source(lineno)
-
+
@default_selected_gdb_frame()
def is_relevant_function(self, frame):
"""
return name in cython_func.step_into_functions
return False
-
+
@default_selected_gdb_frame(err=False)
def print_stackframe(self, frame, index, is_c=False):
"""
# raising GdbError when calling self.cy.cy_cvalue.invoke()
selected_frame = gdb.selected_frame()
frame.select()
-
+
try:
source_desc, lineno = self.get_source_desc(frame)
except NoFunctionNameInFrameError:
if pyframe is None or pyframe.is_optimized_out():
# print this python function as a C function
return self.print_stackframe(frame, index, is_c=True)
-
+
func_name = pyframe.co_name
func_cname = 'PyEval_EvalFrameEx'
func_args = []
elif self.is_cython_function(frame):
cyfunc = self.get_cython_function(frame)
f = lambda arg: self.cy.cy_cvalue.invoke(arg, frame=frame)
-
+
func_name = cyfunc.name
func_cname = cyfunc.cname
func_args = [] # [(arg, f(arg)) for arg in cyfunc.arguments]
func_name = frame.name()
func_cname = func_name
func_args = []
-
+
try:
gdb_value = gdb.parse_and_eval(func_cname)
except RuntimeError:
else:
# Seriously? Why is the address not an int?
func_address = int(str(gdb_value.address).split()[0], 0)
-
+
a = ', '.join('%s=%s' % (name, val) for name, val in func_args)
print '#%-2d 0x%016x in %s(%s)' % (index, func_address, func_name, a),
-
+
if source_desc.filename is not None:
print 'at %s:%s' % (source_desc.filename, lineno),
-
+
print
-
+
try:
print ' ' + source_desc.get_source(lineno)
except gdb.GdbError:
pass
-
+
selected_frame.select()
-
+
def get_remote_cython_globals_dict(self):
m = gdb.parse_and_eval('__pyx_m')
-
+
try:
PyModuleObject = gdb.lookup_type('PyModuleObject')
except RuntimeError:
raise gdb.GdbError(textwrap.dedent("""\
- Unable to lookup type PyModuleObject, did you compile python
+ Unable to lookup type PyModuleObject, did you compile python
with debugging support (-g)?"""))
-
+
m = m.cast(PyModuleObject.pointer())
return m['md_dict']
-
-
+
+
def get_cython_globals_dict(self):
"""
Get the Cython globals dict where the remote names are turned into
"""
remote_dict = self.get_remote_cython_globals_dict()
pyobject_dict = libpython.PyObjectPtr.from_pyobject_ptr(remote_dict)
-
+
result = {}
seen = set()
for k, v in pyobject_dict.iteritems():
result[k.proxyval(seen)] = v
-
+
return result
def print_gdb_value(self, name, value, max_name_length=None, prefix=''):
typename = ''
else:
typename = '(%s) ' % (value.type,)
-
+
if max_name_length is None:
print '%s%s = %s%s' % (prefix, name, typename, value)
else:
- print '%s%-*s = %s%s' % (prefix, max_name_length, name, typename,
+ print '%s%-*s = %s%s' % (prefix, max_name_length, name, typename,
value)
def is_initialized(self, cython_func, local_name):
# to provide "correct" colouring, the entire code needs to be
# lexed. However, this makes a lot of things terribly slow, so
# we decide not to. Besides, it's unlikely to matter.
-
+
if lex_source and lex_entire:
f = self.lex(f.read()).splitlines()
-
+
slice = itertools.islice(f, start - 1, stop - 1)
-
+
for idx, line in enumerate(slice):
if start + idx == mark_line:
prefix = '>'
else:
prefix = ' '
-
+
if lex_source and not lex_entire:
line = self.lex(line)
yield '%s %4d %s' % (prefix, start + idx, line.rstrip())
- def get_source(self, start, stop=None, lex_source=True, mark_line=0,
+ def get_source(self, start, stop=None, lex_source=True, mark_line=0,
lex_entire=False):
exc = gdb.GdbError('Unable to retrieve source code')
-
+
if not self.filename:
raise exc
-
+
start = max(start, 1)
if stop is None:
stop = start + 1
"""
Base class for Cython-command related erorrs
"""
-
+
def __init__(self, *args):
args = args or (self.msg,)
super(CyGDBError, self).__init__(*args)
-
+
class NoCythonFunctionInFrameError(CyGDBError):
"""
- raised when the user requests the current cython function, which is
+ raised when the user requests the current cython function, which is
unavailable
"""
msg = "Current function is a function cygdb doesn't know about"
class NoFunctionNameInFrameError(NoCythonFunctionInFrameError):
"""
- raised when the name of the C function could not be determined
+ raised when the name of the C function could not be determined
in the current C stack frame
"""
msg = ('C function name could not be determined in the current C stack '
"""
Base class for cython parameters
"""
-
+
def __init__(self, name, command_class, parameter_class, default=None):
self.show_doc = self.set_doc = self.__class__.__doc__
- super(CythonParameter, self).__init__(name, command_class,
+ super(CythonParameter, self).__init__(name, command_class,
parameter_class)
if default is not None:
self.value = default
-
+
def __nonzero__(self):
return bool(self.value)
-
+
__bool__ = __nonzero__ # python 3
class CompleteUnqualifiedFunctionNames(CythonParameter):
"""
Have 'cy break' complete unqualified function or method names.
- """
+ """
class ColorizeSourceCode(CythonParameter):
"""
Simple container class that might get more functionality in the distant
future (mostly to remind us that we're dealing with parameters).
"""
-
+
def __init__(self):
self.complete_unqualified = CompleteUnqualifiedFunctionNames(
'cy_complete_unqualified',
gdb.COMMAND_FILES,
gdb.PARAM_STRING,
"dark")
-
+
parameters = CythonParameters()
"""
Base class for Cython commands
"""
-
+
command_class = gdb.COMMAND_NONE
-
+
@classmethod
def _register(cls, clsname, args, kwargs):
if not hasattr(cls, 'completer_class'):
return cls(clsname, cls.command_class, *args, **kwargs)
else:
- return cls(clsname, cls.command_class, cls.completer_class,
+ return cls(clsname, cls.command_class, cls.completer_class,
*args, **kwargs)
-
+
@classmethod
def register(cls, *args, **kwargs):
alias = getattr(cls, 'alias', None)
if alias:
cls._register(cls.alias, args, kwargs)
-
+
return cls._register(cls.name, args, kwargs)
class CyCy(CythonCommand):
"""
Invoke a Cython command. Available commands are:
-
+
cy import
cy break
cy step
cy globals
cy exec
"""
-
+
name = 'cy'
command_class = gdb.COMMAND_NONE
completer_class = gdb.COMPLETE_COMMAND
-
+
def __init__(self, name, command_class, completer_class):
# keep the signature 2.5 compatible (i.e. do not use f(*a, k=v)
- super(CythonCommand, self).__init__(name, command_class,
+ super(CythonCommand, self).__init__(name, command_class,
completer_class, prefix=True)
-
+
commands = dict(
import_ = CyImport.register(),
break_ = CyBreak.register(),
cy_cvalue = CyCValue('cy_cvalue'),
cy_lineno = CyLine('cy_lineno'),
)
-
+
for command_name, command in commands.iteritems():
command.cy = self
setattr(self, command_name, command)
-
+
self.cy = self
-
+
# Cython module namespace
self.cython_namespace = {}
-
- # maps (unique) qualified function names (e.g.
+
+ # maps (unique) qualified function names (e.g.
# cythonmodule.ClassName.method_name) to the CythonFunction object
self.functions_by_qualified_name = {}
-
+
# unique cnames of Cython functions
self.functions_by_cname = {}
-
- # map function names like method_name to a list of all such
+
+ # map function names like method_name to a list of all such
# CythonFunction objects
self.functions_by_name = collections.defaultdict(list)
Import debug information outputted by the Cython compiler
Example: cy import FILE...
"""
-
+
name = 'cy import'
command_class = gdb.COMMAND_STATUS
completer_class = gdb.COMPLETE_FILENAME
-
+
def invoke(self, args, from_tty):
args = args.encode(_filesystemencoding)
for arg in string_to_argv(args):
try:
f = open(arg)
except OSError, e:
- raise gdb.GdbError('Unable to open file %r: %s' %
+ raise gdb.GdbError('Unable to open file %r: %s' %
(args, e.args[1]))
-
+
t = etree.parse(f)
-
+
for module in t.getroot():
cython_module = CythonModule(**module.attrib)
self.cy.cython_namespace[cython_module.name] = cython_module
-
+
for variable in module.find('Globals'):
d = variable.attrib
cython_module.globals[d['name']] = CythonVariable(**d)
-
+
for function in module.find('Functions'):
- cython_function = CythonFunction(module=cython_module,
+ cython_function = CythonFunction(module=cython_module,
**function.attrib)
# update the global function mappings
name = cython_function.name
qname = cython_function.qualified_name
-
+
self.cy.functions_by_name[name].append(cython_function)
self.cy.functions_by_qualified_name[
cython_function.qualified_name] = cython_function
self.cy.functions_by_cname[
cython_function.cname] = cython_function
-
+
d = cython_module.functions[qname] = cython_function
-
+
for local in function.find('Locals'):
d = local.attrib
cython_function.locals[d['name']] = CythonVariable(**d)
for step_into_func in function.find('StepIntoFunctions'):
d = step_into_func.attrib
cython_function.step_into_functions.add(d['name'])
-
+
cython_function.arguments.extend(
funcarg.tag for funcarg in function.find('Arguments'))
class CyBreak(CythonCommand):
"""
Set a breakpoint for Cython code using Cython qualified name notation, e.g.:
-
+
cy break cython_modulename.ClassName.method_name...
-
+
or normal notation:
-
+
cy break function_or_method_name...
-
+
or for a line number:
-
+
cy break cython_module:lineno...
-
+
Set a Python breakpoint:
Break on any function or method named 'func' in module 'modname'
-
+
cy break -p modname.func...
-
+
Break on any function or method named 'func'
-
+
cy break -p func...
"""
-
+
name = 'cy break'
command_class = gdb.COMMAND_BREAKPOINTS
-
+
def _break_pyx(self, name):
modulename, _, lineno = name.partition(':')
lineno = int(lineno)
else:
raise GdbError("Not a valid line number. "
"Does it contain actual code?")
-
+
def _break_funcname(self, funcname):
func = self.cy.functions_by_qualified_name.get(funcname)
break_funcs = [func]
-
+
if not func:
funcs = self.cy.functions_by_name.get(funcname)
if not funcs:
gdb.execute('break ' + funcname)
return
-
+
if len(funcs) > 1:
# multiple functions, let the user pick one
print 'There are multiple such functions:'
for idx, func in enumerate(funcs):
print '%3d) %s' % (idx, func.qualified_name)
-
+
while True:
try:
result = raw_input(
elif result.lower() == 'a':
break_funcs = funcs
break
- elif (result.isdigit() and
+ elif (result.isdigit() and
0 <= int(result) < len(funcs)):
break_funcs = [funcs[int(result)]]
break
print 'Not understood...'
else:
break_funcs = [funcs[0]]
-
+
for func in break_funcs:
gdb.execute('break %s' % func.cname)
if func.pf_cname:
gdb.execute('break %s' % func.pf_cname)
-
+
def invoke(self, function_names, from_tty):
argv = string_to_argv(function_names.encode('UTF-8'))
if function_names.startswith('-p'):
python_breakpoints = True
else:
python_breakpoints = False
-
+
for funcname in argv:
if python_breakpoints:
gdb.execute('py-break %s' % funcname)
self._break_pyx(funcname)
else:
self._break_funcname(funcname)
-
+
@dont_suppress_errors
def complete(self, text, word):
names = self.cy.functions_by_qualified_name
words = text.strip().split()
if words and '.' in words[-1]:
lastword = words[-1]
- compl = [n for n in self.cy.functions_by_qualified_name
+ compl = [n for n in self.cy.functions_by_qualified_name
if n.startswith(lastword)]
else:
seen = set(text[:-len(word)].split())
return [n for n in names if n.startswith(word) and n not in seen]
-
+
if len(lastword) > len(word):
# readline sees something (e.g. a '.') as a word boundary, so don't
# "recomplete" this prefix
strip_prefix_length = len(lastword) - len(word)
compl = [n[strip_prefix_length:] for n in compl]
-
+
return compl
Base class for CyStep and CyNext. It implements the interface dictated by
libpython.GenericCodeStepper.
"""
-
+
def lineno(self, frame):
# Take care of the Python and Cython levels. We need to care for both
# as we can't simply dispath to 'py-step', since that would work for
return self.get_cython_lineno(frame)
else:
return libpython.py_step.lineno(frame)
-
+
def get_source_line(self, frame):
try:
line = super(CythonCodeStepper, self).get_source_line(frame)
def runtime_break_functions(self):
if self.is_cython_function():
return self.get_cython_function().step_into_functions
-
+
def static_break_functions(self):
result = ['PyEval_EvalFrameEx']
result.extend(self.cy.functions_by_cname)
command = 'step'
else:
command = 'next'
-
+
self.finish_executing(gdb.execute(command, to_string=True))
else:
self.step()
class CyStep(CythonCodeStepper):
"Step through Cython, Python or C code."
-
+
name = 'cy step'
stepinto = True
class CyRun(CythonCodeStepper):
"""
- Run a Cython program. This is like the 'run' command, except that it
+ Run a Cython program. This is like the 'run' command, except that it
displays Cython or Python source lines as well
"""
-
+
name = 'cy run'
-
+
invoke = CythonCodeStepper.run
class CyCont(CyRun):
"""
- Continue a Cython program. This is like the 'run' command, except that it
+ Continue a Cython program. This is like the 'run' command, except that it
displays Cython or Python source lines as well.
"""
-
+
name = 'cy cont'
invoke = CythonCodeStepper.cont
"""
name = 'cy up'
_command = 'up'
-
+
def invoke(self, *args):
try:
gdb.execute(self._command, to_string=True)
gdb.execute(self._command, to_string=True)
except RuntimeError, e:
raise gdb.GdbError(*e.args)
-
+
frame = gdb.selected_frame()
index = 0
while frame:
frame = frame.older()
index += 1
-
+
self.print_stackframe(index=index - 1)
"""
Go down a Cython, Python or relevant C frame.
"""
-
+
name = 'cy down'
_command = 'down'
Select a frame. Use frame numbers as listed in `cy backtrace`.
This command is useful because `cy backtrace` prints a reversed backtrace.
"""
-
+
name = 'cy select'
-
+
def invoke(self, stackno, from_tty):
try:
stackno = int(stackno)
except ValueError:
raise gdb.GdbError("Not a valid number: %r" % (stackno,))
-
+
frame = gdb.selected_frame()
while frame.newer():
frame = frame.newer()
-
+
stackdepth = self._stackdepth(frame)
-
+
try:
gdb.execute('select %d' % (stackdepth - stackno - 1,))
except RuntimeError, e:
class CyBacktrace(CythonCommand):
'Print the Cython stack'
-
+
name = 'cy bt'
alias = 'cy backtrace'
command_class = gdb.COMMAND_STACK
completer_class = gdb.COMPLETE_NONE
-
+
@require_running_program
def invoke(self, args, from_tty):
# get the first frame
selected_frame = frame = gdb.selected_frame()
while frame.older():
frame = frame.older()
-
+
print_all = args == '-a'
-
+
index = 0
while frame:
is_c = False
-
+
is_relevant = False
try:
is_relevant = self.is_relevant_function(frame)
except CyGDBError:
pass
-
+
if print_all or is_relevant:
self.print_stackframe(frame, index)
-
+
index += 1
frame = frame.newer()
-
+
selected_frame.select()
List Cython source code. To disable to customize colouring see the cy_*
parameters.
"""
-
+
name = 'cy list'
command_class = gdb.COMMAND_FILES
completer_class = gdb.COMPLETE_NONE
-
+
@dispatch_on_frame(c_command='list')
def invoke(self, _, from_tty):
sd, lineno = self.get_source_desc()
- source = sd.get_source(lineno - 5, lineno + 5, mark_line=lineno,
+ source = sd.get_source(lineno - 5, lineno + 5, mark_line=lineno,
lex_entire=True)
print source
"""
Print a Cython variable using 'cy-print x' or 'cy-print module.function.x'
"""
-
+
name = 'cy print'
command_class = gdb.COMMAND_DATA
-
+
def invoke(self, name, from_tty, max_name_length=None):
if self.is_python_function():
return gdb.execute('py-print ' + name)
value = value.dereference()
else:
break
-
+
self.print_gdb_value(name, value, max_name_length)
else:
gdb.execute('print ' + name)
-
+
def complete(self):
if self.is_cython_function():
f = self.get_cython_function()
"""
List the locals from the current Cython frame.
"""
-
+
name = 'cy locals'
command_class = gdb.COMMAND_STACK
completer_class = gdb.COMPLETE_NONE
-
+
@dispatch_on_frame(c_command='info locals', python_command='py-locals')
def invoke(self, args, from_tty):
local_cython_vars = self.get_cython_function().locals
if self.is_initialized(self.get_cython_function(), cyvar.name):
value = gdb.parse_and_eval(cyvar.cname)
if not value.is_optimized_out:
- self.print_gdb_value(cyvar.name, value,
+ self.print_gdb_value(cyvar.name, value,
max_name_length, '')
"""
List the globals from the current Cython module.
"""
-
+
name = 'cy globals'
command_class = gdb.COMMAND_STACK
completer_class = gdb.COMPLETE_NONE
-
+
@dispatch_on_frame(c_command='info variables', python_command='py-globals')
def invoke(self, args, from_tty):
global_python_dict = self.get_cython_globals_dict()
module_globals = self.get_cython_function().module.globals
-
+
max_globals_len = 0
max_globals_dict_len = 0
if module_globals:
max_globals_len = len(max(module_globals, key=len))
if global_python_dict:
max_globals_dict_len = len(max(global_python_dict))
-
+
max_name_length = max(max_globals_len, max_globals_dict_len)
-
+
seen = set()
print 'Python globals:'
for k, v in sorted(global_python_dict.iteritems(), key=sortkey):
v = v.get_truncated_repr(libpython.MAX_OUTPUT_LEN)
seen.add(k)
print ' %-*s = %s' % (max_name_length, k, v)
-
+
print 'C globals:'
for name, cyvar in sorted(module_globals.iteritems(), key=sortkey):
if name not in seen:
"""
Execute Python code in the nearest Python or Cython frame.
"""
-
+
name = '-cy-exec'
command_class = gdb.COMMAND_STACK
completer_class = gdb.COMPLETE_NONE
-
+
def _fill_locals_dict(self, executor, local_dict_pointer):
"Fill a remotely allocated dict with values from the Cython C stack"
cython_func = self.get_cython_function()
current_lineno = self.get_cython_lineno()
-
+
for name, cyvar in cython_func.locals.iteritems():
- if (cyvar.type == PythonObject and
+ if (cyvar.type == PythonObject and
self.is_initialized(cython_func, name)):
-
+
try:
val = gdb.parse_and_eval(cyvar.cname)
except RuntimeError:
else:
if val.is_optimized_out:
continue
-
+
pystringp = executor.alloc_pystring(name)
code = '''
(PyObject *) PyDict_SetItem(
finally:
# PyDict_SetItem doesn't steal our reference
executor.decref(pystringp)
-
+
def _find_first_cython_or_python_frame(self):
frame = gdb.selected_frame()
while frame:
- if (self.is_cython_function(frame) or
+ if (self.is_cython_function(frame) or
self.is_python_function(frame)):
return frame
-
+
frame = frame.older()
-
+
raise gdb.GdbError("There is no Cython or Python frame on the stack.")
-
+
def invoke(self, expr, from_tty):
frame = self._find_first_cython_or_python_frame()
if self.is_python_function(frame):
libpython.py_exec.invoke(expr, from_tty)
return
-
+
expr, input_type = self.readcode(expr)
executor = libpython.PythonCodeExecutor()
-
+
with libpython.FetchAndRestoreError():
- # get the dict of Cython globals and construct a dict in the
+ # get the dict of Cython globals and construct a dict in the
# inferior with Cython locals
global_dict = gdb.parse_and_eval(
'(PyObject *) PyModule_GetDict(__pyx_m)')
local_dict = gdb.parse_and_eval('(PyObject *) PyDict_New()')
-
+
cython_function = self.get_cython_function()
-
+
try:
- self._fill_locals_dict(executor,
+ self._fill_locals_dict(executor,
libpython.pointervalue(local_dict))
executor.evalcode(expr, input_type, global_dict, local_dict)
finally:
"""
Get the C name of a Cython variable in the current context.
Examples:
-
+
print $cy_cname("function")
print $cy_cname("Class.method")
print $cy_cname("module.function")
"""
-
+
@require_cython_frame
@gdb_function_value_to_unicode
def invoke(self, cyname, frame=None):
frame = frame or gdb.selected_frame()
cname = None
-
+
if self.is_cython_function(frame):
cython_function = self.get_cython_function(frame)
if cyname in cython_function.locals:
qname = '%s.%s' % (cython_function.module.name, cyname)
if qname in cython_function.module.functions:
cname = cython_function.module.functions[qname].cname
-
+
if not cname:
cname = self.cy.functions_by_qualified_name.get(cyname)
-
+
if not cname:
raise gdb.GdbError('No such Cython variable: %s' % cyname)
-
+
return cname
"""
Get the value of a Cython variable.
"""
-
+
@require_cython_frame
@gdb_function_value_to_unicode
def invoke(self, cyname, frame=None):
"""
Get the current Cython line.
"""
-
+
@require_cython_frame
def invoke(self):
return self.get_cython_lineno()
all_pretty_typenames = set()
class PrettyPrinterTrackerMeta(type):
-
+
def __init__(self, name, bases, dict):
super(PrettyPrinterTrackerMeta, self).__init__(name, bases, dict)
all_pretty_typenames.add(self._typename)
Note that at every stage the underlying pointer could be NULL, point
to corrupt data, etc; this is the debugger, after all.
"""
-
+
__metaclass__ = PrettyPrinterTrackerMeta
-
+
_typename = 'PyObject'
-
+
def __init__(self, gdbval, cast_to=None):
if cast_to:
self._gdbval = gdbval.cast(cast_to)
#print 'tp_flags = 0x%08x' % tp_flags
#print 'tp_name = %r' % tp_name
-
+
name_map = {'bool': PyBoolObjectPtr,
'classobj': PyClassObjectPtr,
'instance': PyInstanceObjectPtr,
}
if tp_name in name_map:
return name_map[tp_name]
-
+
if tp_flags & Py_TPFLAGS_HEAPTYPE:
return HeapTypeObjectPtr
class HeapTypeObjectPtr(PyObjectPtr):
_typename = 'PyObject'
-
+
def get_attr_dict(self):
'''
Get the PyDictObject ptr representing the attribute dictionary
within the process being debugged.
"""
_typename = 'PyBaseExceptionObject'
-
+
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
if not pyop_value.is_null():
pyop_key = PyObjectPtr.from_pyobject_ptr(ep['me_key'])
yield (pyop_key, pyop_value)
-
+
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
class PyInstanceObjectPtr(PyObjectPtr):
_typename = 'PyInstanceObject'
-
+
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
class PyListObjectPtr(PyObjectPtr):
_typename = 'PyListObject'
-
+
def __getitem__(self, i):
# Get the gdb.Value for the (PyObject*) with the given index:
field_ob_item = self.field('ob_item')
class PyLongObjectPtr(PyObjectPtr):
_typename = 'PyLongObject'
-
+
def proxyval(self, visited):
'''
Python's Include/longobjrep.h has this declaration:
where SHIFT can be either:
#define PyLong_SHIFT 30
#define PyLong_SHIFT 15
- '''
+ '''
ob_size = long(self.field('ob_size'))
if ob_size == 0:
return 0L
<bool> instances (Py_True/Py_False) within the process being debugged.
"""
_typename = 'PyBoolObject'
-
+
def proxyval(self, visited):
castto = gdb.lookup_type('PyLongObject').pointer()
self._gdbval = self._gdbval.cast(castto)
class PyBytesObjectPtr(PyObjectPtr):
_typename = 'PyBytesObject'
-
+
def __str__(self):
field_ob_size = self.field('ob_size')
field_ob_sval = self.field('ob_sval')
- return ''.join(struct.pack('b', field_ob_sval[i])
+ return ''.join(struct.pack('b', field_ob_sval[i])
for i in safe_range(field_ob_size))
def proxyval(self, visited):
quote = "'"
if "'" in proxy and not '"' in proxy:
quote = '"'
-
+
if py3:
out.write('b')
else:
# Python 2, write the 'u'
out.write('u')
-
+
if "'" in proxy and '"' not in proxy:
quote = '"'
else:
namespace = self.get_namespace(pyop_frame)
namespace = [(name.proxyval(set()), val) for name, val in namespace]
-
+
if namespace:
name, val = max(namespace, key=lambda (name, val): len(name))
max_name_length = len(name)
-
+
for name, pyop_value in namespace:
value = pyop_value.get_truncated_repr(MAX_OUTPUT_LEN)
print ('%-*s = %s' % (max_name_length, name, value))
class PyGlobals(PyLocals):
'List all the globals in the currently select Python frame'
-
+
def get_namespace(self, pyop_frame):
return pyop_frame.iter_globals()
class PyNameEquals(gdb.Function):
-
+
def _get_pycurframe_attr(self, attr):
frame = Frame(gdb.selected_frame())
if frame.is_evalframeex():
pyframe = frame.get_pyop()
if pyframe is None:
return None
-
+
return getattr(pyframe, attr).proxyval(set())
-
+
return None
-
+
def invoke(self, funcname):
attr = self._get_pycurframe_attr('co_name')
return attr is not None and attr == funcname.string()
class PyModEquals(PyNameEquals):
-
+
def invoke(self, modname):
attr = self._get_pycurframe_attr('co_filename')
if attr is not None:
class PyBreak(gdb.Command):
"""
Set a Python breakpoint. Examples:
-
+
Break on any function or method named 'func' in module 'modname'
-
- py-break modname.func
-
+
+ py-break modname.func
+
Break on any function or method named 'func'
-
+
py-break func
"""
-
+
def invoke(self, funcname, from_tty):
if '.' in funcname:
modname, dot, funcname = funcname.rpartition('.')
- cond = '$pyname_equals("%s") && $pymod_equals("%s")' % (funcname,
+ cond = '$pyname_equals("%s") && $pymod_equals("%s")' % (funcname,
modname)
else:
cond = '$pyname_equals("%s")' % funcname
"""
State that helps to provide a reentrant gdb.execute() function.
"""
-
+
def __init__(self):
self.fd, self.filename = tempfile.mkstemp()
self.file = os.fdopen(self.fd, 'r+')
_execute("set logging file %s" % self.filename)
self.file_position_stack = []
-
+
atexit.register(os.close, self.fd)
atexit.register(os.remove, self.filename)
-
+
def __enter__(self):
if not self.file_position_stack:
_execute("set logging redirect on")
_execute("set logging on")
_execute("set pagination off")
-
+
self.file_position_stack.append(os.fstat(self.fd).st_size)
return self
-
+
def getoutput(self):
gdb.flush()
self.file.seek(self.file_position_stack[-1])
result = self.file.read()
return result
-
+
def __exit__(self, exc_type, exc_val, tb):
startpos = self.file_position_stack.pop()
self.file.seek(startpos)
def execute(command, from_tty=False, to_string=False):
"""
Replace gdb.execute() with this function and have it accept a 'to_string'
- argument (new in 7.2). Have it properly capture stderr also. Ensure
+ argument (new in 7.2). Have it properly capture stderr also. Ensure
reentrancy.
"""
if to_string:
# Woooh, another bug in gdb! Is there an end in sight?
# http://sourceware.org/bugzilla/show_bug.cgi?id=12212
return gdb.inferiors()[0]
-
+
selected_thread = gdb.selected_thread()
-
+
for inferior in gdb.inferiors():
for thread in inferior.threads():
if thread == selected_thread:
class GenericCodeStepper(gdb.Command):
"""
- Superclass for code stepping. Subclasses must implement the following
+ Superclass for code stepping. Subclasses must implement the following
methods:
-
+
lineno(frame)
tells the current line number (only called for a relevant frame)
-
+
is_relevant_function(frame)
tells whether we care about frame 'frame'
-
+
get_source_line(frame)
get the line of source code for the current line (only called for a
- relevant frame). If the source code cannot be retrieved this
+ relevant frame). If the source code cannot be retrieved this
function should return None
-
- static_break_functions()
- returns an iterable of function names that are considered relevant
- and should halt step-into execution. This is needed to provide a
+
+ static_break_functions()
+ returns an iterable of function names that are considered relevant
+ and should halt step-into execution. This is needed to provide a
performing step-into
-
- runtime_break_functions
- list of functions that we should break into depending on the
+
+ runtime_break_functions
+ list of functions that we should break into depending on the
context
This class provides an 'invoke' method that invokes a 'step' or 'step-over'
depending on the 'stepinto' argument.
"""
-
+
stepper = False
static_breakpoints = {}
runtime_breakpoints = {}
-
+
def __init__(self, name, stepinto=False):
- super(GenericCodeStepper, self).__init__(name,
+ super(GenericCodeStepper, self).__init__(name,
gdb.COMMAND_RUNNING,
gdb.COMPLETE_NONE)
self.stepinto = stepinto
def init_breakpoints(self):
"""
Keep all breakpoints around and simply disable/enable them each time
- we are stepping. We need this because if you set and delete a
+ we are stepping. We need this because if you set and delete a
breakpoint, gdb will not repeat your command (this is due to 'delete').
- We also can't use the breakpoint API because there's no option to make
+ We also can't use the breakpoint API because there's no option to make
breakpoint setting silent.
-
+
This method must be called whenever the list of functions we should
step into changes. It can be called on any GenericCodeStepper instance.
"""
break_funcs = set(self.static_break_functions())
-
+
for funcname in break_funcs:
if funcname not in self.static_breakpoints:
try:
result = gdb.execute(textwrap.dedent("""\
python bp = gdb.Breakpoint(%r, gdb.BP_BREAKPOINT, \
internal=True); \
- print bp.number""",
+ print bp.number""",
to_string=True))
-
+
breakpoint = int(result)
self.static_breakpoints[funcname] = breakpoint
-
+
for bp in set(self.static_breakpoints) - break_funcs:
gdb.execute("delete " + self.static_breakpoints[bp])
-
+
self.disable_breakpoints()
def enable_breakpoints(self):
for bp in self.static_breakpoints.itervalues():
gdb.execute('enable ' + bp)
-
+
runtime_break_functions = self.runtime_break_functions()
if runtime_break_functions is None:
return
-
+
for funcname in runtime_break_functions:
- if (funcname not in self.static_breakpoints and
+ if (funcname not in self.static_breakpoints and
funcname not in self.runtime_breakpoints):
self.runtime_breakpoints[funcname] = self._break_func(funcname)
elif funcname in self.runtime_breakpoints:
gdb.execute('enable ' + self.runtime_breakpoints[funcname])
-
+
def disable_breakpoints(self):
chain = itertools.chain(self.static_breakpoints.itervalues(),
self.runtime_breakpoints.itervalues())
for bp in chain:
gdb.execute('disable ' + bp)
-
+
def runtime_break_functions(self):
"""
- Implement this if the list of step-into functions depends on the
+ Implement this if the list of step-into functions depends on the
context.
"""
-
+
def stopped(self, result):
match = re.search('^Program received signal .*', result, re.MULTILINE)
if match:
return result
else:
return None
-
+
def _stackdepth(self, frame):
depth = 0
while frame:
frame = frame.older()
depth += 1
-
+
return depth
-
+
def finish_executing(self, result):
"""
After doing some kind of code running in the inferior, print the line
else:
frame = gdb.selected_frame()
output = None
-
+
if self.is_relevant_function(frame):
output = self.get_source_line(frame)
-
+
if output is None:
pframe = getattr(self, 'print_stackframe', None)
if pframe:
print result.strip()
else:
print output
-
+
def _finish(self):
"""
- Execute until the function returns (or until something else makes it
+ Execute until the function returns (or until something else makes it
stop)
"""
if gdb.selected_frame().older() is not None:
else:
# outermost frame, continue
return gdb.execute('cont', to_string=True)
-
+
def finish(self, *args):
"""
Execute until the function returns to a relevant caller.
"""
-
+
while True:
result = self._finish()
-
+
try:
frame = gdb.selected_frame()
except RuntimeError:
break
-
+
hitbp = re.search(r'Breakpoint (\d+)', result)
is_relavant = self.is_relevant_function(frame)
if hitbp or is_relavant or self.stopped(result):
break
-
+
self.finish_executing(result)
-
+
def _step(self):
"""
Do a single step or step-over. Returns the result of the last gdb
"""
if self.stepinto:
self.enable_breakpoints()
-
+
beginframe = gdb.selected_frame()
beginline = self.lineno(beginframe)
if not self.stepinto:
depth = self._stackdepth(beginframe)
-
+
newframe = beginframe
result = ''
result = gdb.execute('next', to_string=True)
else:
result = self._finish()
-
+
if self.stopped(result):
break
-
+
newframe = gdb.selected_frame()
is_relevant_function = self.is_relevant_function(newframe)
try:
framename = newframe.name()
except RuntimeError:
framename = None
-
+
m = re.search(r'Breakpoint (\d+)', result)
if m:
bp = self.runtime_breakpoints.get(framename)
# that the function, in case hit by a runtime breakpoint,
# is in the right context
break
-
+
if newframe != beginframe:
# new function
-
+
if not self.stepinto:
# see if we returned to the caller
newdepth = self._stackdepth(newframe)
- is_relevant_function = (newdepth < depth and
+ is_relevant_function = (newdepth < depth and
is_relevant_function)
-
+
if is_relevant_function:
break
else:
if self.lineno(newframe) > beginline:
break
-
+
if self.stepinto:
self.disable_breakpoints()
return result
-
+
def step(self, *args):
return self.finish_executing(self._step())
def run(self, *args):
self.finish_executing(gdb.execute('run', to_string=True))
-
+
def cont(self, *args):
self.finish_executing(gdb.execute('cont', to_string=True))
class PythonCodeStepper(GenericCodeStepper):
-
+
def pyframe(self, frame):
pyframe = Frame(frame).get_pyop()
if pyframe:
raise gdb.GdbError(
"Unable to find the Python frame, run your code with a debug "
"build (configure with --with-pydebug or compile with -g).")
-
+
def lineno(self, frame):
return self.pyframe(frame).current_line_num()
-
+
def is_relevant_function(self, frame):
return Frame(frame).is_evalframeex()
def get_source_line(self, frame):
try:
pyframe = self.pyframe(frame)
- return '%4d %s' % (pyframe.current_line_num(),
+ return '%4d %s' % (pyframe.current_line_num(),
pyframe.current_line().rstrip())
except IOError, e:
return None
-
+
def static_break_functions(self):
yield 'PyEval_EvalFrameEx'
class PyStep(PythonCodeStepper):
"Step through Python code."
-
+
invoke = PythonCodeStepper.step
-
+
class PyNext(PythonCodeStepper):
"Step-over Python code."
-
+
invoke = PythonCodeStepper.step
-
+
class PyFinish(PythonCodeStepper):
"Execute until function returns to a caller."
-
+
invoke = PythonCodeStepper.finish
class PyRun(PythonCodeStepper):
"Run the program."
-
+
invoke = PythonCodeStepper.run
class PyCont(PythonCodeStepper):
-
+
invoke = PythonCodeStepper.cont
def _pointervalue(gdbval):
"""
- Return the value of the pionter as a Python int.
-
+ Return the value of the pionter as a Python int.
+
gdbval.type must be a pointer type
"""
# don't convert with int() as it will raise a RuntimeError
# work around yet another bug in gdb where you get random behaviour
# and tracebacks
pass
-
+
return pointer
def get_inferior_unicode_postfix():
try:
gdb.parse_and_eval('PyUnicode_FromEncodedObject')
except RuntimeError:
- try:
+ try:
gdb.parse_and_eval('PyUnicodeUCS2_FromEncodedObject')
except RuntimeError:
return 'UCS4'
return 'UCS2'
else:
return ''
-
+
class PythonCodeExecutor(object):
-
+
def malloc(self, size):
chunk = (gdb.parse_and_eval("(void *) malloc((size_t) %d)" % size))
-
+
pointer = pointervalue(chunk)
if pointer == 0:
raise gdb.GdbError("No memory could be allocated in the inferior.")
-
+
return pointer
-
+
def alloc_string(self, string):
pointer = self.malloc(len(string))
get_selected_inferior().write_memory(pointer, string)
-
+
return pointer
-
+
def alloc_pystring(self, string):
stringp = self.alloc_string(string)
PyString_FromStringAndSize = 'PyString_FromStringAndSize'
-
+
try:
gdb.parse_and_eval(PyString_FromStringAndSize)
except RuntimeError:
# Python 3
- PyString_FromStringAndSize = ('PyUnicode%s_FromStringAndSize' %
+ PyString_FromStringAndSize = ('PyUnicode%s_FromStringAndSize' %
(get_inferior_unicode_postfix,))
try:
PyString_FromStringAndSize, stringp, len(string)))
finally:
self.free(stringp)
-
+
pointer = pointervalue(result)
if pointer == 0:
raise gdb.GdbError("Unable to allocate Python string in "
"the inferior.")
-
+
return pointer
-
+
def free(self, pointer):
gdb.parse_and_eval("free((void *) %d)" % pointer)
-
+
def incref(self, pointer):
"Increment the reference count of a Python object in the inferior."
gdb.parse_and_eval('Py_IncRef((PyObject *) %d)' % pointer)
-
+
def decref(self, pointer):
"Decrement the reference count of a Python object in the inferior."
# Py_DecRef is like Py_XDECREF, but a function. So we don't have
# to check for NULL. This should also decref all our allocated
# Python strings.
gdb.parse_and_eval('Py_DecRef((PyObject *) %d)' % pointer)
-
+
def evalcode(self, code, input_type, global_dict=None, local_dict=None):
"""
Evaluate python code `code` given as a string in the inferior and
return the result as a gdb.Value. Returns a new reference in the
inferior.
-
+
Of course, executing any code in the inferior may be dangerous and may
leave the debuggee in an unsafe state or terminate it alltogether.
"""
if '\0' in code:
raise gdb.GdbError("String contains NUL byte.")
-
+
code += '\0'
-
+
pointer = self.alloc_string(code)
-
+
globalsp = pointervalue(global_dict)
localsp = pointervalue(local_dict)
-
+
if globalsp == 0 or localsp == 0:
raise gdb.GdbError("Unable to obtain or create locals or globals.")
-
+
code = """
PyRun_String(
(char *) %(code)d,
(int) %(start)d,
(PyObject *) %(globals)s,
(PyObject *) %(locals)d)
- """ % dict(code=pointer, start=input_type,
+ """ % dict(code=pointer, start=input_type,
globals=globalsp, locals=localsp)
-
+
with FetchAndRestoreError():
try:
self.decref(gdb.parse_and_eval(code))
def __init__(self):
self.sizeof_PyObjectPtr = gdb.lookup_type('PyObject').pointer().sizeof
self.pointer = self.malloc(self.sizeof_PyObjectPtr * 3)
-
+
type = self.pointer
value = self.pointer + self.sizeof_PyObjectPtr
traceback = self.pointer + self.sizeof_PyObjectPtr * 2
-
+
self.errstate = type, value, traceback
-
+
def __enter__(self):
gdb.parse_and_eval("PyErr_Fetch(%d, %d, %d)" % self.errstate)
-
+
def __exit__(self, *args):
if gdb.parse_and_eval("(int) PyErr_Occurred()"):
gdb.parse_and_eval("PyErr_Print()")
-
+
pyerr_restore = ("PyErr_Restore("
"(PyObject *) *%d,"
"(PyObject *) *%d,"
"(PyObject *) *%d)")
-
+
try:
gdb.parse_and_eval(pyerr_restore % self.errstate)
finally:
super(FixGdbCommand, self).__init__(command, gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
self.actual_command = actual_command
-
+
def fix_gdb(self):
"""
- So, you must be wondering what the story is this time! Yeeees, indeed,
+ So, you must be wondering what the story is this time! Yeeees, indeed,
I have quite the story for you! It seems that invoking either 'cy exec'
- and 'py-exec' work perfectly fine, but after this gdb's python API is
- entirely broken. Some unset exception value is still set?
+ and 'py-exec' work perfectly fine, but after this gdb's python API is
+ entirely broken. Some unset exception value is still set?
sys.exc_clear() didn't help. A demonstration:
-
+
(gdb) cy exec 'hello'
'hello'
(gdb) python gdb.execute('cont')
Error while executing Python code.
(gdb) python gdb.execute('cont')
[15148 refs]
-
+
Program exited normally.
"""
- warnings.filterwarnings('ignore', r'.*', RuntimeWarning,
+ warnings.filterwarnings('ignore', r'.*', RuntimeWarning,
re.escape(__name__))
try:
long(gdb.parse_and_eval("(void *) 0")) == 0
except RuntimeError:
pass
# warnings.resetwarnings()
-
+
def invoke(self, args, from_tty):
self.fix_gdb()
try:
class PyExec(gdb.Command):
-
+
def readcode(self, expr):
if expr:
return expr, Py_single_input
else:
if line.rstrip() == 'end':
break
-
+
lines.append(line)
-
+
return '\n'.join(lines), Py_file_input
-
+
def invoke(self, expr, from_tty):
expr, input_type = self.readcode(expr)
-
+
executor = PythonCodeExecutor()
global_dict = gdb.parse_and_eval('PyEval_GetGlobals()')
local_dict = gdb.parse_and_eval('PyEval_GetLocals()')
-
+
if pointervalue(global_dict) == 0 or pointervalue(local_dict) == 0:
raise gdb.GdbError("Unable to find the locals or globals of the "
"most recent Python function (relative to the "
"selected frame).")
-
+
executor.evalcode(expr, input_type, global_dict, local_dict)
py_exec = FixGdbCommand('py-exec', '-py-exec')
# so that *our* build_ext can make use of it.
from Cython.Distutils.build_ext import build_ext
-
+
# from extension import Extension
class Optimization(object):
def __init__(self):
self.flags = (
- 'OPT',
+ 'OPT',
'CFLAGS',
'CPPFLAGS',
- 'EXTRA_CFLAGS',
+ 'EXTRA_CFLAGS',
'BASECFLAGS',
'PY_CFLAGS',
)
self.state = sysconfig.get_config_vars(*self.flags)
self.config_vars = sysconfig.get_config_vars()
-
-
+
+
def disable_optimization(self):
"disable optimization for the C or C++ compiler"
badoptions = ('-O1', '-O2', '-O3')
-
+
for flag, option in zip(self.flags, self.state):
if option is not None:
L = [opt for opt in option.split() if opt not in badoptions]
self.config_vars[flag] = ' '.join(L)
-
+
def restore_state(self):
"restore the original state"
for flag, option in zip(self.flags, self.state):
for x in it:
if x:
return True
-
+
return False
-
+
class build_ext(_build_ext.build_ext):
])
boolean_options.extend([
- 'pyrex-cplus', 'pyrex-create-listing', 'pyrex-line-directives',
+ 'pyrex-cplus', 'pyrex-create-listing', 'pyrex-line-directives',
'pyrex-c-in-temp', 'pyrex-gdb',
])
if self.pyrex_directives is None:
self.pyrex_directives = {}
# finalize_options ()
-
+
def run(self):
# We have one shot at this before build_ext initializes the compiler.
# If --pyrex-gdb is in effect as a command line option or as option
# of any Extension module, disable optimization for the C or C++
# compiler.
- if (self.pyrex_gdb or any([getattr(ext, 'pyrex_gdb', False)
+ if (self.pyrex_gdb or any([getattr(ext, 'pyrex_gdb', False)
for ext in self.extensions])):
optimization.disable_optimization()
-
+
_build_ext.build_ext.run(self)
-
+
def build_extensions(self):
# First, sanity-check the 'extensions' list
self.check_extensions_list(self.extensions)
-
+
for ext in self.extensions:
ext.sources = self.cython_sources(ext.sources, ext)
self.build_extension(ext)
# cplus = self.pyrex_cplus or \
# (extension.language != None and \
# extension.language.lower() == 'c++')
-
+
create_listing = self.pyrex_create_listing or \
getattr(extension, 'pyrex_create_listing', 0)
line_directives = self.pyrex_line_directives or \
output_dir = os.curdir
else:
output_dir = self.build_lib
- options = CompilationOptions(pyrex_default_options,
+ options = CompilationOptions(pyrex_default_options,
use_listing_file = create_listing,
include_path = includes,
compiler_directives = directives,
# PyObject* PyNumber_Add(PyObject *o1, PyObject *o2)
#
# in your file after any .pxi includes. Cython will use the latest
-# declaration.
+# declaration.
#
# Cython takes care of this automatically for anything of type object.
## More precisely, I think the correct convention for
## any funny reference counting.
## (2) Declare output as object if a new reference is returned.
## (3) Declare output as PyObject* if a borrowed reference is returned.
-##
+##
## This way when you call objects, no cast is needed, and if the api
## calls returns a new reference (which is about 95% of them), then
## you can just assign to a variable of type object. With borrowed
## to your object, so you're OK, as long as you relealize this
## and use the result of an explicit cast to <object> as a borrowed
## reference (and you can call Py_INCREF if you want to turn it
-## into another reference for some reason).
-#
+## into another reference for some reason).
+#
# "The reference count is important because today's computers have
# a finite (and often severely limited) memory size; it counts how
# many different places there are that have a reference to an
# count as there are distinct memory locations in virtual memory
# (assuming sizeof(long) >= sizeof(char*)). Thus, the reference
# count increment is a simple operation.
-#
+#
# It is not necessary to increment an object's reference count for
# every local variable that contains a pointer to an object. In
# theory, the object's reference count goes up by one when the
# counts.
# Py_RETURN_FALSE
- # Return Py_False from a function, properly incrementing its reference count.
+ # Return Py_False from a function, properly incrementing its reference count.
# Py_RETURN_TRUE
- # Return Py_True from a function, properly incrementing its reference count.
+ # Return Py_True from a function, properly incrementing its reference count.
object PyBool_FromLong(long v)
# Return value: New reference.
- # Return a new reference to Py_True or Py_False depending on the truth value of v.
+ # Return a new reference to Py_True or Py_False depending on the truth value of v.
# is 'C') or Fortran-style (fortran is 'F') contiguous or either
# one (fortran is 'A'). Return 0 otherwise.
- void PyBuffer_FillContiguousStrides(int ndims,
- Py_ssize_t *shape,
+ void PyBuffer_FillContiguousStrides(int ndims,
+ Py_ssize_t *shape,
Py_ssize_t *strides,
int itemsize,
char fort)
# These functions raise TypeError when expecting a string
# parameter and are called with a non-string parameter.
# PyStringObject
- # This subtype of PyObject represents a Python bytes object.
+ # This subtype of PyObject represents a Python bytes object.
# PyTypeObject PyBytes_Type
# This instance of PyTypeObject represents the Python bytes type;
# it is the same object as bytes and types.BytesType in the Python
- # layer.
+ # layer.
bint PyBytes_Check(object o)
# Return true if the object o is a string object or an instance of
- # a subtype of the string type.
+ # a subtype of the string type.
bint PyBytes_CheckExact(object o)
- # Return true if the object o is a string object, but not an instance of a subtype of the string type.
+ # Return true if the object o is a string object, but not an instance of a subtype of the string type.
bytes PyBytes_FromString(char *v)
# Return value: New reference.
bytes PyBytes_FromFormatV(char *format, va_list vargs)
# Return value: New reference.
- # Identical to PyBytes_FromFormat() except that it takes exactly two arguments.
+ # Identical to PyBytes_FromFormat() except that it takes exactly two arguments.
Py_ssize_t PyBytes_Size(object string) except -1
- # Return the length of the string in string object string.
+ # Return the length of the string in string object string.
Py_ssize_t PyBytes_GET_SIZE(object string)
- # Macro form of PyBytes_Size() but without error checking.
+ # Macro form of PyBytes_Size() but without error checking.
char* PyBytes_AsString(object string) except NULL
# Return a NUL-terminated representation of the contents of
# version of the object. If length is NULL, the resulting buffer
# may not contain NUL characters; if it does, the function returns
# -1 and a TypeError is raised.
-
+
# The buffer refers to an internal string buffer of obj, not a
# copy. The data must not be modified in any way, unless the
# string was just created using PyBytes_FromStringAndSize(NULL,
############################################################################
# PyComplexObject
- # This subtype of PyObject represents a Python complex number object.
+ # This subtype of PyObject represents a Python complex number object.
ctypedef class __builtin__.complex [object PyComplexObject]:
cdef Py_complex cval
object PyComplex_FromCComplex(Py_complex v)
# Return value: New reference.
- # Create a new Python complex number object from a C Py_complex value.
+ # Create a new Python complex number object from a C Py_complex value.
object PyComplex_FromDoubles(double real, double imag)
# Return value: New reference.
- # Return a new PyComplexObject object from real and imag.
+ # Return a new PyComplexObject object from real and imag.
double PyComplex_RealAsDouble(object op) except? -1
- # Return the real part of op as a C double.
+ # Return the real part of op as a C double.
double PyComplex_ImagAsDouble(object op) except? -1
- # Return the imaginary part of op as a C double.
+ # Return the imaginary part of op as a C double.
Py_complex PyComplex_AsCComplex(object op)
# Return the Py_complex value of the complex number op.
#
# This subtype of PyObject represents a Python dictionary object
# (i.e. the 'dict' type).
-
+
# PyTypeObject PyDict_Type
#
# This instance of PyTypeObject represents the Python dictionary
bint PyDict_Check(object p)
# Return true if p is a dict object or an instance of a subtype of
- # the dict type.
+ # the dict type.
bint PyDict_CheckExact(object p)
# Return true if p is a dict object, but not an instance of a
object PyDict_New()
# Return value: New reference.
- # Return a new empty dictionary, or NULL on failure.
+ # Return a new empty dictionary, or NULL on failure.
object PyDictProxy_New(object dict)
# Return value: New reference.
# Return a proxy object for a mapping which enforces read-only
# behavior. This is normally used to create a proxy to prevent
- # modification of the dictionary for non-dynamic class types.
+ # modification of the dictionary for non-dynamic class types.
void PyDict_Clear(object p)
- # Empty an existing dictionary of all key-value pairs.
+ # Empty an existing dictionary of all key-value pairs.
int PyDict_Contains(object p, object key) except -1
# Determine if dictionary p contains key. If an item in p is
# matches key, return 1, otherwise return 0. On error, return
- # -1. This is equivalent to the Python expression "key in p".
+ # -1. This is equivalent to the Python expression "key in p".
object PyDict_Copy(object p)
# Return value: New reference.
from cpython.ref cimport PyObject
cdef extern from "Python.h":
-
+
#####################################################################
# 3. Exception Handling
#####################################################################
# performance.
void PyErr_Clear()
- # Clear the error indicator. If the error indicator is not set, there is no effect.
+ # Clear the error indicator. If the error indicator is not set, there is no effect.
void PyErr_Fetch(PyObject** ptype, PyObject** pvalue, PyObject** ptraceback)
# Retrieve the error indicator into three variables whose
# parsed, but the width part is ignored.
void PyErr_SetNone(object type)
- # This is a shorthand for "PyErr_SetObject(type, Py_None)".
+ # This is a shorthand for "PyErr_SetObject(type, Py_None)".
int PyErr_BadArgument() except 0
cdef extern from "Python.h":
############################################################################
- # 7.2.3
+ # 7.2.3
############################################################################
# PyFloatObject
#
# This subtype of PyObject represents a Python floating point object.
-
+
# PyTypeObject PyFloat_Type
#
# This instance of PyTypeObject represents the Python floating
bint PyFloat_CheckExact(object p)
# Return true if its argument is a PyFloatObject, but not a
- # subtype of PyFloatObject.
+ # subtype of PyFloatObject.
object PyFloat_FromString(object str, char **pend)
# Return value: New reference.
object PyFloat_FromDouble(double v)
# Return value: New reference.
- # Create a PyFloatObject object from v, or NULL on failure.
+ # Create a PyFloatObject object from v, or NULL on failure.
double PyFloat_AsDouble(object pyfloat) except? -1
# Return a C double representation of the contents of pyfloat.
PyObject* PyFunction_GetCode(object op) except? NULL
# Return value: Borrowed reference.
- # Return the code object associated with the function object op.
+ # Return the code object associated with the function object op.
PyObject* PyFunction_GetGlobals(object op) except? NULL
# Return value: Borrowed reference.
- # Return the globals dictionary associated with the function object op.
+ # Return the globals dictionary associated with the function object op.
PyObject* PyFunction_GetModule(object op) except? NULL
# Return value: Borrowed reference.
int PyFunction_SetDefaults(object op, object defaults) except -1
# Set the argument default values for the function object
# op. defaults must be Py_None or a tuple.
- # Raises SystemError and returns -1 on failure.
+ # Raises SystemError and returns -1 on failure.
PyObject* PyFunction_GetClosure(object op) except? NULL
# Return value: Borrowed reference.
int PyFunction_SetClosure(object op, object closure) except -1
# Set the closure associated with the function object op. closure
# must be Py_None or a tuple of cell objects.
- # Raises SystemError and returns -1 on failure.
+ # Raises SystemError and returns -1 on failure.
cdef extern from "Python.h":
-
+
############################################################################
# 7.5.2 Instance Objects
############################################################################
# PyTypeObject PyInstance_Type
#
# Type object for class instances.
-
+
int PyInstance_Check(object obj)
- # Return true if obj is an instance.
+ # Return true if obj is an instance.
object PyInstance_New(object cls, object arg, object kw)
# Return value: New reference.
# whether the value just happened to be -1.
long PyInt_AS_LONG(object io)
- # Return the value of the object io. No error checking is performed.
+ # Return the value of the object io. No error checking is performed.
unsigned long PyInt_AsUnsignedLongMask(object io) except? -1
# Will first attempt to cast the object to a PyIntObject or
# PyLongObject, if it is not already one, and then return its
# value as unsigned long. This function does not check for
- # overflow.
+ # overflow.
PY_LONG_LONG PyInt_AsUnsignedLongLongMask(object io) except? -1
# Will first attempt to cast the object to a PyIntObject or
# PyLongObject, if it is not already one, and then return its
- # value as unsigned long long, without checking for overflow.
+ # value as unsigned long long, without checking for overflow.
Py_ssize_t PyInt_AsSsize_t(object io) except? -1
# Will first attempt to cast the object to a PyIntObject or
# 6.5 Iterator Protocol
############################################################################
bint PyIter_Check(object o)
- # Return true if the object o supports the iterator protocol.
+ # Return true if the object o supports the iterator protocol.
object PyIter_Next(object o)
# Return value: New reference.
# functions such as PySequence_SetItem() or expose the object to
# Python code before setting all items to a real object with
# PyList_SetItem().
-
+
bint PyList_Check(object p)
# Return true if p is a list object or an instance of a subtype of
# the list type.
-
+
bint PyList_CheckExact(object p)
# Return true if p is a list object, but not an instance of a
# subtype of the list type.
# to "len(list)" on a list object.
Py_ssize_t PyList_GET_SIZE(object list)
- # Macro form of PyList_Size() without error checking.
+ # Macro form of PyList_Size() without error checking.
PyObject* PyList_GetItem(object list, Py_ssize_t index) except NULL
- # Return value: Borrowed reference.
+ # Return value: Borrowed reference.
# Return the object at position pos in the list pointed to by
# p. The position must be positive, indexing from the end of the
# list is not supported. If pos is out of bounds, return NULL and
PyObject* PyList_GET_ITEM(object list, Py_ssize_t i)
# Return value: Borrowed reference.
- # Macro form of PyList_GetItem() without error checking.
+ # Macro form of PyList_GetItem() without error checking.
int PyList_SetItem(object list, Py_ssize_t index, object item) except -1
# Set the item at index index in list to item. Return 0 on success
# type. This is the same object as long and types.LongType.
bint PyLong_Check(object p)
- # Return true if its argument is a PyLongObject or a subtype of PyLongObject.
+ # Return true if its argument is a PyLongObject or a subtype of PyLongObject.
bint PyLong_CheckExact(object p)
# Return true if its argument is a PyLongObject, but not a subtype of PyLongObject.
object PyLong_FromLong(long v)
# Return value: New reference.
- # Return a new PyLongObject object from v, or NULL on failure.
+ # Return a new PyLongObject object from v, or NULL on failure.
object PyLong_FromUnsignedLong(unsigned long v)
# Return value: New reference.
- # Return a new PyLongObject object from a C unsigned long, or NULL on failure.
+ # Return a new PyLongObject object from a C unsigned long, or NULL on failure.
object PyLong_FromLongLong(PY_LONG_LONG v)
# Return value: New reference.
- # Return a new PyLongObject object from a C long long, or NULL on failure.
+ # Return a new PyLongObject object from a C long long, or NULL on failure.
object PyLong_FromUnsignedLongLong(uPY_LONG_LONG v)
# Return value: New reference.
- # Return a new PyLongObject object from a C unsigned long long, or NULL on failure.
+ # Return a new PyLongObject object from a C unsigned long long, or NULL on failure.
object PyLong_FromDouble(double v)
# Return value: New reference.
- # Return a new PyLongObject object from the integer part of v, or NULL on failure.
+ # Return a new PyLongObject object from the integer part of v, or NULL on failure.
object PyLong_FromString(char *str, char **pend, int base)
# Return value: New reference.
# the Unicode string, length gives the number of characters, and
# base is the radix for the conversion. The radix must be in the
# range [2, 36]; if it is out of range, ValueError will be
- # raised.
+ # raised.
object PyLong_FromVoidPtr(void *p)
# Return value: New reference.
# Return a C unsigned long long from a Python long integer. If
# pylong cannot be represented as an unsigned long long, an
# OverflowError will be raised if the value is positive, or a
- # TypeError will be raised if the value is negative.
+ # TypeError will be raised if the value is negative.
unsigned long PyLong_AsUnsignedLongMask(object io) except? -1
# Return a C unsigned long from a Python long integer, without
- # checking for overflow.
+ # checking for overflow.
uPY_LONG_LONG PyLong_AsUnsignedLongLongMask(object io) except? -1
#unsigned PY_LONG_LONG PyLong_AsUnsignedLongLongMask(object io)
# sizeof(TYPE)) bytes. Returns a pointer cast to TYPE*.
void PyMem_Del(void *p)
- # Same as PyMem_Free().
+ # Same as PyMem_Free().
# In addition, the following macro sets are provided for calling
# the Python memory allocator directly, without involving the C
# There are some useful functions that are useful for working with method objects.
# PyTypeObject PyMethod_Type
- # This instance of PyTypeObject represents the Python method type. This is exposed to Python programs as types.MethodType.
+ # This instance of PyTypeObject represents the Python method type. This is exposed to Python programs as types.MethodType.
bint PyMethod_Check(object o)
# Return true if o is a method object (has type
PyObject* PyMethod_GET_CLASS(object meth)
# Return value: Borrowed reference.
- # Macro version of PyMethod_Class() which avoids error checking.
+ # Macro version of PyMethod_Class() which avoids error checking.
PyObject* PyMethod_Function(object meth) except NULL
# Return value: Borrowed reference.
- # Return the function object associated with the method meth.
+ # Return the function object associated with the method meth.
PyObject* PyMethod_GET_FUNCTION(object meth)
# Return value: Borrowed reference.
- # Macro version of PyMethod_Function() which avoids error checking.
+ # Macro version of PyMethod_Function() which avoids error checking.
PyObject* PyMethod_Self(object meth) except? NULL
# Return value: Borrowed reference.
- # Return the instance associated with the method meth if it is bound, otherwise return NULL.
+ # Return the instance associated with the method meth if it is bound, otherwise return NULL.
PyObject* PyMethod_GET_SELF(object meth)
# Return value: Borrowed reference.
- # Macro version of PyMethod_Self() which avoids error checking.
+ # Macro version of PyMethod_Self() which avoids error checking.
# object.
bint PyModule_CheckExact(object p)
- # Return true if p is a module object, but not a subtype of PyModule_Type.
+ # Return true if p is a module object, but not a subtype of PyModule_Type.
object PyModule_New(char *name)
# Return value: New reference.
# Add an object to module as name. This is a convenience function
# which can be used from the module's initialization
# function. This steals a reference to value. Return -1 on error,
- # 0 on success.
+ # 0 on success.
int PyModule_AddIntant(object module, char *name, long value) except -1
# Add an integer ant to module as name. This convenience
# function can be used from the module's initialization
- # function. Return -1 on error, 0 on success.
+ # function. Return -1 on error, 0 on success.
int PyModule_AddStringant(object module, char *name, char *value) except -1
# Add a string constant to module as name. This convenience
# function can be used from the module's initialization
# function. The string value must be null-terminated. Return -1 on
- # error, 0 on success.
+ # error, 0 on success.
object PyNumber_FloorDivide(object o1, object o2)
# Return value: New reference.
# Return the floor of o1 divided by o2, or NULL on failure. This
- # is equivalent to the ``classic'' division of integers.
+ # is equivalent to the ``classic'' division of integers.
object PyNumber_TrueDivide(object o1, object o2)
# Return value: New reference.
# ``approximate'' because binary floating point numbers are
# approximate; it is not possible to represent all real numbers in
# base two. This function can return a floating point value when
- # passed two integers.
+ # passed two integers.
object PyNumber_Remainder(object o1, object o2)
# Return value: New reference.
object PyNumber_Or(object o1, object o2)
# Return value: New reference.
- # Returns the ``bitwise or'' of o1 and o2 on success, or NULL on failure. This is the equivalent of the Python expression "o1 | o2".
+ # Returns the ``bitwise or'' of o1 and o2 on success, or NULL on failure. This is the equivalent of the Python expression "o1 | o2".
object PyNumber_InPlaceAdd(object o1, object o2)
# Return value: New reference.
# Returns the mathematical floor of dividing o1 by o2, or NULL on
# failure. The operation is done in-place when o1 supports
# it. This is the equivalent of the Python statement "o1 //=
- # o2".
+ # o2".
object PyNumber_InPlaceTrueDivide(object o1, object o2)
# Return value: New reference.
# approximate; it is not possible to represent all real numbers in
# base two. This function can return a floating point value when
# passed two integers. The operation is done in-place when o1
- # supports it.
+ # supports it.
object PyNumber_InPlaceRemainder(object o1, object o2)
# Return value: New reference.
# exception that will be raised (usually IndexError or
# OverflowError). If exc is NULL, then the exception is cleared
# and the value is clipped to PY_SSIZE_T_MIN for a negative
- # integer or PY_SSIZE_T_MAX for a positive integer.
+ # integer or PY_SSIZE_T_MAX for a positive integer.
bint PyIndex_Check(object o)
# Returns True if o is an index integer (has the nb_index slot of
from libc.stdio cimport FILE
cdef extern from "Python.h":
-
+
#####################################################################
# 6.1 Object Protocol
#####################################################################
# Delete attribute named attr_name, for object o. Returns -1 on
# failure. This is the equivalent of the Python statement "del
# o.attr_name".
-
+
int Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, Py_GE
object PyObject_RichCompare(object o1, object o2, int opid)
# instance and cls is neither a type object, nor a class object,
# nor a tuple, inst must have a __class__ attribute -- the class
# relationship of the value of that attribute with cls will be
- # used to determine the result of this function.
+ # used to determine the result of this function.
# Subclass determination is done in a fairly straightforward way,
# but includes a wrinkle that implementors of extensions to the
# Call a callable Python object callable, with a variable number
# of PyObject* arguments. The arguments are provided as a variable
# number of parameters followed by NULL. Returns the result of the
- # call on success, or NULL on failure.
+ # call on success, or NULL on failure.
#PyObject* PyObject_CallMethodObjArgs(object o, object name, ..., NULL)
object PyObject_CallMethodObjArgs(object o, object name, ...)
# In other words, if PyCapsule_IsValid() returns a true value,
# calls to any of the accessors (any function starting with
# PyCapsule_Get()) are guaranteed to succeed.
- #
+ #
# Return a nonzero value if the object is valid and matches the
# name passed in. Return 0 otherwise. This function will not fail.
# Py_XINCREF().
void Py_XINCREF(PyObject* o)
- # Increment the reference count for object o. The object may be NULL, in which case the macro has no effect.
+ # Increment the reference count for object o. The object may be NULL, in which case the macro has no effect.
void Py_DECREF(object o)
# Decrement the reference count for object o. The object must not
# this is equivalent to the Python expression "len(o)".
Py_ssize_t PySequence_Length(object o) except -1
- # Alternate name for PySequence_Size().
+ # Alternate name for PySequence_Size().
object PySequence_Concat(object o1, object o2)
# Return value: New reference.
PyObject** PySequence_Fast_ITEMS(object o)
# Return the underlying array of PyObject pointers. Assumes that o
- # was returned by PySequence_Fast() and o is not NULL.
+ # was returned by PySequence_Fast() and o is not NULL.
object PySequence_ITEM(object o, Py_ssize_t i)
# Return value: New reference.
# structure.
# PyTypeObject PySet_Type
- # This is an instance of PyTypeObject representing the Python set type.
+ # This is an instance of PyTypeObject representing the Python set type.
# PyTypeObject PyFrozenSet_Type
- # This is an instance of PyTypeObject representing the Python frozenset type.
+ # This is an instance of PyTypeObject representing the Python frozenset type.
# The following type check macros work on pointers to any Python
# object. Likewise, the constructor functions work with any
# an instance of a subtype.
bint PyFrozenSet_CheckExact(object p)
- # Return true if p is a frozenset object but not an instance of a subtype.
+ # Return true if p is a frozenset object but not an instance of a subtype.
object PySet_New(object iterable)
# Return value: New reference.
# set, frozenset, or an instance of a subtype.
int PySet_GET_SIZE(object anyset)
- # Macro form of PySet_Size() without error checking.
+ # Macro form of PySet_Size() without error checking.
bint PySet_Contains(object anyset, object key) except -1
# Return 1 if found, 0 if not found, and -1 if an error is
# not an instance of set or its subtype.
int PySet_Clear(object set)
- # Empty an existing set of all elements.
+ # Empty an existing set of all elements.
# These functions raise TypeError when expecting a string
# parameter and are called with a non-string parameter.
# PyStringObject
- # This subtype of PyObject represents a Python string object.
+ # This subtype of PyObject represents a Python string object.
# PyTypeObject PyString_Type
# This instance of PyTypeObject represents the Python string type;
# it is the same object as str and types.StringType in the Python
- # layer.
+ # layer.
bint PyString_Check(object o)
# Return true if the object o is a string object or an instance of
- # a subtype of the string type.
+ # a subtype of the string type.
bint PyString_CheckExact(object o)
- # Return true if the object o is a string object, but not an instance of a subtype of the string type.
+ # Return true if the object o is a string object, but not an instance of a subtype of the string type.
object PyString_FromString(char *v)
# Return value: New reference.
object PyString_FromFormatV(char *format, va_list vargs)
# Return value: New reference.
- # Identical to PyString_FromFormat() except that it takes exactly two arguments.
+ # Identical to PyString_FromFormat() except that it takes exactly two arguments.
Py_ssize_t PyString_Size(object string) except -1
- # Return the length of the string in string object string.
+ # Return the length of the string in string object string.
Py_ssize_t PyString_GET_SIZE(object string)
- # Macro form of PyString_Size() but without error checking.
+ # Macro form of PyString_Size() but without error checking.
char* PyString_AsString(object string) except NULL
# Return a NUL-terminated representation of the contents of
# version of the object. If length is NULL, the resulting buffer
# may not contain NUL characters; if it does, the function returns
# -1 and a TypeError is raised.
-
+
# The buffer refers to an internal string buffer of obj, not a
# copy. The data must not be modified in any way, unless the
# string was just created using PyString_FromStringAndSize(NULL,
tuple PyTuple_New(Py_ssize_t len)
# Return value: New reference.
- # Return a new tuple object of size len, or NULL on failure.
+ # Return a new tuple object of size len, or NULL on failure.
tuple PyTuple_Pack(Py_ssize_t n, ...)
# Return value: New reference.
# equivalent to "Py_BuildValue("(OO)", a, b)".
int PyTuple_Size(object p) except -1
- # Take a pointer to a tuple object, and return the size of that tuple.
+ # Take a pointer to a tuple object, and return the size of that tuple.
int PyTuple_GET_SIZE(object p)
# Return the size of the tuple p, which must be non-NULL and point
PyObject* PyTuple_GET_ITEM(object p, Py_ssize_t pos)
# Return value: Borrowed reference.
- # Like PyTuple_GetItem(), but does no checking of its arguments.
+ # Like PyTuple_GetItem(), but does no checking of its arguments.
tuple PyTuple_GetSlice(object p, Py_ssize_t low, Py_ssize_t high)
# Return value: New reference.
- # Take a slice of the tuple pointed to by p from low to high and return it as a new tuple.
+ # Take a slice of the tuple pointed to by p from low to high and return it as a new tuple.
int PyTuple_SetItem(object p, Py_ssize_t pos, object o)
# Insert a reference to object o at position pos of the tuple
cdef extern from "Python.h":
- # The C structure of the objects used to describe built-in types.
+ # The C structure of the objects used to describe built-in types.
############################################################################
# 7.1.1 Type Objects
bint PyType_IS_GC(object o)
# Return true if the type object includes support for the cycle
- # detector; this tests the type flag Py_TPFLAGS_HAVE_GC.
+ # detector; this tests the type flag Py_TPFLAGS_HAVE_GC.
bint PyType_IsSubtype(object a, object b)
- # Return true if a is a subtype of b.
+ # Return true if a is a subtype of b.
object PyType_GenericAlloc(object type, Py_ssize_t nitems)
# Return value: New reference.
# Return 1 or 0 depending on whether ch is an uppercase character.
bint Py_UNICODE_ISUPPER(Py_UNICODE ch)
-
- # Return 1 or 0 depending on whether ch is a titlecase character.
+
+ # Return 1 or 0 depending on whether ch is a titlecase character.
bint Py_UNICODE_ISTITLE(Py_UNICODE ch)
- # Return 1 or 0 depending on whether ch is a linebreak character.
+ # Return 1 or 0 depending on whether ch is a linebreak character.
bint Py_UNICODE_ISLINEBREAK(Py_UNICODE ch)
- # Return 1 or 0 depending on whether ch is a decimal character.
+ # Return 1 or 0 depending on whether ch is a decimal character.
bint Py_UNICODE_ISDECIMAL(Py_UNICODE ch)
- # Return 1 or 0 depending on whether ch is a digit character.
+ # Return 1 or 0 depending on whether ch is a digit character.
bint Py_UNICODE_ISDIGIT(Py_UNICODE ch)
- # Return 1 or 0 depending on whether ch is a numeric character.
+ # Return 1 or 0 depending on whether ch is a numeric character.
bint Py_UNICODE_ISNUMERIC(Py_UNICODE ch)
- # Return 1 or 0 depending on whether ch is an alphabetic character.
+ # Return 1 or 0 depending on whether ch is an alphabetic character.
bint Py_UNICODE_ISALPHA(Py_UNICODE ch)
- # Return 1 or 0 depending on whether ch is an alphanumeric character.
+ # Return 1 or 0 depending on whether ch is an alphanumeric character.
bint Py_UNICODE_ISALNUM(Py_UNICODE ch)
- # Return the character ch converted to lower case.
+ # Return the character ch converted to lower case.
Py_UNICODE Py_UNICODE_TOLOWER(Py_UNICODE ch)
- # Return the character ch converted to upper case.
+ # Return the character ch converted to upper case.
Py_UNICODE Py_UNICODE_TOUPPER(Py_UNICODE ch)
- # Return the character ch converted to title case.
+ # Return the character ch converted to title case.
Py_UNICODE Py_UNICODE_TOTITLE(Py_UNICODE ch)
# Return the character ch converted to a decimal positive
# Py_UNICODE buffer, NULL if unicode is not a Unicode object.
Py_UNICODE* PyUnicode_AsUnicode(object o) except NULL
- # Return the length of the Unicode object.
+ # Return the length of the Unicode object.
Py_ssize_t PyUnicode_GetSize(object o) except -1
# Coerce an encoded object obj to an Unicode object and return a
# raised by the codec.
object PyUnicode_EncodeUTF8(Py_UNICODE *s, Py_ssize_t size, char *errors)
- # Encode a Unicode objects using UTF-8 and return the result as Python string object. Error handling is ``strict''. Return NULL if an exception was raised by the codec.
+ # Encode a Unicode objects using UTF-8 and return the result as Python string object. Error handling is ``strict''. Return NULL if an exception was raised by the codec.
object PyUnicode_AsUTF8String(object unicode)
# These are the UTF-16 codec APIs:
# Decode length bytes from a UTF-16 encoded buffer string and
# return the corresponding Unicode object. errors (if non-NULL)
# defines the error handling. It defaults to ``strict''.
- #
+ #
# If byteorder is non-NULL, the decoder starts decoding using the
# given byte order:
#
enum: SIGUSR2
enum: SIGWINCH
enum: SIGINFO
-
+
cdef FILE *stdin
cdef FILE *stdout
cdef FILE *stderr
-
+
enum: FOPEN_MAX
enum: FILENAME_MAX
FILE *fopen (const_char *FILENAME, const_char *OPENTYPE)
float strtof (const_char *STRING, char **TAILPTR)
double strtod (const_char *STRING, char **TAILPTR)
long double strtold (const_char *STRING, char **TAILPTR)
-
+
# 7.20.2 Pseudo-random sequence generation functions
enum: RAND_MAX
int rand ()
#7.20.5 Searching and sorting utilities
void *bsearch (const_void *KEY, const_void *ARRAY,
- size_t COUNT, size_t SIZE,
+ size_t COUNT, size_t SIZE,
int (*COMPARE)(const_void *, const_void *))
void qsort (void *ARRAY, size_t COUNT, size_t SIZE,
int (*COMPARE)(const_void *, const_void *))
char *strndup (const_char *S, size_t SIZE)
char *strcat (char *TO, const_char *FROM)
char *strncat (char *TO, const_char *FROM, size_t SIZE)
-
+
int strcmp (const_char *S1, const_char *S2)
int strcasecmp (const_char *S1, const_char *S2)
int strncmp (const_char *S1, const_char *S2, size_t SIZE)
NPY_BYTE
NPY_UBYTE
NPY_SHORT
- NPY_USHORT
+ NPY_USHORT
NPY_INT
- NPY_UINT
+ NPY_UINT
NPY_LONG
NPY_ULONG
NPY_LONGLONG
NPY_ULONGLONG
NPY_FLOAT
- NPY_DOUBLE
+ NPY_DOUBLE
NPY_LONGDOUBLE
NPY_CFLOAT
NPY_CDOUBLE
NPY_INOUT_FARRAY
NPY_UPDATE_ALL
-
+
cdef enum:
NPY_MAXDIMS
npy_intp NPY_MAX_ELSIZE
ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *)
-
+
ctypedef class numpy.dtype [object PyArray_Descr]:
# Use PyDataType_* macros when possible, however there are no macros
# for accessing some of the fields, so some are defined. Please
# For use in situations where ndarray can't replace PyArrayObject*,
# like PyArrayObject**.
pass
-
+
ctypedef class numpy.ndarray [object PyArrayObject]:
cdef __cythonbufferdefaults__ = {"mode": "strided"}
-
+
cdef:
# Only taking a few of the most commonly used and stable fields.
# One should use PyArray_* macros instead to access the C fields.
char *data
int ndim "nd"
- npy_intp *shape "dimensions"
+ npy_intp *shape "dimensions"
npy_intp *strides
dtype descr
PyObject* base
cdef int copy_shape, i, ndim
cdef int endian_detector = 1
cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
-
+
ndim = PyArray_NDIM(self)
if sizeof(npy_intp) != sizeof(Py_ssize_t):
if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
raise ValueError(u"ndarray is not C contiguous")
-
+
if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
raise ValueError(u"ndarray is not Fortran contiguous")
ctypedef signed int npy_int32
ctypedef signed long long npy_int64
ctypedef signed long long npy_int96
- ctypedef signed long long npy_int128
+ ctypedef signed long long npy_int128
ctypedef unsigned char npy_uint8
ctypedef unsigned short npy_uint16
ctypedef struct npy_complex192:
double real
double imag
-
+
ctypedef struct npy_complex256:
double real
double imag
object PyArray_GETITEM(ndarray arr, void *itemptr)
int PyArray_SETITEM(ndarray arr, void *itemptr, object obj)
-
+
bint PyTypeNum_ISBOOL(int)
bint PyTypeNum_ISUNSIGNED(int)
bint PyTypeNum_ISSIGNED(int)
bint PyArray_SAMESHAPE(ndarray, ndarray)
npy_intp PyArray_SIZE(ndarray)
npy_intp PyArray_NBYTES(ndarray)
-
+
object PyArray_FROM_O(object)
object PyArray_FROM_OF(object m, int flags)
bint PyArray_FROM_OT(object m, int type)
object PyArray_CheckAxis (ndarray, int *, int)
npy_intp PyArray_OverflowMultiplyList (npy_intp *, int)
int PyArray_CompareString (char *, char *, size_t)
-
+
# Typedefs that matches the runtime dtype objects in
# the numpy module.
cdef int endian_detector = 1
cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
cdef tuple fields
-
+
for childname in descr.names:
fields = descr.fields[childname]
child, new_offset = fields
#
# A proper PEP 3118 exporter for other clients than Cython
# must deal properly with this!
-
+
# Output padding bytes
while offset[0] < new_offset:
f[0] = 120 # "x"; pad byte
offset[0] += 1
offset[0] += child.itemsize
-
+
if not PyDataType_HASFIELDS(child):
t = child.type_num
if end - f < 5:
cdef extern from "numpy/ufuncobject.h":
ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *)
-
+
ctypedef extern class numpy.ufunc [object PyUFuncObject]:
cdef:
int nin, nout, nargs
int fcntl(int, int, ...)
int open(char *, int, ...)
#int open (char *, int, mode_t)
-
+
enum: F_TEST
enum: F_TLOCK
enum: F_ULOCK
-
+
# pathconf()
# _PC_*
class Begin(Action):
"""
Begin(state_name) is a Plex action which causes the Scanner to
- enter the state |state_name|. See the docstring of Plex.Lexicon
+ enter the state |state_name|. See the docstring of Plex.Lexicon
for more information.
"""
class Ignore(Action):
"""
IGNORE is a Plex action which causes its associated token
- to be ignored. See the docstring of Plex.Lexicon for more
+ to be ignored. See the docstring of Plex.Lexicon for more
information.
"""
def perform(self, token_stream, text):
class Text(Action):
"""
TEXT is a Plex action which causes the text of a token to
- be returned as the value of the token. See the docstring of
+ be returned as the value of the token. See the docstring of
Plex.Lexicon for more information.
"""
#for old_state in old_state_set.keys():
#new_state.merge_actions(old_state)
return new_state
-
+
def highest_priority_action(self, state_set):
best_action = None
best_priority = LOWEST_PRIORITY
best_action = state.action
best_priority = priority
return best_action
-
+
# def old_to_new_set(self, old_state_set):
# """
# Return the new state corresponding to a set of old states as
old_state_set = self.new_to_old_dict[id(new_state)]
file.write(" State %s <-- %s\n" % (
new_state['number'], state_set_str(old_state_set)))
-
+
self.state_name = state_name
def __str__(self):
- return ("'%s', line %d, char %d: Token not recognised in state %s"
+ return ("'%s', line %d, char %d: Token not recognised in state %s"
% (self.position + (repr(self.state_name),)))
else:
action = Actions.Call(action_spec)
final_state = machine.new_state()
- re.build_machine(machine, initial_state, final_state,
+ re.build_machine(machine, initial_state, final_state,
match_bol = 1, nocase = 0)
final_state.set_action(action, priority = -token_number)
except Errors.PlexError, e:
def get_initial_state(self, name):
return self.initial_states[name]
-
+
def dump(self, file):
file.write("Plex.Machine:\n")
if self.initial_states is not None:
def add_transition(self, event, new_state):
self.transitions.add(event, new_state)
-
+
def link_to(self, state):
"""Add an epsilon-move from this state to another state."""
self.add_transition('', state)
def set_action(self, action, priority):
- """Make this an accepting state with the given action. If
+ """Make this an accepting state with the given action. If
there is already an action, choose the action with highest
priority."""
if priority > self.action_priority:
states = None # [state]
# where state = {event:state, 'else':state, 'action':Action}
next_number = 1 # for debugging
-
+
new_state_template = {
'':None, 'bol':None, 'eol':None, 'eof':None, 'else':None
}
-
+
def __init__(self, old_machine = None):
self.initial_states = initial_states = {}
self.states = []
else:
new_state[event] = None
new_state['action'] = old_state.action
-
+
def __del__(self):
for state in self.states:
state.clear()
-
+
def new_state(self, action = None):
number = self.next_number
self.next_number = number + 1
result['action'] = action
self.states.append(result)
return result
-
+
def make_initial_state(self, name, state):
self.initial_states[name] = state
-
+
def add_transitions(self, state, event, new_state, maxint=sys.maxint):
if type(event) is tuple:
code0, code1 = event
code0 = code0 + 1
else:
state[event] = new_state
-
+
def get_initial_state(self, name):
return self.initial_states[name]
-
+
def dump(self, file):
file.write("Plex.FastMachine:\n")
file.write(" Initial states:\n")
action = state['action']
if action is not None:
file.write(" %s\n" % action)
-
+
def dump_transitions(self, state, file):
chars_leading_to_state = {}
special_to_state = {}
c2 = c2 + 1
result.append((chr(c1), chr(c2)))
return tuple(result)
-
+
def ranges_to_string(self, range_list):
return ','.join(map(self.range_to_string, range_list))
-
+
def range_to_string(self, range_tuple):
(c1, c2) = range_tuple
if c1 == c2:
with a code |c| in the range |code1| <= |c| < |code2|.
"""
if code1 <= nl_code < code2:
- return Alt(RawCodeRange(code1, nl_code),
- RawNewline,
+ return Alt(RawCodeRange(code1, nl_code),
+ RawNewline,
RawCodeRange(nl_code + 1, code2))
else:
return RawCodeRange(code1, code2)
nullable = 1 # True if this RE can match 0 input symbols
match_nl = 1 # True if this RE can match a string ending with '\n'
str = None # Set to a string to override the class's __str__ result
-
- def build_machine(self, machine, initial_state, final_state,
+
+ def build_machine(self, machine, initial_state, final_state,
match_bol, nocase):
"""
This method should add states to |machine| to implement this
beginning of a line. If nocase is true, upper and lower case
letters should be treated as equivalent.
"""
- raise NotImplementedError("%s.build_machine not implemented" %
+ raise NotImplementedError("%s.build_machine not implemented" %
self.__class__.__name__)
-
+
def build_opt(self, m, initial_state, c):
"""
Given a state |s| of machine |m|, return a new state
def check_string(self, num, value):
if type(value) != type(''):
self.wrong_type(num, value, "string")
-
+
def check_char(self, num, value):
self.check_string(num, value)
if len(value) != 1:
raise Errors.PlexTypeError("Invalid type for argument %d of Plex.%s "
"(expected %s, got %s" % (
num, self.__class__.__name__, expected, got))
-
+
#
# Primitive RE constructors
# -------------------------
## """
## Char(c) is an RE which matches the character |c|.
## """
-
+
## nullable = 0
-
+
## def __init__(self, char):
## self.char = char
## self.match_nl = char == '\n'
-
+
## def build_machine(self, m, initial_state, final_state, match_bol, nocase):
## c = self.char
## if match_bol and c != BOL:
range = None # (code, code)
uppercase_range = None # (code, code) or None
lowercase_range = None # (code, code) or None
-
+
def __init__(self, code1, code2):
self.range = (code1, code2)
self.uppercase_range = uppercase_range(code1, code2)
self.lowercase_range = lowercase_range(code1, code2)
-
+
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
if match_bol:
initial_state = self.build_opt(m, initial_state, BOL)
initial_state.add_transition(self.uppercase_range, final_state)
if self.lowercase_range:
initial_state.add_transition(self.lowercase_range, final_state)
-
+
def calc_str(self):
return "CodeRange(%d,%d)" % (self.code1, self.code2)
if not re.nullable:
break
self.match_nl = match_nl
-
+
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
re_list = self.re_list
if len(re_list) == 0:
class SwitchCase(RE):
"""
- SwitchCase(re, nocase) is an RE which matches the same strings as RE,
+ SwitchCase(re, nocase) is an RE which matches the same strings as RE,
but treating upper and lower case letters according to |nocase|. If
|nocase| is true, case is ignored, otherwise it is not.
"""
self.match_nl = re.match_nl
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
- self.re.build_machine(m, initial_state, final_state, match_bol,
+ self.re.build_machine(m, initial_state, final_state, match_bol,
self.nocase)
def calc_str(self):
def Case(re):
"""
Case(re) is an RE which matches the same strings as RE, but treating
- upper and lower case letters as distinct, i.e. it cancels the effect
+ upper and lower case letters as distinct, i.e. it cancels the effect
of any enclosing NoCase().
"""
return SwitchCase(re, nocase = 0)
#
# RE Constants
#
-
+
Bol = Char(BOL)
Bol.__doc__ = \
"""
buf_start_pos=long, buf_len=long, buf_index=long,
trace=bint, discard=long, data=unicode, buffer=unicode)
cdef run_machine_inlined(self)
-
+
cdef begin(self, state)
cdef produce(self, value, text = *)
position() --> (name, line, col)
Returns the position of the last token read using the
read() method.
-
+
begin(state_name)
Causes scanner to change state.
-
+
produce(value [, text])
Causes return of a token value to the caller of the
Scanner.
self.start_col = 0
self.text = None
self.state_name = None
-
+
self.lexicon = lexicon
self.stream = stream
self.name = name
class RegexpSyntaxError(PlexError):
pass
-
+
def re(s):
"""
Convert traditional string representation of regular expression |s|
self.i = -1
self.end = 0
self.next()
-
+
def parse_re(self):
re = self.parse_alt()
if not self.end:
self.error("Unexpected %s" % repr(self.c))
return re
-
+
def parse_alt(self):
"""Parse a set of alternative regexps."""
re = self.parse_seq()
re_list.append(self.parse_seq())
re = Alt(*re_list)
return re
-
+
def parse_seq(self):
"""Parse a sequence of regexps."""
re_list = []
while not self.end and not self.c in "|)":
re_list.append(self.parse_mod())
return Seq(*re_list)
-
+
def parse_mod(self):
"""Parse a primitive regexp followed by *, +, ? modifiers."""
re = self.parse_prim()
c = self.get()
re = Char(c)
return re
-
+
def parse_charset(self):
"""Parse a charset. Does not include the surrounding []."""
char_list = []
return AnyBut(chars)
else:
return Any(chars)
-
+
def next(self):
"""Advance to the next char."""
s = self.s
else:
self.c = ''
self.end = 1
-
+
def get(self):
if self.end:
self.error("Premature end of string")
c = self.c
self.next()
return c
-
+
def lookahead(self, n):
"""Look ahead n chars."""
j = self.i + n
self.next()
else:
self.error("Missing %s" % repr(c))
-
+
def error(self, mess):
"""Raise exception to signal syntax error in regexp."""
raise RegexpSyntaxError("Syntax error in regexp %s at position %d: %s" % (
repr(self.s), self.i, mess))
-
-
+
+
class TransitionMap(object):
"""
A TransitionMap maps an input event to a set of states.
- An input event is one of: a range of character codes,
- the empty string (representing an epsilon move), or one
+ An input event is one of: a range of character codes,
+ the empty string (representing an epsilon move), or one
of the special symbols BOL, EOL, EOF.
-
- For characters, this implementation compactly represents
+
+ For characters, this implementation compactly represents
the map by means of a list:
-
+
[code_0, states_0, code_1, states_1, code_2, states_2,
..., code_n-1, states_n-1, code_n]
-
- where |code_i| is a character code, and |states_i| is a
+
+ where |code_i| is a character code, and |states_i| is a
set of states corresponding to characters with codes |c|
in the range |code_i| <= |c| <= |code_i+1|.
-
+
The following invariants hold:
n >= 1
code_0 == -maxint
code_n == maxint
code_i < code_i+1 for i in 0..n-1
states_0 == states_n-1
-
+
Mappings for the special events '', BOL, EOL, EOF are
kept separately in a dictionary.
"""
-
+
map = None # The list of codes and states
special = None # Mapping for special events
-
+
def __init__(self, map = None, special = None):
if not map:
map = [-maxint, {}, maxint]
self.map = map
self.special = special
#self.check() ###
-
+
def add(self, event, new_state,
TupleType = tuple):
"""
i = i + 2
else:
self.get_special(event).update(new_set)
-
+
def get_epsilon(self,
none = None):
"""
Return the mapping for epsilon, or None.
"""
return self.special.get('', none)
-
+
def iteritems(self,
len = len):
"""
result.append((event, set))
return iter(result)
items = iteritems
-
+
# ------------------- Private methods --------------------
def split(self, code,
len = len, maxint = maxint):
"""
- Search the list for the position of the split point for |code|,
- inserting a new split point if necessary. Returns index |i| such
+ Search the list for the position of the split point for |code|,
+ inserting a new split point if necessary. Returns index |i| such
that |code| == |map[i]|.
"""
# We use a funky variation on binary search.
map[hi:hi] = [code, map[hi - 1].copy()]
#self.check() ###
return hi
-
+
def get_special(self, event):
"""
Get state set for special event, adding a new entry if necessary.
return set
# --------------------- Conversion methods -----------------------
-
+
def __str__(self):
map_strs = []
map = self.map
','.join(map_strs),
special_strs
)
-
+
# --------------------- Debugging methods -----------------------
-
+
def check(self):
"""Check data structure integrity."""
if not self.map[-3] < self.map[-1]:
print(self)
assert 0
-
+
def dump(self, file):
map = self.map
i = 0
if not event:
event = 'empty'
self.dump_trans(event, set, file)
-
+
def dump_range(self, code0, code1, set, file):
if set:
if code0 == -maxint:
elif code0 == code1 - 1:
k = self.dump_char(code0)
else:
- k = "%s..%s" % (self.dump_char(code0),
+ k = "%s..%s" % (self.dump_char(code0),
self.dump_char(code1 - 1))
self.dump_trans(k, set, file)
-
+
def dump_char(self, code):
if 0 <= code <= 255:
return repr(chr(code))
else:
return "chr(%d)" % code
-
+
def dump_trans(self, key, set, file):
file.write(" %s --> %s\n" % (key, self.dump_set(set)))
-
+
def dump_set(self, set):
return state_set_str(set)
def state_set_str(set):
return "[%s]" % ','.join(["S%d" % state.number for state in set])
-
-
+
+
def typeof(arg):
return type(arg)
-
+
def address(arg):
return pointer(type(arg))([arg])
-
+
def declare(type=None, value=None, **kwds):
if type is not None and hasattr(type, '__call__'):
if value:
self._items = []
else:
raise ValueError
-
+
def __getitem__(self, ix):
if ix < 0:
raise IndexError("negative indexing not allowed in C")
return self._items[ix]
-
+
def __setitem__(self, ix, value):
if ix < 0:
raise IndexError("negative indexing not allowed in C")
self._items[ix] = cast(self._basetype, value)
-
+
class ArrayType(PointerType):
-
+
def __init__(self):
self._items = [None] * self._n
class StructType(CythonType):
-
+
def __init__(self, **data):
for key, value in data.iteritems():
setattr(self, key, value)
-
+
def __setattr__(self, key, value):
if key in self._members:
self.__dict__[key] = cast(self._members[key], value)
else:
raise AttributeError("Struct has no member '%s'" % key)
-
+
class UnionType(CythonType):
raise AttributeError("Union can only store one field at a time.")
for key, value in data.iteritems():
setattr(self, key, value)
-
+
def __setattr__(self, key, value):
if key in '__dict__':
CythonType.__setattr__(self, key, value)
def __init__(self, type):
self._basetype = type
-
+
def __call__(self, value=None):
if value is not None:
value = cast(self._basetype, value)
return value
-
+
py_int = int
if name != 'Py_UNICODE' and not name.endswith('size_t'):
gs['u'+name] = typedef(py_int)
gs['s'+name] = typedef(py_int)
-
+
for name in float_types:
gs[name] = typedef(py_float)
def allmarkers(self):
children = self.prepended_children
return [m for c in children for m in c.allmarkers()] + self.markers
-
+
__doc__ = r"""
Implements a buffer with insertion points. When you know you need to
name = u"%s[%d]" % tip[1:3]
else:
name = tip[1]
-
+
self.result.append(u" " * self._indents +
u"%s: %s" % (name, node.__class__.__name__))
self._indents += 1
self.listing_file = Errors.listing_file
self.echo_file = Errors.echo_file
Errors.listing_file = Errors.echo_file = None
-
+
def tearDown(self):
Errors.listing_file = self.listing_file
Errors.echo_file = self.echo_file
-
+
def assertLines(self, expected, result):
"Checks that the given strings or lists of strings are equal line by line"
if not isinstance(expected, list): expected = expected.split(u"\n")
def assertCode(self, expected, result_tree):
result_lines = self.codeToLines(result_tree)
-
+
expected_lines = strip_common_indent(expected.split("\n"))
-
+
for idx, (line, expected_line) in enumerate(zip(result_lines, expected_lines)):
self.assertEqual(expected_line, line, "Line %d:\nGot: %s\nExp: %s" % (idx, line, expected_line))
self.assertEqual(len(result_lines), len(expected_lines),
are testing; pyx should be either a string (passed to the parser to
create a post-parse tree) or a node representing input to pipeline.
The result will be a transformed result.
-
+
- Check that the tree is correct. If wanted, assertCode can be used, which
takes a code string as expected, and a ModuleNode in result_tree
(it serializes the ModuleNode to a string and compares line-by-line).
-
+
All code strings are first stripped for whitespace lines and then common
indentation.
-
+
Plans: One could have a pxd dictionary parameter to run_pipeline.
"""
-
+
def run_pipeline(self, pipeline, pyx, pxds={}):
tree = self.fragment(pyx, pxds).root
# Run pipeline
for T in pipeline:
tree = T(tree)
- return tree
+ return tree
class TreeAssertVisitor(VisitorTransform):
class TestCodeWriter(CythonTest):
# CythonTest uses the CodeWriter heavily, so do some checking by
# roundtripping Cython code through the test framework.
-
+
# Note that this test is dependant upon the normal Cython parser
# to generate the input trees to the CodeWriter. This save *a lot*
# of time; better to spend that time writing other tests than perfecting
def test_if(self):
self.t(u"if x:\n pass")
-
+
def test_ifelifelse(self):
self.t(u"""
if x:
else:
pass
""")
-
+
def test_def(self):
self.t(u"""
def f(x, y, z):
cdef int hello
cdef int hello = 4, x = 3, y, z
""")
-
+
def test_for_loop(self):
self.t(u"""
for x, y, z in f(g(h(34) * 2) + 23):
def test_attribute(self):
self.t(u"a.x")
-
+
if __name__ == "__main__":
import unittest
unittest.main()
linemap = dict(enumerate(code.splitlines()))
class TestStringIOTree(unittest.TestCase):
-
+
def setUp(self):
self.tree = stringtree.StringIOTree()
-
+
def test_markers(self):
assert not self.tree.allmarkers()
-
+
def test_insertion(self):
self.write_lines((1, 2, 3))
line_4_to_6_insertion_point = self.tree.insertion_point()
self.write_lines((7, 8))
line_9_to_13_insertion_point = self.tree.insertion_point()
self.write_lines((14, 15, 16))
-
+
line_4_insertion_point = line_4_to_6_insertion_point.insertion_point()
self.write_lines((5, 6), tree=line_4_to_6_insertion_point)
-
+
line_9_to_12_insertion_point = (
line_9_to_13_insertion_point.insertion_point())
self.write_line(13, tree=line_9_to_13_insertion_point)
-
+
self.write_line(4, tree=line_4_insertion_point)
self.write_line(9, tree=line_9_to_12_insertion_point)
line_10_insertion_point = line_9_to_12_insertion_point.insertion_point()
self.write_line(11, tree=line_9_to_12_insertion_point)
self.write_line(10, tree=line_10_insertion_point)
self.write_line(12, tree=line_9_to_12_insertion_point)
-
+
self.assertEqual(self.tree.allmarkers(), range(1, 17))
self.assertEqual(code.strip(), self.tree.getvalue().strip())
-
-
+
+
def write_lines(self, linenos, tree=None):
for lineno in linenos:
self.write_line(lineno, tree=tree)
-
+
def write_line(self, lineno, tree=None):
if tree is None:
tree = self.tree
"""This class is used to keep useful information about the execution of a
test method.
"""
-
+
# Possible test outcomes
(SUCCESS, FAILURE, ERROR) = range(3)
-
+
def __init__(self, test_result, test_method, outcome=SUCCESS, err=None):
"Create a new instance of _TestInfo."
self.test_result = test_result
self.test_method = test_method
self.outcome = outcome
self.err = err
-
+
def get_elapsed_time(self):
"""Return the time that shows how long the test method took to
execute.
"""
return self.test_result.stop_time - self.test_result.start_time
-
+
def get_description(self):
"Return a text representation of the test method."
return self.test_result.getDescription(self.test_method)
-
+
def get_error_info(self):
"""Return a text representation of an exception thrown by a test
method.
self.successes = []
self.callback = None
self.elapsed_times = elapsed_times
-
+
def _prepare_callback(self, test_info, target_list, verbose_str,
short_str):
"""Append a _TestInfo to the given target list and sets a callback
"""This callback prints the test method outcome to the stream,
as well as the elapsed time.
"""
-
+
# Ignore the elapsed times for a more reliable unit testing
if not self.elapsed_times:
self.start_time = self.stop_time = 0
-
+
if self.showAll:
self.stream.writeln('(%.3fs) %s' % \
(test_info.get_elapsed_time(), verbose_str))
elif self.dots:
self.stream.write(short_str)
self.callback = callback
-
+
def startTest(self, test):
"Called before execute each test method."
self.start_time = time.time()
TestResult.startTest(self, test)
-
+
if self.showAll:
self.stream.write(' ' + self.getDescription(test))
self.stream.write(" ... ")
-
+
def stopTest(self, test):
"Called after execute each test method."
_TextTestResult.stopTest(self, test)
self.stop_time = time.time()
-
+
if self.callback and callable(self.callback):
self.callback()
self.callback = None
-
+
def addSuccess(self, test):
"Called when a test executes successfully."
self._prepare_callback(_TestInfo(self, test), \
self.successes, 'OK', '.')
-
+
def addFailure(self, test, err):
"Called when a test method fails."
self._prepare_callback(_TestInfo(self, test, _TestInfo.FAILURE, err), \
self.failures, 'FAIL', 'F')
-
+
def addError(self, test, err):
"Called when a test method raises an error."
self._prepare_callback(_TestInfo(self, test, _TestInfo.ERROR, err), \
self.errors, 'ERROR', 'E')
-
+
def printErrorList(self, flavour, errors):
"Write some information about the FAIL or ERROR to the stream."
for test_info in errors:
will be generated for each TestCase.
"""
tests_by_testcase = {}
-
+
for tests in (self.successes, self.failures, self.errors):
for test_info in tests:
testcase = type(test_info.test_method)
-
+
# Ignore module name if it is '__main__'
module = testcase.__module__ + '.'
if module == '__main__.':
module = ''
testcase_name = module + testcase.__name__
-
+
if not tests_by_testcase.has_key(testcase_name):
tests_by_testcase[testcase_name] = []
tests_by_testcase[testcase_name].append(test_info)
-
+
return tests_by_testcase
-
+
def _report_testsuite(suite_name, tests, xml_document):
"Appends the testsuite section to the XML document."
testsuite = xml_document.createElement('testsuite')
xml_document.appendChild(testsuite)
-
+
testsuite.setAttribute('name', str(suite_name))
testsuite.setAttribute('tests', str(len(tests)))
-
+
testsuite.setAttribute('time', '%.3f' % \
sum(map(lambda e: e.get_elapsed_time(), tests)))
-
+
failures = filter(lambda e: e.outcome==_TestInfo.FAILURE, tests)
testsuite.setAttribute('failures', str(len(failures)))
-
+
errors = filter(lambda e: e.outcome==_TestInfo.ERROR, tests)
testsuite.setAttribute('errors', str(len(errors)))
-
+
return testsuite
-
+
_report_testsuite = staticmethod(_report_testsuite)
-
+
def _report_testcase(suite_name, test_result, xml_testsuite, xml_document):
"Appends a testcase section to the XML document."
testcase = xml_document.createElement('testcase')
xml_testsuite.appendChild(testcase)
-
+
testcase.setAttribute('classname', str(suite_name))
testcase.setAttribute('name', test_result.test_method.shortDescription()
or getattr(test_result.test_method, '_testMethodName',
str(test_result.test_method)))
testcase.setAttribute('time', '%.3f' % test_result.get_elapsed_time())
-
+
if (test_result.outcome != _TestInfo.SUCCESS):
elem_name = ('failure', 'error')[test_result.outcome-1]
failure = xml_document.createElement(elem_name)
testcase.appendChild(failure)
-
+
failure.setAttribute('type', str(test_result.err[0].__name__))
failure.setAttribute('message', str(test_result.err[1]))
-
+
error_info = test_result.get_error_info()
failureText = xml_document.createCDATASection(error_info)
failure.appendChild(failureText)
-
+
_report_testcase = staticmethod(_report_testcase)
-
+
def _report_output(test_runner, xml_testsuite, xml_document):
"Appends the system-out and system-err sections to the XML document."
systemout = xml_document.createElement('system-out')
xml_testsuite.appendChild(systemout)
-
+
stdout = test_runner.stdout.getvalue()
systemout_text = xml_document.createCDATASection(stdout)
systemout.appendChild(systemout_text)
-
+
systemerr = xml_document.createElement('system-err')
xml_testsuite.appendChild(systemerr)
-
+
stderr = test_runner.stderr.getvalue()
systemerr_text = xml_document.createCDATASection(stderr)
systemerr.appendChild(systemerr_text)
-
+
_report_output = staticmethod(_report_output)
-
+
def generate_reports(self, test_runner):
"Generates the XML reports to a given XMLTestRunner object."
from xml.dom.minidom import Document
all_results = self._get_info_by_testcase()
-
+
if type(test_runner.output) == str and not \
os.path.exists(test_runner.output):
os.makedirs(test_runner.output)
-
+
for suite, tests in all_results.items():
doc = Document()
-
+
# Build the XML file
testsuite = _XMLTestResult._report_testsuite(suite, tests, doc)
for test in tests:
_XMLTestResult._report_testcase(suite, test, testsuite, doc)
_XMLTestResult._report_output(test_runner, testsuite, doc)
xml_content = doc.toprettyxml(indent='\t')
-
+
if type(test_runner.output) is str:
report_file = open('%s%sTEST-%s.xml' % \
(test_runner.output, os.sep, suite), 'w')
TextTestRunner.__init__(self, stream, descriptions, verbosity)
self.output = output
self.elapsed_times = elapsed_times
-
+
def _make_result(self):
"""Create the TestResult object which will be used to store
information about the executed tests.
"""
return _XMLTestResult(self.stream, self.descriptions, \
self.verbosity, self.elapsed_times)
-
+
def _patch_standard_output(self):
"""Replace the stdout and stderr streams with string-based streams
in order to capture the tests' output.
(self.old_stdout, self.old_stderr) = (sys.stdout, sys.stderr)
(sys.stdout, sys.stderr) = (self.stdout, self.stderr) = \
(StringIO(), StringIO())
-
+
def _restore_standard_output(self):
"Restore the stdout and stderr streams."
(sys.stdout, sys.stderr) = (self.old_stdout, self.old_stderr)
-
+
def run(self, test):
"Run the given test case or test suite."
-
+
try:
# Prepare the test execution
self._patch_standard_output()
result = self._make_result()
-
+
# Print a nice header
self.stream.writeln()
self.stream.writeln('Running tests...')
self.stream.writeln(result.separator2)
-
+
# Execute tests
start_time = time.time()
test(result)
stop_time = time.time()
time_taken = stop_time - start_time
-
+
# Print results
result.printErrors()
self.stream.writeln(result.separator2)
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", time_taken))
self.stream.writeln()
-
+
# Error traces
if not result.wasSuccessful():
self.stream.write("FAILED (")
self.stream.writeln(")")
else:
self.stream.writeln("OK")
-
+
# Generate reports
self.stream.writeln()
self.stream.writeln('Generating XML reports...')
result.generate_reports(self)
finally:
self._restore_standard_output()
-
+
return result
def open_new_file(path):
if os.path.exists(path):
- # Make sure to create a new file here so we can
- # safely hard link the output files.
+ # Make sure to create a new file here so we can
+ # safely hard link the output files.
os.unlink(path)
# we use the ISO-8859-1 encoding here because we only write pure
__version__ = "0.14.rc0"
-# Void cython.* directives (for case insensitive operating systems).
+# Void cython.* directives (for case insensitive operating systems).
from Cython.Shadow import *
You have two installation options:
-(1) Run the setup.py script in this directory
+(1) Run the setup.py script in this directory
as follows:
python setup.py install
(2) If you prefer not to modify your Python
installation, arrange for the directory
- containing this file (INSTALL.txt) to be in
+ containing this file (INSTALL.txt) to be in
your PYTHONPATH. On unix, also put the bin
directory on your PATH.
Catch floating point exceptions?
-Check that forward-declared non-external extension types
+Check that forward-declared non-external extension types
are defined.
Generate type test when casting from one Python type
-----------------------------------------------------------------------------
-v --version Display version number of cython compiler
-l --create-listing Write error messages to a .lis file
- -I --include-dir <directory> Search for include files in named
+ -I --include-dir <directory> Search for include files in named
directory (may be repeated)
-o --output-file <filename> Specify name of generated C file (only
one source file allowed if this is used)
cdef describe(self):
print "This is a parrot."
-
+
cdef action(self):
print "Polly wants a cracker!"
cdef struct spam:
pass
-
+
ctypedef union eggs:
pass
-
+
cdef enum ham:
pass
-
+
cdef extern spam s
cdef extern eggs e
cdef extern ham h
cdef extern from "cheese.h":
ctypedef int camembert
-
+
struct roquefort:
int x
-
+
char *swiss
-
+
void cheddar()
class external.runny [object runny_obj]:
enum Eggs:
runny, firm, hard
-
+
struct Spam:
int i
-
+
union Soviet:
char c
cdef class Spam:
cdef public object eggs
-
+
def __getattr__(self, name):
print "Spam getattr:", name
print "Entering tomato"
raise Exception("Eject! Eject! Eject!")
print "Leaving tomato"
-
+
cdef void sandwich():
print "Entering sandwich"
tomato()
cdef class FancySpamDish(SpamDish):
cdef int lettuce
-
+
cdef void describe(self):
print "This dish contains", self.spam, "tons of spam",
print "and", self.lettuce, "milligrams of lettuce."
cdef action(self):
print "This parrot is resting."
-
+
cdef plumage(self):
print "Lovely plumage!"
cdef class Parrot:
cdef object plumage
-
+
def __init__(self):
self.plumage = "yellow"
-
+
def describe(self):
print "This bird has lovely", self.plumage, "plumage."
cdef public int tons
cdef readonly float tastiness
cdef int temperature
-
+
def __init__(self, tons, tastiness, temperature):
self.tons = tons
self.tastiness = tastiness
self.temperature = temperature
-
+
def get_temperature(self):
return self.temperature
cdef extern from "numeric.h":
-
+
struct PyArray_Descr:
int type_num, elsize
char type
-
+
ctypedef class Numeric.ArrayType [object PyArrayObject]:
cdef char *data
cdef int nd
cdef object base
cdef PyArray_Descr *descr
cdef int flags
-
+
def ogle(ArrayType a):
print "No. of dimensions:", a.nd
print " Dim Value"
cdef class CheeseShop:
cdef object cheeses
-
+
def __cinit__(self):
self.cheeses = []
property cheese:
-
+
"A senseless waste of a property."
-
+
def __get__(self):
return "We don't have: %s" % self.cheeses
def __set__(self, value):
self.cheeses.append(value)
-
+
def __del__(self):
del self.cheeses[:]
-# This file contains tests corresponding to unresolved bugs,
-# which will be skipped in the normal testing run.
+# This file contains tests corresponding to unresolved bugs,
+# which will be skipped in the normal testing run.
methodmangling_T5
class_attribute_init_values_T18
cdef public struct Foo:
int a, b
-
+
ctypedef struct Blarg:
int c, d
@cy.boundscheck(True)
def g(object[int, ndim=2] buf):
- # The below line should have no meaning
+ # The below line should have no meaning
# cython: boundscheck = False
# even if the above line doesn't follow indentation.
print buf[3, 2] # bc
def i(object[int] buf):
with bc(True):
print buf[3] # bs
-
+
from cython cimport warn as my_warn
@my_warn(undeclared=True)
def f(obj, int i, float f, char *s1, char s2[]):
pass
-
+
cdef g(obj, int i, float f, char *s1, char s2[]):
pass
-
+
cdef do_g(object (*func)(object, int, float, char*, char*)):
return func(1, 2, 3.14159, "a", "b")
-
+
do_g(&g)
p1 = p2
obj1 = i1
i1 = obj1
- p1 = obj1
+ p1 = obj1
p1 = "spanish inquisition"
-foo()
+foo()
void foo_clear(foo_t)
cdef foo_t value
-foo_init(value)
-foo_clear(value)
+foo_init(value)
+foo_clear(value)
cdef void *pointer = <void*> value
-foo_init(<foo_t>pointer)
-foo_clear(<foo_t>pointer)
+foo_init(<foo_t>pointer)
+foo_clear(<foo_t>pointer)
s1 = c
i = s1
-eggs()
+eggs()
class Parrot:
"Standard Norwegian Blue."
-
+
def admire_plumage(self):
"Lovely, ain't it?"
cdef class Spam:
cdef int tons
-
+
cdef void add_tons(self, int x):
pass
cdef class Swallow:
pass
-
+
def f(Grail g):
cdef int i = 0
cdef Swallow s
cdef class Spam:
-
+
def __delattr__(self, n):
pass
cdef class Spam:
-
+
def __delitem__(self, i):
pass
cdef class Spam:
-
+
def __delslice__(self, Py_ssize_t i, Py_ssize_t j):
pass
cdef class Spam:
-
+
def __getattr__(self, x):
pass
pass
cdef class Norwegian(Parrot):
-
+
def __delitem__(self, i):
pass
pass
cdef class Norwegian(Parrot):
-
+
def __setitem__(self, i, x):
pass
property eggs:
"Ova"
-
+
def __get__(self):
pass
-
+
def __set__(self, x):
pass
-
+
def __del__(self):
pass
cdef class Spam:
-
+
def __setattr__(self, n, x):
pass
cdef class Spam:
-
+
def __setitem__(self, i, x):
pass
cdef class Spam:
-
+
def __setslice__(self, Py_ssize_t i, Py_ssize_t j, x):
pass
i = 2
break
i = 3
-
+
for i in b:
i = 1
-
+
for a in "spam":
i = 1
-
+
for a[b] in c:
i = 1
-
+
for a,b in c:
i = 1
-
+
for a in b,c:
i = 1
-
-
+
+
from spam import eggs
from spam.morespam import bacon, eggs, ham
from spam import eggs as ova
-
+
f(x = 42, **b)
f(a, *b)
f(a, x = 42, *b, **c)
-
+
cdef void f():
cdef Color e
cdef int i
-
+
i = red
i = red + 1
i = red | 1
int i
int priv_i
-
+
void priv_f():
global priv_i
priv_i = 42
struct PubFoo:
int i
-
+
int pub_v
-
+
void pub_f():
pass
-
+
class PubBlarg [object PubBlargObj, type PubBlargType]:
pass
import spam.eggs
import spam, eggs, ham
import spam as tasty
-
+
array1[obj2] = int3
obj1[int2] = obj3
obj1[obj2] = 42
-
+
f(None, None, None)
cdef class Position:
cdef readonly CoordSyst parent
-
+
cdef class Point(Position):
cdef void bug(self):
test(self.parent._matrix)
from libc.signal cimport *
-cdef void sighdl(int signum) nogil:
+cdef void sighdl(int signum) nogil:
pass
cdef sighandler_t h
#define POINT_H
namespace geometry {
-
+
struct Point
{
double x;
pass
ctypedef struct PyObject:
- Py_ssize_t ob_refcnt
+ Py_ssize_t ob_refcnt
PyTypeObject *ob_type
cdef extern from "longintrepr.h":
cdef struct _longobject:
- int ob_refcnt
+ int ob_refcnt
PyTypeObject *ob_type
# int ob_size # not in Py3k
unsigned int *ob_digit
def test(temp = long(0)):
- cdef _longobject *l
- l = <_longobject *> temp
+ cdef _longobject *l
+ l = <_longobject *> temp
#print sizeof(l.ob_size) # not in Py3k
print sizeof(l.ob_digit[0])
def f(a, b, c, x):
cdef int i
a = b + c
-
+
try:
i = 1
raise x
i = 2
except a:
i = 3
-
+
try:
i = 1
except a:
i = 2
except b:
i = 3
-
+
try:
i = 1
except a, b:
i = 2
-
+
try:
i = 1
except a:
i = 2
except:
i = 3
-
+
try:
i = 1
except (a, b), c[42]:
i = 2
-
+
for a in b:
try:
c = x * 42
def g(a, b, c, x):
cdef int i
a = b + c
-
+
try:
i = 1
raise x
i = 2
except a:
i = 3
-
+
try:
i = 1
except a:
i = 2
except b:
i = 3
-
+
try:
i = 1
except a as b:
i = 2
-
+
try:
i = 1
except a:
i = 2
except:
i = 3
-
+
try:
i = 1
except (a, b) as c:
def f(a, b, c, x):
cdef int i
a = b + c
-
+
try:
return
raise a
finally:
c = a - b
-
+
for a in b:
try:
continue
c = a * b
finally:
i = 42
-
-
+
+
double y
double z
-cdef foo(int, int i,
- list, list L,
+cdef foo(int, int i,
+ list, list L,
point, point p, point* ps)
-
+
cdef foo(int i0, int i, list L0, list L, point p0, point p, point* ps):
pass
-
+
cdef class A:
cdef list
cdef list L
# Possibly empty declarators
cdef point(self, int, int i, list, list L, point, point p, point* ps):
pass
-
+
cdef class B(A):
cdef point(self, o, int i, oo, list L, ooo, point p, point* ps):
pass
def f(a, b):
cdef int i = 5
-
+
while a:
x = 1
while a+b:
x = 1
-
+
while i:
x = 1
else:
x = 2
-
+
while i:
x = 1
break
x = 2
else:
x = 3
-
+
while i:
x = 1
continue
x = 2
-
+
p2 = obj # error
obj = p2 # error
-
+
_ERRORS = u"""
5:16: Cannot assign type 'char *' to 'int'
6:17: Cannot convert Python object to 'int *'
void v, # incomplete argument type
int a[]):
pass
-
+
cdef NoSuchType* ptr
ptr = None # This should not produce another error
def f():
cdef object[fakeoption=True] buf1
- cdef object[int, -1] buf1b
+ cdef object[int, -1] buf1b
cdef object[ndim=-1] buf2
cdef object[int, 'a'] buf3
cdef object[int,2,3,4,5,6] buf4
cdef void f():
global a
a = 42 # assignment to non-lvalue
-
+
_ERRORS = u"""
6:3: Assignment to non-lvalue 'a'
"""
ctypedef struct Foo:
int i
-
+
ctypedef struct Blarg:
char c
obj1 = obj2[ptr1::] # error
obj1 = obj2[:ptr1:] # error
obj1 = obj2[::ptr1] # error
-
+
cdef int a
cdef int* int_ptr
pass
for a in int_ptr[2:2:a]:
pass
-
+
_ERRORS = u"""
3:20: Cannot convert 'int *' to Python object
4:21: Cannot convert 'int *' to Python object
cdef void *p
p = <void *>blarg # ok
p = <void *>(obj + blarg) # error - temporary
-
+
_ERRORS = u"""
6:5: Casting temporary Python object to non-numeric non-Python type
"""
# coding=ASCII
"""
-Tr\8fs bien.
+Tr\8fs bien.
"""
_ERRORS = u"""
cdef void r() nogil:
q()
-
+
cdef object m():
cdef object x, y, obj
cdef int i, j, k
__doc__ = u"""
-__getattribute__ and __getattr__ special methods and subclasses.
+__getattribute__ and __getattr__ special methods and subclasses.
-getattr does not override members.
+getattr does not override members.
>>> a = getattr_boring()
>>> a.boring_member
10
"""
cdef class boring:
- cdef readonly int boring_member
+ cdef readonly int boring_member
def __init__(self):
self.boring_member = 10
cdef readonly int boring_getattribute_member
cdef class boring_boring_getattribute(boring_getattribute):
- cdef readonly int boring_boring_getattribute_member
+ cdef readonly int boring_boring_getattribute_member
cdef class boring_getattr(_getattr):
cdef readonly int boring_getattr_member
cdef class boring_boring_getattr(boring_getattr):
- cdef readonly int boring_boring_getattr_member
+ cdef readonly int boring_boring_getattr_member
cdef class getattribute_boring_boring_getattr(boring_boring_getattr):
def __getattribute__(self,n):
cdef class MyOtherCdefClass:
"""
Needs no hack
-
+
>>> True
True
"""
cdef class MyOtherCdefClass:
"""
Needs no hack
-
+
>>> True
True
"""
cdef class MyOtherCdefClass:
"""
Needs no hack
-
+
>>> True
True
"""
import sys
import re
exclude = []#re.compile('object').search]
-
+
def testcase(func):
for e in exclude:
if e(func.__name__):
"""
Apparently, doctest won't handle mixed exceptions and print
stats, so need to circumvent this.
-
+
>>> A = IntMockBuffer("A", range(6))
>>> A.resetlog()
>>> acquire_raise(A)
>>> A.printlog()
acquired A
released A
-
+
"""
cdef object[int] buf
buf = o
0 1 2 3 4 5 END
released A
"""
- cdef int i
+ cdef int i
for i in range(n):
print bufarg[i],
print 'END'
acquired A
0 1 2 3 4 5 END
released A
-
+
"""
cdef object[int] buf = obj
cdef int i
#
# Getting items and index bounds checking
-#
+#
@testcase
def get_int_2d(object[int, ndim=2] buf, int i, int j):
"""
"""
Uses get_int_2d to read back the value afterwards. For pure
unit test, one should support reading in MockBuffer instead.
-
+
>>> C = IntMockBuffer("C", range(6), (2,3))
>>> set_int_2d(C, 1, 1, 10)
acquired C
acquired C
released C
8
-
+
>>> set_int_2d(C, -2, -3, 9)
acquired C
released C
Traceback (most recent call last):
...
IndexError: Out of bounds on buffer access (axis 1)
-
+
"""
buf[i, j] = value
"""
The most interesting thing here is to inspect the C source and
make sure optimal code is produced.
-
+
>>> A = IntMockBuffer(None, range(6))
>>> no_negative_indices(A, 3)
3
def wraparound_directive(object[int] buf, int pos_idx, int neg_idx):
"""
Again, the most interesting thing here is to inspect the C source.
-
+
>>> A = IntMockBuffer(None, range(4))
>>> wraparound_directive(A, 2, -1)
5
['FORMAT', 'ND', 'STRIDES', 'C_CONTIGUOUS']
"""
return buf[2]
-
+
@testcase
def c_contig_2d(object[int, ndim=2, mode='c'] buf):
"""
Multi-dim has seperate implementation
-
+
>>> A = IntMockBuffer(None, range(12), shape=(3,4))
>>> c_contig_2d(A)
7
def f_contig_2d(object[int, ndim=2, mode='fortran'] buf):
"""
Must set up strides manually to ensure Fortran ordering.
-
+
>>> A = IntMockBuffer(None, range(12), shape=(4,3), strides=(1, 4))
>>> f_contig_2d(A)
7
def unsafe_get_nonegative(object[int, negative_indices=False] buf, int idx):
"""
Also inspect the C source to see that it is optimal...
-
+
>>> A = IntMockBuffer(None, range(10), shape=(3,), offset=5)
>>> unsafe_get_nonegative(A, -2)
3
with cython.boundscheck(True):
two = buf[safe_idx]
return (one, two)
-
+
#
# Coercions
#
@testcase
def coercions(object[unsigned char] uc):
"""
-TODO
+TODO
"""
print type(uc[0])
uc[0] = -1
def printbuf_int_2d(o, shape):
"""
Strided:
-
+
>>> printbuf_int_2d(IntMockBuffer("A", range(6), (2,3)), (2,3))
acquired A
0 1 2 END
Traceback (most recent call last):
...
ValueError: Buffer dtype mismatch, expected 'td_h_short' but got 'int'
- """
+ """
cdef int i
for i in range(shape[0]):
print buf[i],
>>> get_refcount(a), get_refcount(b)
(2, 2)
>>> addref(a)
- >>> A = ObjectMockBuffer(None, [1, a]) # 1, ...,otherwise it thinks nested lists...
+ >>> A = ObjectMockBuffer(None, [1, a]) # 1, ...,otherwise it thinks nested lists...
>>> get_refcount(a), get_refcount(b)
(3, 2)
>>> assign_to_object(A, 1, b)
>>> decref(b)
"""
buf[idx] = obj
-
+
@testcase
def assign_temporary_to_object(object[object] buf):
"""
>>> assign_temporary_to_object(A)
>>> get_refcount(a)
2
-
+
>>> printbuf_object(A, (2,))
{4: 23} 2
{1: 8} 2
def buffer_cast_fails(object[char, cast=True] buf):
"""
Cannot cast between datatype of different sizes.
-
+
>>> buffer_cast_fails(IntMockBuffer(None, [0]))
Traceback (most recent call last):
...
cdef Py_ssize_t* shape
cdef Py_ssize_t* suboffsets
cdef object label, log
-
+
cdef readonly object recieved_flags, release_ok
cdef public object fail
-
+
def __init__(self, label, data, shape=None, strides=None, format=None, offset=0):
# It is important not to store references to data after the constructor
# as refcounting is checked on object buffers.
self.strides = self.list_to_sizebuf(strides)
self.shape = self.list_to_sizebuf(shape)
-
+
def __dealloc__(self):
stdlib.free(self.strides)
stdlib.free(self.shape)
# must recursively free indirect...
else:
stdlib.free(self.buffer)
-
+
cdef void* create_buffer(self, data):
cdef char* buf = <char*>stdlib.malloc(len(data) * self.itemsize)
cdef char* it = buf
for name, value in available_flags:
if (value & flags) == value:
self.recieved_flags.append(name)
-
+
buffer.buf = <void*>(<char*>self.buffer + (<int>self.offset * self.itemsize))
buffer.obj = self
buffer.len = self.len
self.release_ok = False
if self.label:
msg = "released %s" % self.label
- print msg
+ print msg
self.log += msg + "\n"
def printlog(self):
print "ERROR, not subclassed", self.__class__
cdef get_default_format(self):
print "ERROR, not subclassed", self.__class__
-
+
cdef class CharMockBuffer(MockBuffer):
cdef int write(self, char* buf, object value) except -1:
(<char*>buf)[0] = <int>value
cdef get_itemsize(self): return sizeof(void*)
cdef get_default_format(self): return b"@O"
-
+
cdef class IntStridedMockBuffer(IntMockBuffer):
cdef __cythonbufferdefaults__ = {"mode" : "strided"}
-
+
cdef class ErrorBuffer:
cdef object label
-
+
def __init__(self, label):
self.label = label
For IntStridedMockBuffer, mode should be
"strided" by defaults which should show
up in the flags.
-
+
>>> A = IntStridedMockBuffer("A", range(10))
>>> bufdefaults1(A)
acquired A
['FORMAT', 'ND', 'STRIDES']
"""
pass
-
+
#
# Structs
s = <MyStruct*>buf;
s.a, s.b, s.c, s.d, s.e = value
return 0
-
+
cdef get_itemsize(self): return sizeof(MyStruct)
cdef get_default_format(self): return b"2bq2i"
s = <NestedStruct*>buf;
s.x.a, s.x.b, s.y.a, s.y.b, s.z = value
return 0
-
+
cdef get_itemsize(self): return sizeof(NestedStruct)
cdef get_default_format(self): return b"2T{ii}i"
def basic_struct(object[MyStruct] buf):
"""
See also buffmt.pyx
-
+
>>> basic_struct(MyStructMockBuffer(None, [(1, 2, 3, 4, 5)]))
1 2 3 4 5
>>> basic_struct(MyStructMockBuffer(None, [(1, 2, 3, 4, 5)], format="bbqii"))
def nested_struct(object[NestedStruct] buf):
"""
See also buffmt.pyx
-
+
>>> nested_struct(NestedStructMockBuffer(None, [(1, 2, 3, 4, 5)]))
1 2 3 4 5
>>> nested_struct(NestedStructMockBuffer(None, [(1, 2, 3, 4, 5)], format="T{ii}T{2i}i"))
s = <LongComplex*>buf;
s.real, s.imag = value
return 0
-
+
cdef get_itemsize(self): return sizeof(LongComplex)
cdef get_default_format(self): return b"Zg"
cdef struct align_of_float_helper:
char ch
- float d
+ float d
cdef struct align_of_int_helper:
char ch
int i
if int_align != 4 or sizeof(int) != 4:
raise RuntimeError("Alignment or size of int is %d on this system, please report to cython-dev for a testcase fix" % int_align)
-
+
cdef class MockBuffer:
cdef Py_ssize_t zero
cdef Py_ssize_t minusone
cdef object format
cdef object itemsize
-
+
def __init__(self, format, itemsize):
self.format = unicode(format).encode(u"ASCII")
self.itemsize = itemsize
self.zero = 0
self.minusone = -1
-
+
def __getbuffer__(self, Py_buffer* info, int flags):
info.buf = NULL
info.strides = &self.zero
Traceback (most recent call last):
...
ValueError: Buffer dtype mismatch, expected 'int' but got 'char'
-
+
>>> _int("if")
Traceback (most recent call last):
...
...
ValueError: Item size of buffer (1 byte) does not match size of 'float' (4 bytes)
- """
+ """
cdef object[float] buf = MockBuffer("f", 1)
@testcase
def char3int(fmt):
"""
>>> char3int("ciii")
- >>> char3int("c1i1i1i")
+ >>> char3int("c1i1i1i")
>>> char3int("c3i")
>>> char3int("ci2i")
ValueError: Buffer dtype mismatch; next field is at offset 1 but 4 expected
#TODO char3int("=cxxx@iii")
-
+
Error:
>>> char3int("cii")
Traceback (most recent call last):
>>> complex_test("3Zf")
>>> complex_test("6f")
>>> complex_test("3T{Zf}")
-
+
>>> complex_test("fZfZff")
Traceback (most recent call last):
...
ValueError: Buffer dtype mismatch, expected 'float' but got 'complex float' in 'ComplexFloat.imag'
-
+
"""
cdef object obj = MockBuffer(fmt, sizeof(ComplexTest))
cdef object[ComplexTest] buf1 = obj
-
+
@testcase
def alignment_string(fmt, exc=None):
def mixed_complex_struct():
"""
Triggering a specific execution path for this case.
-
+
>>> mixed_complex_struct()
Traceback (most recent call last):
...
char a
int b
PackedSubStruct sub
-
+
@testcase
def packed_struct(fmt):
"""
Assuming int is four bytes:
-
+
>>> packed_struct("^cici")
>>> packed_struct("=cibi")
Traceback (most recent call last):
...
ValueError: Buffer packing mode currently only allowed at beginning of format string (this is a defect)
-
+
However aligned access won't work:
>>> packed_struct("@cici")
Traceback (most recent call last):
"""
cdef int (*func_ptr)(int)
-
+
def __init__(self):
self.func_ptr = &func
cdef int do_it(self, int s):
cdef int r = first_call(self).func_ptr(s) # the temp for first_call(self) not properly freed
return r
-
+
def test(self, s):
return self.do_it(s)
cdef A first_call(A x):
return x
-
+
cdef int func(int s):
return s*s
TypeError: h() takes at least 3 positional arguments (2 given)
"""
print a, b, c, u'*', len(args), len(kwargs)
-
+
args = (9,8,7)
import sys
0
>>> test_noargs(g)
0
-
+
# and some errors:
>>> test_noargs(h)
Traceback (most recent call last):
cdef object o = int(8)
print a, b, c, p[0], before, g, o
-# Also test that pruning cdefs doesn't hurt
+# Also test that pruning cdefs doesn't hurt
def empty():
cdef int i
cdef int foo(int a, int b=1, int c=1):
return a+b*c
-
+
def test_foo():
"""
>>> test_foo()
print "args", args, "kwds", kwds
return func(*args, **kwds)
return f
-
+
cdef class Num:
cdef int n
-
+
def __init__(self, n):
self.n = n
-
+
def __repr__(self):
return "Num(%s)" % self.n
-
+
@print_args
def is_prime(self, bint print_factors=False):
if self.n == 2:
def test_pow(double complex z, double complex w, tol=None):
"""
Various implementations produce slightly different results...
-
+
>>> a = complex(3, 1)
>>> test_pow(a, 1)
(3+1j)
cdef cppclass Shape:
float area()
-
+
cdef cppclass Circle(Shape):
int radius
Circle(int)
-
+
cdef cppclass Rectangle(Shape):
int width
int height
Rectangle()
Rectangle(int, int)
-
+
cdef cppclass Square(Rectangle):
int side
Square(int)
-
+
int constructor_count, destructor_count
def test_new_del():
cdef int raise_int_raw "raise_int"(bint fire) except +
cdef int raise_int_value "raise_int"(bint fire) except +ValueError
cdef int raise_int_custom "raise_int"(bint fire) except +raise_py_error
-
+
cdef int raise_index_raw "raise_index"(bint fire) except +
cdef int raise_index_value "raise_index"(bint fire) except +ValueError
cdef int raise_index_custom "raise_index"(bint fire) except +raise_py_error
namespace outer {
-
+
int x = 10;
-
+
int outer_value = 10;
-
+
namespace inner {
-
+
int x = 100;
-
+
int inner_value = 100;
-
+
}
-
+
}
namespace A {
-
+
typedef int A_t;
-
+
A_t A_func(A_t first, A_t second) {
return first + second;
}
-
+
}
void set(T)
T get()
bint operator==(Wrap[T])
-
+
cdef cppclass Pair[T1,T2]:
Pair(T1,T2)
T1 first()
try:
wrap = new Wrap[Pair[int, Pair[int, double]]](
Pair[int, Pair[int, double]](i,Pair[int, double](j, x)))
- return (wrap.get().first(),
+ return (wrap.get().first(),
wrap.get().second().first(),
- wrap.get().second().second(),
+ wrap.get().second().second(),
deref(wrap) == deref(wrap))
finally:
del wrap
out(t[0] * 1)
out(t[0] / 1)
out(t[0] % 1)
-
+
out(t[0] & 1)
out(t[0] | 1)
out(t[0] ^ 1)
-
+
out(t[0] << 1)
out(t[0] >> 1)
UN_OP(~);
UN_OP(!);
UN_OP(&);
-
+
UN_OP(++);
UN_OP(--);
POST_UN_OP(++);
POST_UN_OP(--);
-
+
BIN_OP(+);
BIN_OP(-);
BIN_OP(*);
BIN_OP(<<);
BIN_OP(>>);
-
+
BIN_OP(|);
BIN_OP(&);
BIN_OP(^);
void assign(int, T)
void clear()
int size()
-
+
cppclass iterator:
T operator*()
iterator operator++()
void set(T)
T get()
bint operator==(Wrap[T])
-
+
cdef cppclass Pair[T1,T2]:
Pair(T1,T2)
T1 first()
cdef class TestExtPtr:
cdef void* p
- def __init__(self, int i):
+ def __init__(self, int i):
cdef _aux aux
aux.i = i
self.p = aux.p
print PyType_Check2([])
print PyType_Check3(list)
print PyType_Check3([])
-
+
>>> test(d, Unhashable())
Traceback (most recent call last):
ValueError
-
+
>>> test(None, 1) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: ...subscriptable...
... refcount1 = sys.getrefcount(SampleException)
... test_func()
... refcount2 = sys.getrefcount(SampleException)
-...
+...
... assert_refcount(refcount1, refcount2, test_func)
... assert_refcount(initial_refcount, refcount2, test_func)
... refcount3 = sys.getrefcount(SampleException)
cdef class Spam:
cdef int tons
-
+
cdef void add_tons(self, int x):
self.tons = self.tons + x
def test_tuple(tuple t):
"""
Actual builtin types are restrictive wrt subclassing so optimizations can be safely performed.
-
+
>>> test_tuple((1,2))
2
>>> class tuple_subclass(tuple): pass
cdef class Spam:
property eggs:
-
+
def __get__(self):
"""
This is the docstring for Spam.eggs.__get__
def __cinit__(self, eggs):
self.eggs = eggs
self.ham = 42
-
+
def __dealloc__(self):
self.ham = 0
-
+
def eat(self):
gobble(self.eggs, self.ham)
def range_loop_indices():
"""
- Optimized integer for loops using range() should follow Python behavior,
+ Optimized integer for loops using range() should follow Python behavior,
and leave the index variable with the last value of the range.
"""
cdef int i, j, k=0, l=10, m=10
def from_loop_indices():
"""
- for-from-loops should follow C behavior, and leave the index variable
+ for-from-loops should follow C behavior, and leave the index variable
incremented one step after the last iteration.
"""
cdef int i, j, k
__doc__ = """
>>> A.foo = foo
>>> print A().foo()
-
+
"""
class A:
def __init__(self, n):
self._n = n
-
+
def __repr__(self):
return "SomeNumber(%s)" % self._n
Traceback (most recent call last):
...
TypeError: That's kind of a round number...
-
+
"""
cdef class A:
elif b:
x = 2
return x
-
+
def h(a, b):
"""
>>> h(1,2)
return a
def temps(x):
- return ident(1) if ident(x) < ident(5) else ident(10)
+ return ident(1) if ident(x) < ident(5) else ident(10)
def nested(x):
return 1 if x == 1 else (2 if x == 2 else 3)
import types
cdef long maxunicode
cdef type t
-
+
from sys import maxunicode
print maxunicode == sys.maxunicode
from types import ModuleType as t
return o[i]
-# These make sure that our fast indexing works with large and unsigned types.
+# These make sure that our fast indexing works with large and unsigned types.
def test_unsigned_long():
"""
def side_effect(x):
print u"side effect", x
return x
-
+
cdef int c_side_effect(int x):
print u"c side effect", x
return x
-
+
def test_side_effects():
"""
>>> test_side_effects()
nested.a.value = 2
nested.a.value += 3
assert nested.a.value == 5
-
+
nested.a.inner.x = 5
nested.a.inner.x += 10
assert nested.a.inner.x == 15
array.a[0].value = 2
array.a[c_side_effect(0)].value += 3
assert array.a[0].value == 5
-
+
array.a[1].inner.x = 5
array.a[c_side_effect(1)].inner.x += 10
assert array.a[1].inner.x == 15
cdef unsigned long ua = 1UL
cdef long long aa = 0xFFFFFFFFFFFFFFFFLL
cdef unsigned long long uaa = 0xFFFFFFFFFFFFFFFFULL
-
+
return a, ua, aa, uaa
-
+
def py_longs():
return 1, 1L, 100000000000000000000000000000000, -100000000000000000000000000000000
k[0] -= 1
return a(k[0], b, x1, x2, x3, x4)
return compute(x4) + compute(x5) if k[0] <= 0 else b()
-
+
"""
>>> add_large() == 2147483647 + 2147483647
True
-
+
#>>> add_large_c() == 2147483647 + 2147483647
#True
"""
[0, 1, 2, 3, 4, 5, 6, 7]
>>> while L:
... _ = simple_pop(L)
-
+
>>> L
[]
>>> simple_pop(L)
[0, 1, 2, 3, 4, 5, 6, 7]
>>> while L:
... _ = simple_pop_typed(L)
-
+
>>> L
[]
>>> simple_pop_typed(L)
Traceback (most recent call last):
...
IndexError: pop index out of range
-
+
>>> while L:
... _ = index_pop(L, 0)
-
+
>>> L
[]
-
+
>>> index_pop(L, 0)
Traceback (most recent call last):
...
Traceback (most recent call last):
...
IndexError: pop index out of range
-
+
>>> while L:
... _ = index_pop_typed(L, 0)
-
+
>>> L
[]
-
+
>>> index_pop_typed(L, 0)
Traceback (most recent call last):
...
a \three \line
raw string with some backslashes.'''
m = 'Three backslashed ordinaries: \c\g\+'
- n = '''Triple single quoted string
+ n = '''Triple single quoted string
with ' and " quotes'''
- o = """Triple double quoted string
+ o = """Triple double quoted string
with ' and " quotes"""
p = "name_like_string"
q = "NameLikeString2"
True
"""
pass
-
+
g = 42
x = u"spam"
y = u"eggs"
except:
raise
except (AttributeError,
- KeyError,
- IndexError,
+ KeyError,
+ IndexError,
ValueError) as e:
val = e
raise e
void import_array()
void import_umath()
-if 0:
+if 0:
import_array()
import_umath()
[[ 16. 17. 18. 19.]
[ 20. 21. 22. 23.]]]
6.0 0.0 13.0 8.0
-
+
>>> obj_array()
[a 1 {}]
a 1 {}
Traceback (most recent call last):
...
ValueError: ndarray is not C contiguous
-
+
>>> test_dtype('b', inc1_byte)
>>> test_dtype('B', inc1_ubyte)
>>> test_dtype('h', inc1_short)
>>> test_dtype('I', inc1_uint)
>>> test_dtype('l', inc1_long)
>>> test_dtype('L', inc1_ulong)
-
+
>>> test_dtype('f', inc1_float)
>>> test_dtype('d', inc1_double)
>>> test_dtype('g', inc1_longdouble)
Traceback (most recent call last):
...
ValueError: Non-native byte order not supported
-
+
>>> test_recordarray()
-
+
>>> print(test_nested_dtypes(np.zeros((3,), dtype=np.dtype([\
('a', np.dtype('i,i')),\
('b', np.dtype('i,i'))\
]))))
- array([((0, 0), (0, 0)), ((1, 2), (1, 4)), ((1, 2), (1, 4))],
+ array([((0, 0), (0, 0)), ((1, 2), (1, 4)), ((1, 2), (1, 4))],
dtype=[('a', [('f0', '!i4'), ('f1', '!i4')]), ('b', [('f0', '!i4'), ('f1', '!i4')])])
>>> print(test_nested_dtypes(np.zeros((3,), dtype=np.dtype([\
ValueError: Buffer dtype mismatch, expected 'int' but got 'float' in 'DoubleInt.y'
>>> print(test_packed_align(np.zeros((1,), dtype=np.dtype('b,i', align=False))))
- array([(22, 23)],
+ array([(22, 23)],
dtype=[('f0', '|i1'), ('f1', '!i4')])
>>> print(test_unpacked_align(np.zeros((1,), dtype=np.dtype('b,i', align=True))))
- array([(22, 23)],
+ array([(22, 23)],
dtype=[('f0', '|i1'), ('', '|V3'), ('f1', '!i4')])
>>> print(test_packed_align(np.zeros((1,), dtype=np.dtype('b,i', align=True))))
8,16
>>> test_point_record()
- array([(0.0, 0.0), (1.0, -1.0), (2.0, -2.0)],
+ array([(0.0, 0.0), (1.0, -1.0), (2.0, -2.0)],
dtype=[('x', '!f8'), ('y', '!f8')])
-
+
"""
except:
__doc__ = u""
Since Py2.3 doctest don't support <BLANKLINE>, manually replace blank lines
with <_BLANKLINE_>
"""
- return unicode(arr).replace(u'\n\n', u'\n<_BLANKLINE_>\n')
+ return unicode(arr).replace(u'\n\n', u'\n<_BLANKLINE_>\n')
def basic():
cdef object[int, ndim=2] buf = np.arange(10, dtype=b'i').reshape((2, 5))
def inc1_cfloat_struct(np.ndarray[np.cfloat_t] arr):
arr[1].real += 1
arr[1].imag += 1
-
+
def inc1_cdouble_struct(np.ndarray[np.cdouble_t] arr):
arr[1].real += 1
arr[1].imag += 1
def inc1_int32_t(np.ndarray[np.int32_t] arr): arr[1] += 1
def inc1_float64_t(np.ndarray[np.float64_t] arr): arr[1] += 1
-
+
def test_dtype(dtype, inc1):
if dtype in ("g", np.longdouble,
"G", np.clongdouble):
def test_unpacked_align(np.ndarray[UnpackedStruct] arr):
arr[0].a = 22
- arr[0].b = 23
+ arr[0].b = 23
return repr(arr).replace('<', '!').replace('>', '!')
def test_complextypes():
cdef class Tri:
def test(self):
return 1
-
+
cdef class Curseur:
cdef Tri tri
def detail(self):
i = sizeof(xx)
ptr = cython.declare(cython.p_int, cython.address(y))
return y, ptr[0]
-
+
@cython.locals(x=cython.double, n=cython.int)
def test_cast(x):
"""
"""
n = cython.cast(cython.int, x)
return n
-
+
@cython.locals(x=cython.int, y=cython.p_int)
def test_address(x):
"""
## i = sizeof(xx)
## ptr = cython.declare(cython.p_int, cython.address(y))
## return y, ptr[0]
-
+
@cython.locals(x=cython.double, n=cython.int)
def test_cast(x):
"""
"""
n = cython.cast(cython.int, x)
return n
-
+
@cython.locals(x=cython.int, y=cython.p_int)
def test_address(x):
"""
"""
cdef extern from "complexobject.h":
-
+
struct Py_complex:
double real
double imag
-
+
ctypedef class __builtin__.complex [object PyComplexObject]:
cdef Py_complex cval
-
+
def spam(complex c):
print u"Real:", c.cval.real
print u"Imag:", c.cval.imag
for i in l:
if i > 1:
return i
-
+
def go_tuple():
"""
>>> go_tuple()
... foo()
... except Exception, e:
... print("%s: %s" % (e.__class__.__name__, e))
-ValueError:
+ValueError:
>>> try:
... bar()
... except Exception, e:
void memcpy(char *d, char *s, int n)
from cpython cimport PyUnicode_DecodeUTF8
-
+
def spam():
cdef char buf[12]
memcpy(buf, "Ftang\0Ftang!", sizeof(buf))
cdef class Spam:
cdef int tons
-
+
def __cinit__(self):
self.tons = 17
-
+
def __dealloc__(self):
print self.tons, u"tons of spam is history."
-
+
def get_tons(self):
return self.tons
-
+
def set_tons(self, x):
self.tons = x
#define SHAPES_H
namespace shapes {
-
+
int constructor_count = 0;
int destructor_count = 0;
-
+
class Shape
{
public:
virtual float area() = 0;
- Shape() { constructor_count++; }
+ Shape() { constructor_count++; }
virtual ~Shape() { destructor_count++; }
};
{
public:
Rectangle() { }
- Rectangle(int width, int height)
+ Rectangle(int width, int height)
{
this->width = width;
this->height = height;
f(a, b,)
g(1, 2.0, "spam")
g(a, b, c)
-
+
def fail0(a, b):
"""
>>> fail0(1,2)
TypeError: f() takes exactly 2 positional arguments (0 given)
"""
f()
-
+
def fail1(a, b):
"""
>>> fail1(1,2)
obj1 = obj2[:obj4:obj5]
obj1 = obj2[obj3:obj4:obj5]
obj1 = obj2[int3:int4:int5]
-
+
>>> double_ptr_slice(0, L, 3, 7)
>>> double_ptr_slice(5, L, 3, 7)
>>> double_ptr_slice(9, L, 3, 7)
-
+
>>> double_ptr_slice(EqualsEvens(), L, 0, 10)
>>> double_ptr_slice(EqualsEvens(), L, 1, 10)
"""
# generate its own wrapper. (This wrapper would be used, for instance,
# when using the special method as a bound method.)
-# To test this, we go through and verify that each affected special
+# To test this, we go through and verify that each affected special
# method works as a bound method.
# Special methods that are treated the same under Python 2 and 3 are
-# tested here; see also special_methods_T561_py2.pyx and
+# tested here; see also special_methods_T561_py2.pyx and
# special_methods_T561_py3.pyx for tests of the differences between
# Python 2 and 3.
>>> vs0_len()
VS __len__ 0
0
- >>> # If you define either setitem or delitem, you get wrapper objects
+ >>> # If you define either setitem or delitem, you get wrapper objects
>>> # for both methods. (This behavior is unchanged by #561.)
>>> si_setitem = SetItem().__setitem__
>>> si_setitem('foo', 'bar')
>>> g11 = object.__getattribute__(GetAttribute(), '__getattribute__')
>>> g11('attr')
GetAttribute getattribute 'attr'
- >>> # If you define either setattr or delattr, you get wrapper objects
+ >>> # If you define either setattr or delattr, you get wrapper objects
>>> # for both methods. (This behavior is unchanged by #561.)
>>> sa_setattr = SetAttr().__setattr__
>>> sa_setattr('foo', 'bar')
>>> vs0_get = vs0.__get__
>>> vs0_get('instance', 'owner')
VS __get__ 0 'instance' 'owner'
- >>> # If you define either set or delete, you get wrapper objects
+ >>> # If you define either set or delete, you get wrapper objects
>>> # for both methods. (This behavior is unchanged by #561.)
>>> s_set = Set().__set__
>>> s_set('instance', 'val')
>>> vs0_getslice(13, 42)
VS __getslice__ 0 13 42
>>> # Cython supports setslice and delslice only for Python 2.
- >>> # If you define either setslice or delslice, you get wrapper objects
+ >>> # If you define either setslice or delslice, you get wrapper objects
>>> # for both methods. (This behavior is unchanged by #561.)
>>> ss_setslice = SetSlice().__setslice__
>>> ss_setslice(13, 42, 'foo')
"""
>>> subs('testing a subtype')
'testing a subtype'
-
+
# >>> csub('testing a subtype')
# 'testing a subtype'
# >>> csubs('testing a subtype')
True
>>> len(u6)
7
-
+
>>> newlines == "Aaa\n"
True
"""
"""
cdef Point p = Point(x, y, color)
return p
-
+
def test_constructor_kwds(x, y, color):
"""
>>> test_constructor_kwds(1.25, 2.5, 128)
"""
cdef Point p = Point(x=x, y=y, color=color)
return p
-
+
def test_dict_construction(x, y, color):
"""
>>> test_dict_construction(4, 5, 64)
bint is_integral
int_or_float data
void* ptr
-
+
def test_pointers(int n, double x):
"""
>>> test_pointers(100, 2.71828)
int1 = int2 - int3
obj1 = obj2 - int3
return int1, obj1
-
+
def p():
"""
>>> p()
cdef int bitsize(A a):
return 1
-
+
coeffs = [A()]
class B:
assert typeof(c) == "Python object", typeof(c)
d = -int(5)
assert typeof(d) == "Python object", typeof(d)
-
+
def builtin_type_operations():
"""
assert typeof(T1) == "tuple object", typeof(T1)
T2 = () * 2
assert typeof(T2) == "tuple object", typeof(T2)
-
+
def cascade():
"""
>>> cascade()
assert typeof(d) == "long", typeof(d)
# we special-case inference to type str, see
- # trac #553
+ # trac #553
s = "abc"
assert typeof(s) == "Python object", typeof(s)
cdef str t = "def"
double actual_double
DoubleTypedef float_isreally_double
LongDoubleTypedef float_isreally_longdouble
-
+
def __init__(self):
self.actual_double = 42.0
self.float_isreally_double = 42.0
print typeof(x)
print typeof(None)
used = i, l, ll, <long>iptr, <long>iptrptr, a, b, x
-
+
def expression():
"""
>>> expression()
>>> print_all( text.split(sep) )
ab jd
sdflk as sa
- sadas asdas fsdf
+ sadas asdas fsdf
>>> print_all( split_sep(text, sep) )
ab jd
sdflk as sa
- sadas asdas fsdf
+ sadas asdas fsdf
"""
return s.split(sep)
"""
>>> print_all( text.split(sep, 1) )
ab jd
- sdflk as sa sadas asdas fsdf
+ sdflk as sa sadas asdas fsdf
>>> print_all( split_sep_max(text, sep, 1) )
ab jd
- sdflk as sa sadas asdas fsdf
+ sdflk as sa sadas asdas fsdf
"""
return s.split(sep, max)
"""
>>> print_all( text.split(sep, 1) )
ab jd
- sdflk as sa sadas asdas fsdf
+ sdflk as sa sadas asdas fsdf
>>> print_all( split_sep_max_int(text, sep) )
ab jd
- sdflk as sa sadas asdas fsdf
+ sdflk as sa sadas asdas fsdf
"""
return s.split(sep, 1)
>>> print_all( multiline_text.splitlines() )
ab jd
sdflk as sa
- sadas asdas fsdf
+ sadas asdas fsdf
>>> len(splitlines(multiline_text))
3
>>> print_all( splitlines(multiline_text) )
ab jd
sdflk as sa
- sadas asdas fsdf
+ sadas asdas fsdf
"""
return s.splitlines()
<BLANKLINE>
sdflk as sa
<BLANKLINE>
- sadas asdas fsdf
+ sadas asdas fsdf
>>> len(splitlines_keep(multiline_text, True))
3
>>> print_all( splitlines_keep(multiline_text, True) )
<BLANKLINE>
sdflk as sa
<BLANKLINE>
- sadas asdas fsdf
+ sadas asdas fsdf
"""
return s.splitlines(keep)
<BLANKLINE>
sdflk as sa
<BLANKLINE>
- sadas asdas fsdf
+ sadas asdas fsdf
>>> print_all( multiline_text.splitlines(False) )
ab jd
sdflk as sa
- sadas asdas fsdf
+ sadas asdas fsdf
>>> len(splitlines_keep_bint(multiline_text))
7
>>> print_all( splitlines_keep_bint(multiline_text) )
<BLANKLINE>
sdflk as sa
<BLANKLINE>
- sadas asdas fsdf
+ sadas asdas fsdf
--
ab jd
sdflk as sa
- sadas asdas fsdf
+ sadas asdas fsdf
"""
return s.splitlines(True) + ['--'] + s.splitlines(False)
def replace(unicode s, substring, repl):
"""
>>> print( text.replace('sa', 'SA') )
- ab jd sdflk as SA SAdas asdas fsdf
+ ab jd sdflk as SA SAdas asdas fsdf
>>> print( replace(text, 'sa', 'SA') )
- ab jd sdflk as SA SAdas asdas fsdf
+ ab jd sdflk as SA SAdas asdas fsdf
"""
return s.replace(substring, repl)
def replace_maxcount(unicode s, substring, repl, maxcount):
"""
>>> print( text.replace('sa', 'SA', 1) )
- ab jd sdflk as SA sadas asdas fsdf
+ ab jd sdflk as SA sadas asdas fsdf
>>> print( replace_maxcount(text, 'sa', 'SA', 1) )
- ab jd sdflk as SA sadas asdas fsdf
+ ab jd sdflk as SA sadas asdas fsdf
"""
return s.replace(substring, repl, maxcount)
for x in range(-i,i):
times += 1
return times
-
+
def rangelist():
cdef unsigned int i = 3
return list(range(-i, i))
def __exit__(self, a, b, tb):
print u"exit", typename(a), typename(b), typename(tb)
return self.exit_ret
-
+
def __enter__(self):
print u"enter"
return self.value
"""
with ContextManager(u"value"):
print u"hello"
-
+
def basic():
"""
>>> basic()
"""
with ContextManager(u"value") as x:
print x
-
+
def with_pass():
"""
>>> with_pass()
"""
with ContextManager(u"value") as x:
pass
-
+
def with_return():
"""
>>> with_return()
DoubleKeeper::DoubleKeeper ()
- : number (1.0)
+ : number (1.0)
{
}
class DoubleKeeper
{
double number;
-
+
public:
DoubleKeeper ();
DoubleKeeper (double number);
virtual ~DoubleKeeper ();
-
+
void set_number (double num);
void set_number (void);
double get_number () const;
cdef extern from "cpp_references_helper.h":
cdef int& ref_func(int&)
-
+
cdef int ref_var_value
cdef int& ref_var
class DoubleKeeper
{
double number;
-
+
public:
DoubleKeeper (double number);
virtual ~DoubleKeeper ();
-
+
void set_number (double num);
double get_number () const;
virtual double transmogrify (double value) const;