from Nodes import Node
import PyrexTypes
from PyrexTypes import py_object_type, c_long_type, typecast, error_type
+from Builtin import list_type, tuple_type, dict_type
import Symtab
import Options
+from Annotate import AnnotationItem
from Cython.Debugging import print_call_chain
from DebugFlags import debug_disposal_code, debug_temp_alloc, \
debug_coercion
+
class ExprNode(Node):
# subexprs [string] Class var holding names of subexpr node attrs
# type PyrexType Type of the result
# Cached result of subexpr_nodes()
result_ctype = None
+ type = None
# The Analyse Expressions phase for expressions is split
# into two sub-phases:
saved_subexpr_nodes = None
is_temp = 0
+ def get_child_attrs(self):
+ """Automatically provide the contents of subexprs as children, unless child_attr
+ has been declared. See Nodes.Node.get_child_accessors."""
+ if self.child_attrs is not None:
+ return self.child_attrs
+ elif self.subexprs is not None:
+ return self.subexprs
+
def not_implemented(self, method_name):
print_call_chain(method_name, "not implemented") ###
raise InternalError(
def allocate_target_temps(self, env, rhs):
# Perform temp allocation for the LHS of an assignment.
if debug_temp_alloc:
- print self, "Allocating target temps"
+ print("%s Allocating target temps" % self)
self.allocate_subexpr_temps(env)
self.result_code = self.target_code()
if rhs:
# is used as the result instead of allocating a new
# one.
if debug_temp_alloc:
- print self, "Allocating temps"
+ print("%s Allocating temps" % self)
self.allocate_subexpr_temps(env)
self.allocate_temp(env, result)
if self.is_temp:
# Allocate temporary variables for all sub-expressions
# of this node.
if debug_temp_alloc:
- print self, "Allocating temps for:", self.subexprs
+ print("%s Allocating temps for: %s" % (self, self.subexprs))
for node in self.subexpr_nodes():
if node:
if debug_temp_alloc:
- print self, "Allocating temps for", node
+ print("%s Allocating temps for %s" % (self, node))
node.allocate_temps(env)
def allocate_temp(self, env, result = None):
# is used as the result instead of allocating a new
# one.
if debug_temp_alloc:
- print self, "Allocating temp"
+ print("%s Allocating temp" % self)
if result:
if not self.is_temp:
raise InternalError("Result forced on non-temp node")
else:
self.result_code = None
if debug_temp_alloc:
- print self, "Allocated result", self.result_code
+ print("%s Allocated result %s" % (self, self.result_code))
else:
self.result_code = self.calculate_result_code()
# otherwise release results of its sub-expressions.
if self.is_temp:
if debug_temp_alloc:
- print self, "Releasing result", self.result_code
+ print("%s Releasing result %s" % (self, self.result_code))
env.release_temp(self.result_code)
else:
self.release_subexpr_temps(env)
code.put_incref(self.result_code, self.ctype())
def generate_evaluation_code(self, code):
+ code.mark_pos(self.pos)
# Generate code to evaluate this node and
# its sub-expressions, and dispose of any
# temporary results of its sub-expressions.
# will have been reported earlier.
pass
+ # ---------------- Annotation ---------------------
+
+ def annotate(self, code):
+ for node in self.subexpr_nodes():
+ node.annotate(code)
+
# ----------------- Coercion ----------------------
def coerce_to(self, dst_type, env):
src = CoerceFromPyTypeNode(dst_type, src, env)
else: # neither src nor dst are py types
# Added the string comparison, since for c types that
- # is enough, but SageX gets confused when the types are
+ # is enough, but Cython gets confused when the types are
# in different files.
if not (str(src.type) == str(dst_type) or dst_type.assignable_from(src_type)):
error(self.pos, "Cannot assign type '%s' to '%s'" %
class PyConstNode(AtomicExprNode):
# Abstract base class for constant Python values.
+ is_literal = 1
+
def is_simple(self):
return 1
def compile_time_value(self, denv):
return None
class BoolNode(PyConstNode):
    #  The constant value True or False
    #
    #  value   bool

    def compile_time_value(self, denv):
        # A boolean constant is its own compile-time value.
        return self.value

    def calculate_result_code(self):
        # Refer directly to the interpreter's singletons.
        if self.value:
            return "Py_True"
        else:
            return "Py_False"

    def coerce_to(self, dst_type, env):
        if dst_type.is_numeric:
            # IntNode stores its value as a source-text string (it is
            # parsed elsewhere with int(value, 0)), so pass "0"/"1"
            # rather than a raw Python int.
            return IntNode(self.pos, value=str(int(self.value))).coerce_to(dst_type, env)
        else:
            return PyConstNode.coerce_to(self, dst_type, env)
class EllipsisNode(PyConstNode):
# '...' in a subscript list.
return str(self.value)
def compile_time_value(self, denv):
- return int(self.value)
+ return int(self.value, 0)
class FloatNode(ConstNode):
def compile_time_value(self, denv):
return float(self.value)
+
+ def calculate_result_code(self):
+ strval = str(self.value)
+ if strval == 'nan':
+ return "(Py_HUGE_VAL * 0)"
+ elif strval == 'inf':
+ return "Py_HUGE_VAL"
+ elif strval == '-inf':
+ return "(-Py_HUGE_VAL)"
+ else:
+ return strval
class StringNode(ConstNode):
type = PyrexTypes.c_char_ptr_type
def compile_time_value(self, denv):
- return eval('"%s"' % self.value)
+ return self.value
def analyse_types(self, env):
self.entry = env.add_string_const(self.value)
def coerce_to(self, dst_type, env):
+ if dst_type.is_int:
+ if not self.type.is_pyobject and len(self.entry.init) == 1:
+ # we use the *encoded* value here
+ return CharNode(self.pos, value=self.entry.init)
+ else:
+ error(self.pos, "Only coerce single-character ascii strings can be used as ints.")
+ return self
# Arrange for a Python version of the string to be pre-allocated
# when coercing to a Python type.
if dst_type.is_pyobject and not self.type.is_pyobject:
return self.entry.cname
class IdentifierStringNode(ConstNode):
    #  A Python string that behaves like an identifier, e.g. for
    #  keyword arguments in a call, or for imported names
    #
    #  cname   string   C name of the interned identifier object
    type = PyrexTypes.py_object_type

    def analyse_types(self, env):
        # The scope hands back the C name of the interned identifier.
        self.cname = env.intern_identifier(self.value)

    def calculate_result_code(self):
        return self.cname
+
class LongNode(AtomicExprNode):
# Python long integer literal
#
try:
return denv.lookup(self.name)
except KeyError:
- error(self.pos, "Compile-time name '%s' not defined", self.name)
+ error(self.pos, "Compile-time name '%s' not defined" % self.name)
def coerce_to(self, dst_type, env):
# If coercing to a generic pyobject and this is a builtin
if entry and entry.is_cfunction:
var_entry = entry.as_variable
if var_entry:
+ if var_entry.is_builtin and Options.cache_builtins:
+ var_entry = env.declare_builtin(var_entry.name, self.pos)
node = NameNode(self.pos, name = self.name)
node.entry = var_entry
node.analyse_rvalue_entry(env)
self.entry = env.lookup_here(self.name)
if not self.entry:
self.entry = env.declare_var(self.name, py_object_type, self.pos)
+ env.control_flow.set_state(self.pos, (self.name, 'initalized'), True)
+ env.control_flow.set_state(self.pos, (self.name, 'source'), 'assignment')
if self.entry.is_declared_generic:
self.result_ctype = py_object_type
+ if self.entry.is_pyglobal and self.entry.is_member:
+ env.use_utility_code(type_cache_invalidation_code)
def analyse_types(self, env):
self.entry = env.lookup(self.name)
self.is_temp = 0
else:
self.is_temp = 1
- if Options.intern_names:
- env.use_utility_code(get_name_interned_utility_code)
- else:
- env.use_utility_code(get_name_utility_code)
+ env.use_utility_code(get_name_interned_utility_code)
def analyse_entry(self, env):
#print "NameNode.analyse_entry:", self.name ###
self.type = type
if entry.is_pyglobal or entry.is_builtin:
assert type.is_pyobject, "Python global or builtin not a Python object"
- if Options.intern_names:
- self.interned_cname = self.entry.interned_cname = env.intern(self.entry.name)
+ self.interned_cname = self.entry.interned_cname = \
+ env.intern_identifier(self.entry.name)
def check_identifier_kind(self):
#print "NameNode.check_identifier_kind:", self.entry.name ###
def check_const(self):
entry = self.entry
- if not (entry.is_const or entry.is_cfunction or entry.is_builtin):
+ if entry is not None and not (entry.is_const or entry.is_cfunction or entry.is_builtin):
self.not_const()
def check_const_addr(self):
namespace = Naming.builtins_cname
else: # entry.is_pyglobal
namespace = entry.namespace_cname
- if Options.intern_names:
- code.putln(
- '%s = __Pyx_GetName(%s, %s); %s' % (
- self.result_code,
- namespace,
- self.interned_cname,
- code.error_goto_if_null(self.result_code, self.pos)))
- else:
- code.putln(
- '%s = __Pyx_GetName(%s, "%s"); %s' % (
- self.result_code,
- namespace,
- self.entry.name,
- code.error_goto_if_null(self.result_code, self.pos)))
+ code.putln(
+ '%s = __Pyx_GetName(%s, %s); %s' % (
+ self.result_code,
+ namespace,
+ self.interned_cname,
+ code.error_goto_if_null(self.result_code, self.pos)))
+ elif entry.is_local and False:
+ # control flow not good enough yet
+ assigned = entry.scope.control_flow.get_state((entry.name, 'initalized'), self.pos)
+ if assigned is False:
+ error(self.pos, "local variable '%s' referenced before assignment" % entry.name)
+ elif not Options.init_local_none and assigned is None:
+ code.putln('if (%s == 0) { PyErr_SetString(PyExc_UnboundLocalError, "%s"); %s }' % (entry.cname, entry.name, code.error_goto(self.pos)))
+ entry.scope.control_flow.set_state(self.pos, (entry.name, 'initalized'), True)
def generate_assignment_code(self, rhs, code):
#print "NameNode.generate_assignment_code:", self.name ###
if entry.is_pyglobal:
namespace = self.entry.namespace_cname
if entry.is_member:
- # if we entry is a member we have to cheat: SetAttr does not work
+ # if the entry is a member we have to cheat: SetAttr does not work
# on types, so we create a descriptor which is then added to tp_dict
- if Options.intern_names:
- code.put_error_if_neg(self.pos,
- 'PyDict_SetItem(%s->tp_dict, %s, %s)' % (
- namespace,
- self.interned_cname,
- rhs.py_result()))
- else:
- code.put_error_if_neg(self.pos,
- 'PyDict_SetItemString(%s->tp_dict, %s, %s)' % (
- namespace,
- entry.name,
- rhs.py_result()))
-
+ code.put_error_if_neg(self.pos,
+ 'PyDict_SetItem(%s->tp_dict, %s, %s)' % (
+ namespace,
+ self.interned_cname,
+ rhs.py_result()))
+ # in Py2.6+, we need to invalidate the method cache
+ code.putln("__Pyx_TypeModified((PyTypeObject*)%s);" %
+ entry.scope.parent_type.typeptr_cname)
else:
- if Options.intern_names:
- code.put_error_if_neg(self.pos,
- 'PyObject_SetAttr(%s, %s, %s)' % (
- namespace,
- self.interned_cname,
- rhs.py_result()))
- else:
- code.put_error_if_neg(self.pos,
- 'PyObject_SetAttrString(%s, "%s", %s)' % (
- namespace,
- entry.name,
- rhs.py_result()))
+ code.put_error_if_neg(self.pos,
+ 'PyObject_SetAttr(%s, %s, %s)' % (
+ namespace,
+ self.interned_cname,
+ rhs.py_result()))
if debug_disposal_code:
- print "NameNode.generate_assignment_code:"
- print "...generating disposal code for", rhs
+ print("NameNode.generate_assignment_code:")
+ print("...generating disposal code for %s" % rhs)
rhs.generate_disposal_code(code)
else:
#print "...LHS type", self.type, "ctype", self.ctype() ###
#print "...RHS type", rhs.type, "ctype", rhs.ctype() ###
rhs.make_owned_reference(code)
- code.put_decref(self.result_code, self.ctype())
+ if entry.is_local and not Options.init_local_none:
+ initalized = entry.scope.control_flow.get_state((entry.name, 'initalized'), self.pos)
+ if initalized is True:
+ code.put_decref(self.result_code, self.ctype())
+ elif initalized is None:
+ code.put_xdecref(self.result_code, self.ctype())
+ else:
+ code.put_decref(self.result_code, self.ctype())
code.putln('%s = %s;' % (self.result_code, rhs.result_as(self.ctype())))
if debug_disposal_code:
- print "NameNode.generate_assignment_code:"
- print "...generating post-assignment code for", rhs
+ print("NameNode.generate_assignment_code:")
+ print("...generating post-assignment code for %s" % rhs)
rhs.generate_post_assignment_code(code)
def generate_deletion_code(self, code):
'PyObject_DelAttrString(%s, "%s")' % (
Naming.module_cname,
self.entry.name))
+
+ def annotate(self, code):
+ if hasattr(self, 'is_called') and self.is_called:
+ pos = (self.pos[0], self.pos[1], self.pos[2] - len(self.name) - 1)
+ if self.type.is_pyobject:
+ code.annotate(pos, AnnotationItem('py_call', 'python function', size=len(self.name)))
+ else:
+ code.annotate(pos, AnnotationItem('c_call', 'c function', size=len(self.name)))
class BackquoteNode(ExprNode):
# Implements result =
# __import__(module_name, globals(), None, name_list)
#
- # module_name StringNode dotted name of module
- # name_list ListNode or None list of names to be imported
+ # module_name IdentifierStringNode dotted name of module
+ # name_list ListNode or None list of names to be imported
subexprs = ['module_name', 'name_list']
function = self.function
function.is_called = 1
self.function.analyse_types(env)
+ if function.is_attribute and function.is_py_attr and \
+ function.attribute == "append" and len(self.args) == 1:
+ # L.append(x) is almost always applied to a list
+ self.py_func = self.function
+ self.function = NameNode(pos=self.function.pos, name="__Pyx_PyObject_Append")
+ self.function.analyse_types(env)
+ self.self = self.py_func.obj
+ function.obj = CloneNode(self.self)
+ env.use_utility_code(append_utility_code)
if function.is_attribute and function.entry and function.entry.is_cmethod:
# Take ownership of the object from which the attribute
# was obtained, because we need to pass it as 'self'.
function.obj = CloneNode(self.self)
func_type = self.function_type()
if func_type.is_pyobject:
- if self.args:
- self.arg_tuple = TupleNode(self.pos, args = self.args)
- self.arg_tuple.analyse_types(env)
- else:
- self.arg_tuple = None
+ self.arg_tuple = TupleNode(self.pos, args = self.args)
+ self.arg_tuple.analyse_types(env)
self.args = None
self.type = py_object_type
self.is_temp = 1
self.result_code = "<error>"
return
# Check no. of args
- expected_nargs = len(func_type.args)
+ max_nargs = len(func_type.args)
+ expected_nargs = max_nargs - func_type.optional_arg_count
actual_nargs = len(self.args)
if actual_nargs < expected_nargs \
- or (not func_type.has_varargs and actual_nargs > expected_nargs):
+ or (not func_type.has_varargs and actual_nargs > max_nargs):
expected_str = str(expected_nargs)
if func_type.has_varargs:
expected_str = "at least " + expected_str
+ elif func_type.optional_arg_count:
+ if actual_nargs < max_nargs:
+ expected_str = "at least " + expected_str
+ else:
+ expected_str = "at most " + str(max_nargs)
error(self.pos,
"Call with wrong number of arguments (expected %s, got %s)"
% (expected_str, actual_nargs))
self.result_code = "<error>"
return
# Coerce arguments
- for i in range(expected_nargs):
+ for i in range(min(max_nargs, actual_nargs)):
formal_type = func_type.args[i].type
self.args[i] = self.args[i].coerce_to(formal_type, env)
- for i in range(expected_nargs, actual_nargs):
+ for i in range(max_nargs, actual_nargs):
if self.args[i].type.is_pyobject:
error(self.args[i].pos,
"Python object cannot be passed as a varargs parameter")
self.is_temp = 1
if self.type.is_pyobject:
self.result_ctype = py_object_type
-
+ # C++ exception handler
+ if func_type.exception_check == '+':
+ if func_type.exception_value is None:
+ env.use_utility_code(cpp_exception_utility_code)
+
    def calculate_result_code(self):
        # Delegate to c_call_code(), which assembles the full C call
        # expression (function, fixed args, optional-arg struct, varargs).
        return self.c_call_code()
return "<error>"
formal_args = func_type.args
arg_list_code = []
- for (formal_arg, actual_arg) in \
- zip(formal_args, self.args):
+ args = zip(formal_args, self.args)
+ max_nargs = len(func_type.args)
+ expected_nargs = max_nargs - func_type.optional_arg_count
+ actual_nargs = len(self.args)
+ for formal_arg, actual_arg in args[:expected_nargs]:
arg_code = actual_arg.result_as(formal_arg.type)
arg_list_code.append(arg_code)
+
+ if func_type.optional_arg_count:
+ if expected_nargs == actual_nargs:
+ optional_args = 'NULL'
+ else:
+ optional_arg_code = [str(actual_nargs - expected_nargs)]
+ for formal_arg, actual_arg in args[expected_nargs:actual_nargs]:
+ arg_code = actual_arg.result_as(formal_arg.type)
+ optional_arg_code.append(arg_code)
+# for formal_arg in formal_args[actual_nargs:max_nargs]:
+# optional_arg_code.append(formal_arg.type.cast_code('0'))
+ optional_arg_struct = '{%s}' % ','.join(optional_arg_code)
+ optional_args = PyrexTypes.c_void_ptr_type.cast_code(
+ '&' + func_type.op_arg_struct.base_type.cast_code(optional_arg_struct))
+ arg_list_code.append(optional_args)
+
for actual_arg in self.args[len(formal_args):]:
arg_list_code.append(actual_arg.result_code)
result = "%s(%s)" % (self.function.result_code,
- join(arg_list_code, ","))
+ join(arg_list_code, ", "))
if self.wrapper_call or \
- self.function.entry.is_unbound_cmethod and self.function.entry.is_overridable:
+ self.function.entry.is_unbound_cmethod and self.function.entry.type.is_overridable:
result = "(%s = 1, %s)" % (Naming.skip_dispatch_cname, result)
return result
def generate_result_code(self, code):
func_type = self.function_type()
if func_type.is_pyobject:
- if self.arg_tuple:
- arg_code = self.arg_tuple.py_result()
- else:
- arg_code = "0"
+ arg_code = self.arg_tuple.py_result()
code.putln(
- "%s = PyObject_CallObject(%s, %s); %s" % (
+ "%s = PyObject_Call(%s, %s, NULL); %s" % (
self.result_code,
self.function.py_result(),
arg_code,
rhs = typecast(py_object_type, self.type, rhs)
else:
lhs = ""
+ if func_type.exception_check == '+':
+ if func_type.exception_value is None:
+ raise_py_exception = "__Pyx_CppExn2PyErr()"
+ elif func_type.exception_value.type.is_pyobject:
+ raise_py_exception = 'PyErr_SetString(%s, "")' % func_type.exception_value.entry.cname
+ else:
+ raise_py_exception = '%s(); if (!PyErr_Occurred()) PyErr_SetString(PyExc_RuntimeError , "Error converting c++ exception.")' % func_type.exception_value.entry.cname
+ code.putln(
+ "try {%s%s;} catch(...) {%s; %s}" % (
+ lhs,
+ rhs,
+ raise_py_exception,
+ code.error_goto(self.pos)))
+ return
code.putln(
"%s%s; %s" % (
lhs,
else:
keyword_code = None
if not keyword_code:
- call_code = "PyObject_CallObject(%s, %s)" % (
+ call_code = "PyObject_Call(%s, %s, NULL)" % (
self.function.py_result(),
self.positional_args.py_result())
else:
# member string C name of struct member
# is_called boolean Function call is being done on result
# entry Entry Symbol table entry of attribute
- # interned_attr_cname string C name of interned attribute name
+ # interned_attr_cname string C name of interned attribute name
is_attribute = 1
subexprs = ['obj']
entry = None
is_called = 0
    def coerce_to(self, dst_type, env):
        #  If coercing to a generic pyobject and this is a cpdef function
        #  we can create the corresponding attribute
        if dst_type is py_object_type:
            entry = self.entry
            if entry and entry.is_cfunction and entry.as_variable:
                # must be a cpdef function
                self.is_temp = 1
                # Swap the C-function entry for its Python-visible
                # variable entry, then re-analyse this node as a plain
                # Python attribute lookup.
                self.entry = entry.as_variable
                self.analyse_as_python_attribute(env)
                return self
        return ExprNode.coerce_to(self, dst_type, env)
+
def compile_time_value(self, denv):
attr = self.attribute
if attr.beginswith("__") and attr.endswith("__"):
ubcm_entry.is_cfunction = 1
ubcm_entry.func_cname = entry.func_cname
ubcm_entry.is_unbound_cmethod = 1
- ubcm_entry.is_overridable = entry.is_overridable
self.mutate_into_name_node(env, ubcm_entry, None)
return 1
return 0
if self.obj.type.is_string:
self.obj = self.obj.coerce_to_pyobject(env)
obj_type = self.obj.type
- if obj_type.is_ptr:
+ if obj_type.is_ptr or obj_type.is_array:
obj_type = obj_type.base_type
self.op = "->"
elif obj_type.is_extension_type:
# type, or it is an extension type and the attribute is either not
# declared or is declared as a Python method. Treat it as a Python
# attribute reference.
+ self.analyse_as_python_attribute(env)
+
+ def analyse_as_python_attribute(self, env):
+ obj_type = self.obj.type
+ self.member = self.attribute
if obj_type.is_pyobject:
self.type = py_object_type
self.is_py_attr = 1
- if Options.intern_names:
- self.interned_attr_cname = env.intern(self.attribute)
+ self.interned_attr_cname = env.intern_identifier(self.attribute)
else:
if not obj_type.is_error:
error(self.pos,
obj_code = obj.result_as(obj.type)
#print "...obj_code =", obj_code ###
if self.entry and self.entry.is_cmethod:
- return "((struct %s *)%s%s%s)->%s" % (
- obj.type.vtabstruct_cname, obj_code, self.op,
- obj.type.vtabslot_cname, self.member)
+ if obj.type.is_extension_type:
+ return "((struct %s *)%s%s%s)->%s" % (
+ obj.type.vtabstruct_cname, obj_code, self.op,
+ obj.type.vtabslot_cname, self.member)
+ else:
+ return self.member
else:
return "%s%s%s" % (obj_code, self.op, self.member)
def generate_result_code(self, code):
if self.is_py_attr:
- if Options.intern_names:
- code.putln(
- '%s = PyObject_GetAttr(%s, %s); %s' % (
- self.result_code,
- self.obj.py_result(),
- self.interned_attr_cname,
- code.error_goto_if_null(self.result_code, self.pos)))
- else:
- code.putln(
- '%s = PyObject_GetAttrString(%s, "%s"); %s' % (
- self.result_code,
- self.objpy_result(),
- self.attribute,
- code.error_goto_if_null(self.result_code, self.pos)))
+ code.putln(
+ '%s = PyObject_GetAttr(%s, %s); %s' % (
+ self.result_code,
+ self.obj.py_result(),
+ self.interned_attr_cname,
+ code.error_goto_if_null(self.result_code, self.pos)))
def generate_assignment_code(self, rhs, code):
self.obj.generate_evaluation_code(code)
if self.is_py_attr:
- if Options.intern_names:
- code.put_error_if_neg(self.pos,
- 'PyObject_SetAttr(%s, %s, %s)' % (
- self.obj.py_result(),
- self.interned_attr_cname,
- rhs.py_result()))
- else:
- code.put_error_if_neg(self.pos,
- 'PyObject_SetAttrString(%s, "%s", %s)' % (
- self.obj.py_result(),
- self.attribute,
- rhs.py_result()))
+ code.put_error_if_neg(self.pos,
+ 'PyObject_SetAttr(%s, %s, %s)' % (
+ self.obj.py_result(),
+ self.interned_attr_cname,
+ rhs.py_result()))
rhs.generate_disposal_code(code)
else:
select_code = self.result_code
def generate_deletion_code(self, code):
self.obj.generate_evaluation_code(code)
if self.is_py_attr:
- if Options.intern_names:
- code.put_error_if_neg(self.pos,
- 'PyObject_DelAttr(%s, %s)' % (
- self.obj.py_result(),
- self.interned_attr_cname))
- else:
- code.put_error_if_neg(self.pos,
- 'PyObject_DelAttrString(%s, "%s")' % (
- self.obj.py_result(),
- self.attribute))
+ code.put_error_if_neg(self.pos,
+ 'PyObject_DelAttr(%s, %s)' % (
+ self.obj.py_result(),
+ self.interned_attr_cname))
else:
error(self.pos, "Cannot delete C attribute of extension type")
self.obj.generate_disposal_code(code)
+
+ def annotate(self, code):
+ if self.is_py_attr:
+ code.annotate(self.pos, AnnotationItem('py_attr', 'python attribute', size=len(self.attribute)))
+ else:
+ code.annotate(self.pos, AnnotationItem('c_attr', 'c attribute', size=len(self.attribute)))
#-------------------------------------------------------------------
#
rhs.py_result(),
rhs.py_result(),
len(self.args)))
+ code.putln("PyObject* tuple = %s;" % rhs.py_result())
for i in range(len(self.args)):
item = self.unpacked_items[i]
code.putln(
- "%s = PyTuple_GET_ITEM(%s, %s);" % (
+ "%s = PyTuple_GET_ITEM(tuple, %s);" % (
item.result_code,
- rhs.py_result(),
i))
code.put_incref(item.result_code, item.ctype())
value_node = self.coerced_unpacked_items[i]
rhs.generate_disposal_code(code)
for i in range(len(self.args)):
item = self.unpacked_items[i]
- unpack_code = "__Pyx_UnpackItem(%s)" % (
- self.iterator.py_result())
+ unpack_code = "__Pyx_UnpackItem(%s, %d)" % (
+ self.iterator.py_result(), i)
code.putln(
"%s = %s; %s" % (
item.result_code,
"__Pyx_EndUnpack(%s)" % (
self.iterator.py_result()))
if debug_disposal_code:
- print "UnpackNode.generate_assignment_code:"
- print "...generating disposal code for", iterator
+ print("UnpackNode.generate_assignment_code:")
+ print("...generating disposal code for %s" % self.iterator)
self.iterator.generate_disposal_code(code)
code.putln("}")
+
+ def annotate(self, code):
+ for arg in self.args:
+ arg.annotate(code)
+ if self.unpacked_items:
+ for arg in self.unpacked_items:
+ arg.annotate(code)
+ for arg in self.coerced_unpacked_items:
+ arg.annotate(code)
class TupleNode(SequenceNode):
# Tuple constructor.
+
+ def analyse_types(self, env):
+ if len(self.args) == 0:
+ self.is_temp = 0
+ self.is_literal = 1
+ else:
+ SequenceNode.analyse_types(self, env)
+ self.type = tuple_type
+
+ def calculate_result_code(self):
+ if len(self.args) > 0:
+ error(self.pos, "Positive length tuples must be constructed.")
+ else:
+ return Naming.empty_tuple
def compile_time_value(self, denv):
values = self.compile_time_value_list(denv)
self.compile_time_value_error(e)
def generate_operation_code(self, code):
+ if len(self.args) == 0:
+ # result_code is Naming.empty_tuple
+ return
code.putln(
"%s = PyTuple_New(%s); %s" % (
self.result_code,
class ListNode(SequenceNode):
# List constructor.
    def analyse_types(self, env):
        # Analyse as a generic sequence constructor, then record the
        # builtin list type as the result type.
        SequenceNode.analyse_types(self, env)
        self.type = list_type
+
def compile_time_value(self, denv):
return self.compile_time_value_list(denv)
class ListComprehensionNode(SequenceNode):
subexprs = []
+ is_sequence_constructor = 0 # not unpackable
def analyse_types(self, env):
- self.type = py_object_type
+ self.type = list_type
self.is_temp = 1
self.append.target = self # this is a CloneNode used in the PyList_Append in the inner loop
def allocate_temps(self, env, result = None):
if debug_temp_alloc:
- print self, "Allocating temps"
+ print("%s Allocating temps" % self)
self.allocate_temp(env, result)
self.loop.analyse_declarations(env)
self.loop.analyse_expressions(env)
0,
code.error_goto_if_null(self.result_code, self.pos)))
self.loop.generate_execution_code(code)
+
    def annotate(self, code):
        # All annotatable sub-expressions live inside the generated loop.
        self.loop.annotate(code)
class ListComprehensionAppendNode(ExprNode):
class DictNode(ExprNode):
# Dictionary constructor.
#
- # key_value_pairs [(ExprNode, ExprNode)]
+ # key_value_pairs [DictItemNode]
+
+ subexprs = ['key_value_pairs']
def compile_time_value(self, denv):
- pairs = [(key.compile_time_value(denv), value.compile_time_value(denv))
- for (key, value) in self.key_value_pairs]
+ pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv))
+ for item in self.key_value_pairs]
try:
return dict(pairs)
except Exception, e:
self.compile_time_value_error(e)
    def analyse_types(self, env):
        # Each DictItemNode analyses and coerces its own key/value pair
        # to Python objects.
        for item in self.key_value_pairs:
            item.analyse_types(env)
        self.type = dict_type
        self.is_temp = 1
    def allocate_temps(self, env, result = None):
        #  Custom method used here because key-value
        #  pairs are evaluated and used one at a time.
        self.allocate_temp(env, result)
        for item in self.key_value_pairs:
            item.key.allocate_temps(env)
            item.value.allocate_temps(env)
            # Released immediately — presumably so subsequent pairs can
            # reuse the same temps; confirm against release_temp semantics.
            item.key.release_temp(env)
            item.value.release_temp(env)
def generate_evaluation_code(self, code):
# Custom method used here because key-value
"%s = PyDict_New(); %s" % (
self.result_code,
code.error_goto_if_null(self.result_code, self.pos)))
- for key, value in self.key_value_pairs:
- key.generate_evaluation_code(code)
- value.generate_evaluation_code(code)
+ for item in self.key_value_pairs:
+ item.generate_evaluation_code(code)
code.put_error_if_neg(self.pos,
"PyDict_SetItem(%s, %s, %s)" % (
self.result_code,
- key.py_result(),
- value.py_result()))
- key.generate_disposal_code(code)
- value.generate_disposal_code(code)
-
+ item.key.py_result(),
+ item.value.py_result()))
+ item.generate_disposal_code(code)
+
    def annotate(self, code):
        # Delegate annotation to each key/value pair item.
        for item in self.key_value_pairs:
            item.annotate(code)
+
class DictItemNode(ExprNode):
    # Represents a single item in a DictNode
    #
    # key     ExprNode   the key expression
    # value   ExprNode   the value expression
    subexprs = ['key', 'value']

    def analyse_types(self, env):
        # Both sides are coerced to Python objects, since they end up
        # in a Python dict (PyDict_SetItem in DictNode).
        self.key.analyse_types(env)
        self.value.analyse_types(env)
        self.key = self.key.coerce_to_pyobject(env)
        self.value = self.value.coerce_to_pyobject(env)

    def generate_evaluation_code(self, code):
        # Key first, value second — disposal below mirrors this order.
        self.key.generate_evaluation_code(code)
        self.value.generate_evaluation_code(code)

    def generate_disposal_code(self, code):
        self.key.generate_disposal_code(code)
        self.value.generate_disposal_code(code)
+
class ClassNode(ExprNode):
# Helper class used in the implementation of Python
# class definitions. Constructs a class object given
# a name, tuple of bases and class dictionary.
#
- # name ExprNode Name of the class
+ # name EncodedString Name of the class
+ # cname string Class name as a Python string
# bases ExprNode Base class tuple
# dict ExprNode Class dict (not owned by this node)
# doc ExprNode or None Doc string
# module_name string Name of defining module
- subexprs = ['name', 'bases', 'doc']
-
+ subexprs = ['bases', 'doc']
+
def analyse_types(self, env):
- self.name.analyse_types(env)
- self.name = self.name.coerce_to_pyobject(env)
+ self.cname = env.intern_identifier(self.name)
self.bases.analyse_types(env)
if self.doc:
self.doc.analyse_types(env)
self.type = py_object_type
self.is_temp = 1
env.use_utility_code(create_class_utility_code);
-
+
def generate_result_code(self, code):
if self.doc:
code.put_error_if_neg(self.pos,
self.result_code,
self.bases.py_result(),
self.dict.py_result(),
- self.name.py_result(),
+ self.cname,
self.module_name,
code.error_goto_if_null(self.result_code, self.pos)))
# Construct unnop node of appropriate class for
# given operator.
if isinstance(operand, IntNode) and operator == '-':
- return IntNode(pos = operand.pos, value = -int(operand.value))
+ return IntNode(pos = operand.pos, value = str(-int(operand.value, 0)))
+ elif isinstance(operand, UnopNode) and operand.operator == operator:
+ warning(pos, "Python has no increment/decrement operator: %s%sx = %s(%sx) = x" % ((operator,)*4), 5)
return unop_node_classes[operator](pos,
operator = operator,
operand = operand)
    def analyse_types(self, env):
        # Analyse the declared target type and the operand, then decide
        # how (and whether) the cast crosses the Python/C boundary.
        base_type = self.base_type.analyse(env)
        _, self.type = self.declarator.analyse(base_type, env)
        self.operand.analyse_types(env)
        to_py = self.type.is_pyobject
        from_py = self.operand.type.is_pyobject
        # NOTE(review): ephemeral (temporary) Python objects may only be
        # cast to numeric C types — confirm this restriction covers all
        # dangling-pointer cases.
        if from_py and not to_py and self.operand.is_ephemeral() and not self.type.is_numeric:
            error(self.pos, "Casting temporary Python object to non-numeric non-Python type")
        if to_py and not from_py:
            # C -> Python: result is a new reference, hence a temp.
            self.result_ctype = py_object_type
            self.is_temp = 1
            if self.operand.type.to_py_function:
                self.operand = self.operand.coerce_to_pyobject(env)
            else:
                warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.operand.type, self.type))
        elif from_py and not to_py:
            # Python -> C: use the type's converter when one exists.
            if self.type.from_py_function:
                self.operand = self.operand.coerce_to(self.type, env)
            else:
                warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.type, self.operand.type))
        elif from_py and to_py:
            # Python -> Python: optionally insert a runtime type check
            # for extension-type targets.
            if self.typecheck and self.type.is_extension_type:
                self.operand = PyTypeTestNode(self.operand, self.type, env)
def check_const(self):
self.operand.check_const()
#
#-------------------------------------------------------------------
+def _not_in(x, seq):
+ return x not in seq
+
compile_time_binary_operators = {
'<': operator.lt,
'<=': operator.le,
'-': operator.sub,
#'/': operator.truediv,
'^': operator.xor,
- 'in': lambda x, y: x in y,
- 'not_in': lambda x, y: x not in y,
+ 'in': operator.contains,
+ 'not_in': _not_in,
}
def get_compile_time_binop(node):
"/": "PyNumber_Divide",
"//": "PyNumber_FloorDivide",
"%": "PyNumber_Remainder",
- "**": "PyNumber_Power"
+ "**": "PyNumber_Power"
}
if (type1.is_extension_type or type2.is_extension_type) \
and not type1.same_as(type2):
common_type = py_object_type
+ elif type1.is_numeric:
+ common_type = PyrexTypes.widest_numeric_type(type1, type2)
else:
common_type = type1
code1 = operand1.result_as(common_type)
# Instead, we override all the framework methods
# which use it.
+ child_attrs = ['operand1', 'operand2', 'cascade']
+
cascade = None
def compile_time_value(self, denv):
self.is_pycmp = self.is_python_comparison()
if self.is_pycmp:
self.coerce_operands_to_pyobjects(env)
+ if self.has_int_operands():
+ self.coerce_chars_to_ints(env)
if self.cascade:
#self.operand2 = self.operand2.coerce_to_temp(env) #CTT
self.operand2 = self.operand2.coerce_to_simple(env)
def has_python_operands(self):
return (self.operand1.type.is_pyobject
or self.operand2.type.is_pyobject)
-
+
def coerce_operands_to_pyobjects(self, env):
self.operand1 = self.operand1.coerce_to_pyobject(env)
self.operand2 = self.operand2.coerce_to_pyobject(env)
if self.cascade:
self.cascade.coerce_operands_to_pyobjects(env)
+ def has_int_operands(self):
+ return (self.operand1.type.is_int or self.operand2.type.is_int) \
+ or (self.cascade and self.cascade.has_int_operands())
+
+ def coerce_chars_to_ints(self, env):
+ if self.operand1.type.is_string:
+ self.operand1 = self.operand1.coerce_to(PyrexTypes.c_uchar_type, env)
+ if self.operand2.type.is_string:
+ self.operand2 = self.operand2.coerce_to(PyrexTypes.c_uchar_type, env)
+ if self.cascade:
+ self.cascade.coerce_chars_to_ints(env)
+
def allocate_subexpr_temps(self, env):
self.operand1.allocate_temps(env)
self.operand2.allocate_temps(env)
# so only need to dispose of the two main operands.
self.operand1.generate_disposal_code(code)
self.operand2.generate_disposal_code(code)
+
+ def annotate(self, code):
+ self.operand1.annotate(code)
+ self.operand2.annotate(code)
+ if self.cascade:
+ self.cascade.annotate(code)
class CascadedCmpNode(Node, CmpNode):
# operand2 ExprNode
# cascade CascadedCmpNode
+ child_attrs = ['operand2', 'cascade']
+
cascade = None
def analyse_types(self, env, operand1):
def has_python_operands(self):
return self.operand2.type.is_pyobject
-
+
def coerce_operands_to_pyobjects(self, env):
self.operand2 = self.operand2.coerce_to_pyobject(env)
if self.cascade:
self.cascade.coerce_operands_to_pyobjects(env)
+ def has_int_operands(self):
+ return self.operand2.type.is_int \
+ or (self.cascade and self.cascade.has_int_operands())
+
+ def coerce_chars_to_ints(self, env):
+ if self.operand2.type.is_string:
+ self.operand2 = self.operand2.coerce_to(PyrexTypes.c_uchar_type, env)
+ if self.cascade:
+ self.cascade.coerce_chars_to_ints(env)
+
def coerce_cascaded_operands_to_temp(self, env):
if self.cascade:
#self.operand2 = self.operand2.coerce_to_temp(env) #CTT
self.operand2.generate_disposal_code(code)
code.putln("}")
+ def annotate(self, code):
+ self.operand2.annotate(code)
+ if self.cascade:
+ self.cascade.annotate(code)
+
binop_node_classes = {
"or": BoolBinopNode,
self.pos = arg.pos
self.arg = arg
if debug_coercion:
- print self, "Coercing", self.arg
+ print("%s Coercing %s" % (self, self.arg))
+
+ def annotate(self, code):
+ self.arg.annotate(code)
+ if self.arg.type != self.type:
+ file, line, col = self.pos
+ code.annotate((file, line, col-1), AnnotationItem(style='coerce', tag='coerce', text='[%s] to [%s]' % (self.arg.type, self.type)))
class CastNode(CoercionNode):
def __init__(self, arg, dst_type, env):
# The arg is know to be a Python object, and
# the dst_type is known to be an extension type.
- assert dst_type.is_extension_type, "PyTypeTest on non extension type"
+ assert dst_type.is_extension_type or dst_type.is_builtin_type, "PyTypeTest on non extension type"
CoercionNode.__init__(self, arg)
self.type = dst_type
self.result_ctype = arg.ctype()
env.use_utility_code(type_test_utility_code)
+ def analyse_types(self, env):
+ pass
+
def result_in_temp(self):
return self.arg.result_in_temp()
def generate_result_code(self, code):
if self.type.typeobj_is_available():
code.putln(
- "if (!__Pyx_TypeTest(%s, %s)) %s" % (
- self.arg.py_result(),
- self.type.typeptr_cname,
+ "if (!(%s)) %s" % (
+ self.type.type_test_code(self.arg.py_result()),
code.error_goto(self.pos)))
else:
error(self.pos, "Cannot test type of extern C class "
code.putln('%s = %s; %s' % (
self.result_code,
rhs,
- code.error_goto_if(self.error_cond(), self.pos)))
-
- def error_cond(self):
- conds = []
- if self.type.is_string:
- conds.append("(!%s)" % self.result_code)
- elif self.type.exception_value is not None:
- conds.append("(%s == %s)" % (self.result_code, self.type.exception_value))
- if self.type.exception_check:
- conds.append("PyErr_Occurred()")
- if len(conds) > 0:
- return " && ".join(conds)
- else:
- return 0
+ code.error_goto_if(self.type.error_condition(self.result_code), self.pos)))
class CoerceToBooleanNode(CoercionNode):
#
#------------------------------------------------------------------------------------
-get_name_utility_code = [
-"""
-static PyObject *__Pyx_GetName(PyObject *dict, char *name); /*proto*/
-""","""
-static PyObject *__Pyx_GetName(PyObject *dict, char *name) {
- PyObject *result;
- result = PyObject_GetAttrString(dict, name);
- if (!result)
- PyErr_SetString(PyExc_NameError, name);
- return result;
-}
-"""]
-
get_name_interned_utility_code = [
"""
static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
unpacking_utility_code = [
"""
-static PyObject *__Pyx_UnpackItem(PyObject *); /*proto*/
+static PyObject *__Pyx_UnpackItem(PyObject *, Py_ssize_t index); /*proto*/
static int __Pyx_EndUnpack(PyObject *); /*proto*/
""","""
-static void __Pyx_UnpackError(void) {
- PyErr_SetString(PyExc_ValueError, "unpack sequence of wrong size");
-}
-
-static PyObject *__Pyx_UnpackItem(PyObject *iter) {
+static PyObject *__Pyx_UnpackItem(PyObject *iter, Py_ssize_t index) {
PyObject *item;
if (!(item = PyIter_Next(iter))) {
- if (!PyErr_Occurred())
- __Pyx_UnpackError();
+ if (!PyErr_Occurred()) {
+ PyErr_Format(PyExc_ValueError,
+ #if PY_VERSION_HEX < 0x02050000
+ "need more than %d values to unpack", (int)index);
+ #else
+ "need more than %zd values to unpack", index);
+ #endif
+ }
}
return item;
}
PyObject *item;
if ((item = PyIter_Next(iter))) {
Py_DECREF(item);
- __Pyx_UnpackError();
+ PyErr_SetString(PyExc_ValueError, "too many values to unpack");
return -1;
}
else if (!PyErr_Occurred())
if (obj == Py_None || PyObject_TypeCheck(obj, type))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %s to %s",
- obj->ob_type->tp_name, type->tp_name);
+ Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
"""]
{
PyObject *py_modname;
PyObject *result = 0;
-
+
+ #if PY_MAJOR_VERSION < 3
py_modname = PyString_FromString(modname);
+ #else
+ py_modname = PyUnicode_FromString(modname);
+ #endif
if (!py_modname)
goto bad;
if (PyDict_SetItemString(dict, "__module__", py_modname) < 0)
goto bad;
+ #if PY_MAJOR_VERSION < 3
result = PyClass_New(bases, dict, name);
+ #else
+ result = PyObject_CallFunctionObjArgs((PyObject *)&PyType_Type, name, bases, dict, NULL);
+ #endif
bad:
Py_XDECREF(py_modname);
return result;
"""]
#------------------------------------------------------------------------------------
+
+cpp_exception_utility_code = [
+"""
+#ifndef __Pyx_CppExn2PyErr
+static void __Pyx_CppExn2PyErr() {
+ try {
+ if (PyErr_Occurred())
+ ; // let the latest Python exn pass through and ignore the current one
+ else
+ throw;
+ } catch (const std::out_of_range& exn) {
+ // catch out_of_range explicitly so the proper Python exn may be raised
+ PyErr_SetString(PyExc_IndexError, exn.what());
+ } catch (const std::exception& exn) {
+ PyErr_SetString(PyExc_RuntimeError, exn.what());
+ }
+ catch (...)
+ {
+ PyErr_SetString(PyExc_RuntimeError, "Unknown exception");
+ }
+}
+#endif
+""",""]
+
+#------------------------------------------------------------------------------------
+
+append_utility_code = [
+"""
+static INLINE PyObject* __Pyx_PyObject_Append(PyObject* L, PyObject* x) {
+ if (likely(PyList_CheckExact(L))) {
+ if (PyList_Append(L, x) < 0) return NULL;
+ Py_INCREF(Py_None);
+ return Py_None; /* this is just to have an accurate signature */
+ }
+ else {
+ return PyObject_CallMethod(L, "append", "(O)", x);
+ }
+}
+""",""
+]
+
+#------------------------------------------------------------------------------------
+
+type_cache_invalidation_code = [
+"""
+#if PY_VERSION_HEX >= 0x02060000
+static void __Pyx_TypeModified(PyTypeObject* type); /*proto*/
+#else
+ #define __Pyx_TypeModified(t)
+#endif
+""","""
+#if PY_VERSION_HEX >= 0x02060000
+/* copied from typeobject.c in Python 3.0a5 */
+static void __Pyx_TypeModified(PyTypeObject* type) {
+ PyObject *raw, *ref;
+ Py_ssize_t i, n;
+
+ if (!PyType_HasFeature(type, Py_TPFLAGS_VALID_VERSION_TAG))
+ return;
+
+ raw = type->tp_subclasses;
+ if (raw != NULL) {
+ n = PyList_GET_SIZE(raw);
+ for (i = 0; i < n; i++) {
+ ref = PyList_GET_ITEM(raw, i);
+ ref = PyWeakref_GET_OBJECT(ref);
+ if (ref != Py_None) {
+ __Pyx_TypeModified((PyTypeObject *)ref);
+ }
+ }
+ }
+ type->tp_flags &= ~Py_TPFLAGS_VALID_VERSION_TAG;
+}
+#endif
+"""
+]