import Nodes
from Nodes import Node
import PyrexTypes
-from PyrexTypes import py_object_type, c_long_type, typecast, error_type, unspecified_type
+from PyrexTypes import py_object_type, c_long_type, typecast, error_type, \
+ unspecified_type
from Builtin import list_type, tuple_type, set_type, dict_type, \
unicode_type, str_type, bytes_type, type_type
import Builtin
import Symtab
import Options
from Annotate import AnnotationItem
+from Cython import Utils
from Cython.Debugging import print_call_chain
from DebugFlags import debug_disposal_code, debug_temp_alloc, \
# ConstantFolding transform will do this.
pass
def has_constant_result(self):
    # True when constant folding produced a real value: the result is
    # neither still unset nor explicitly marked as non-constant.
    return self.constant_result is not constant_value_not_set and \
        self.constant_result is not not_a_constant
def compile_time_value(self, denv):
# Return value of compile-time expression, or report error.
error(self.pos, "Invalid compile-time expression")
# we ensure that all disposal has been done by the
# time we get the result.
self.analyse_types(env)
- bool = self.coerce_to_boolean(env)
- temp_bool = bool.coerce_to_temp(env)
- return temp_bool
-
+ return self.coerce_to_boolean(env).coerce_to_simple(env)
+
# --------------- Type Inference -----------------
def type_dependencies(self, env):
for sub in self.subexpr_nodes():
sub.free_temps(code)
def generate_function_definitions(self, env, code):
    # Default no-op hook; subclasses that contain nested function
    # definitions override this to emit them.
    pass
+
# ---------------- Annotation ---------------------
def annotate(self, code):
if dst_type.is_pyobject:
if not src.type.is_pyobject:
- src = CoerceToPyTypeNode(src, env)
+ if dst_type is bytes_type and src.type.is_int:
+ src = CoerceIntToBytesNode(src, env)
+ else:
+ src = CoerceToPyTypeNode(src, env)
if not src.type.subtype_of(dst_type):
if not isinstance(src, NoneNode):
src = PyTypeTestNode(src, dst_type, env)
def coerce_to_boolean(self, env):
    # Coerce result to something acceptable as a boolean value.

    # If the value is a compile-time constant, fold the truth test now
    # instead of emitting a runtime coercion.
    if self.has_constant_result():
        bool_value = bool(self.constant_result)
        return BoolNode(self.pos, value=bool_value,
                        constant_result=bool_value)

    type = self.type
    if type.is_pyobject or type.is_ptr or type.is_float:
        return CoerceToBooleanNode(self, env)
    else:
        # C ints and enums are directly usable in a boolean context.
        if not (type.is_int or type.is_enum or type.is_error):
            error(self.pos,
                  "Type '%s' not acceptable as a boolean" % type)
        return self
# a constant, local var, C global var, struct member
# reference, or temporary.
return self.result_in_temp()
-
+
def may_be_none(self):
    # Only Python-object results can evaluate to None; C-typed
    # results never can. Subclasses may prove a stronger guarantee.
    return self.type.is_pyobject
+
def as_cython_attribute(self):
    # Default: this expression does not name a 'cython.*' attribute.
    return None
def as_none_safe_node(self, message, error="PyExc_TypeError"):
    # Wrap the node in a NoneCheckNode unless it is known to be
    # not-None (e.g. because it is a Python literal).
    if self.may_be_none():
        return NoneCheckNode(self, error, message)
    else:
        return self
+
+
class AtomicExprNode(ExprNode):
# Abstract base class for expression nodes which have
# no sub-expressions.
def is_simple(self):
    # Atomic expressions have no sub-expressions and are always simple.
    return 1
-
+
def may_be_none(self):
    # Atomic expression nodes are never the None object.
    return False
+
def analyse_types(self, env):
    # Nothing to analyse; the type is fixed by the node class.
    pass
def compile_time_value(self, denv):
    # The compile-time value of None is None itself.
    return None
-
+
def may_be_none(self):
    # This node *is* the None literal.
    return True
+
+
class EllipsisNode(PyConstNode):
# '...' in a subscript list.
def is_simple(self):
    # The Ellipsis literal is a constant, hence trivially simple.
    return 1
-
+
def may_be_none(self):
    # The Ellipsis literal is never None.
    return False
+
def analyse_types(self, env):
    pass # Types are held in class variables
self.result_code = self.get_constant_c_result_code()
def get_constant_c_result_code(self):
    # Emit the integer literal in a form the C compiler accepts,
    # together with its unsignedness/longness suffixes.
    value = self.value
    if isinstance(value, basestring) and len(value) > 2:
        # must convert C-incompatible Py3 oct/bin notations
        if value[1] in 'oO':
            value = value[0] + value[2:] # '0o123' => '0123'
        elif value[1] in 'bB':
            value = int(value[2:], 2)
    return str(value) + self.unsigned + self.longness
def calculate_result_code(self):
    # The C result was precomputed during evaluation-code generation.
    return self.result_code
def calculate_constant_result(self):
    # Parse the literal text (handles Py2/Py3 notations) rather than
    # relying on int(x, 0), which rejects some spellings.
    self.constant_result = Utils.str_to_number(self.value)
def compile_time_value(self, denv):
    # Same parsing as calculate_constant_result(), for DEF evaluation.
    return Utils.str_to_number(self.value)
class FloatNode(ConstNode):
return float(self.value)
def calculate_result_code(self):
    # Emit the float literal verbatim, but map the special values C
    # cannot spell (nan/inf) onto Py_HUGE_VAL expressions.
    strval = self.value
    assert isinstance(strval, (str, unicode))
    cmpval = repr(float(strval))
    if cmpval == 'nan':
        return "(Py_HUGE_VAL * 0)"
    elif cmpval == 'inf':
        return "Py_HUGE_VAL"
    elif cmpval == '-inf':
        return "(-Py_HUGE_VAL)"
    else:
        return strval
def can_coerce_to_char_literal(self):
    # Only a one-character literal can become a C char.
    return len(self.value) == 1
def coerce_to_boolean(self, env):
    # This is special because we start off as a C char*. Testing
    # that for truth directly would yield the wrong result.
    return BoolNode(self.pos, value=bool(self.value))
+
def coerce_to(self, dst_type, env):
if dst_type.is_int:
if not self.can_coerce_to_char_literal():
- error(self.pos, "Only single-character strings can be coerced into ints.")
+ error(self.pos, "Only single-character string literals can be coerced into ints.")
+ return self
+ if dst_type is PyrexTypes.c_py_unicode_type:
+ error(self.pos, "Bytes literals cannot coerce to Py_UNICODE, use a unicode literal instead.")
return self
return CharNode(self.pos, value=self.value)
def coerce_to(self, dst_type, env):
    # A unicode literal may coerce to Py_UNICODE (single character
    # only) or stay a Python object; all other C targets are errors.
    if dst_type is self.type:
        pass
    elif dst_type is PyrexTypes.c_py_unicode_type:
        if not self.can_coerce_to_char_literal():
            error(self.pos, "Only single-character Unicode string literals can be coerced into Py_UNICODE.")
            return self
        int_value = ord(self.value)
        return IntNode(self.pos, value=int_value, constant_result=int_value)
    elif not dst_type.is_pyobject:
        error(self.pos, "Unicode literals do not support coercion to C types other than Py_UNICODE.")
    elif dst_type is not py_object_type:
        if not self.check_for_coercion_error(dst_type):
            self.fail_assignment(dst_type)
    return self
def can_coerce_to_char_literal(self):
    # Only a one-character literal can become a Py_UNICODE value.
    return len(self.value) == 1
+
def generate_evaluation_code(self, code):
    # The literal is interned as a module-level string constant.
    self.result_code = code.get_py_string_const(self.value)
type = py_object_type
def calculate_constant_result(self):
    # Parse the long literal text via the shared numeric parser.
    self.constant_result = Utils.str_to_number(self.value)
def compile_time_value(self, denv):
    # Same parsing as calculate_constant_result(), for DEF evaluation.
    return Utils.str_to_number(self.value)
def analyse_types(self, env):
    # Building the Python long requires a temporary result slot.
    self.is_temp = 1
def may_be_none(self):
    # A freshly constructed long object is never None.
    return False
+
gil_message = "Constructing Python long int"
def generate_result_code(self, code):
def analyse_types(self, env):
self.type.create_declaration_utility_code(env)
def may_be_none(self):
    # A freshly constructed value of this node type is never None.
    return False
+
def coerce_to(self, dst_type, env):
if self.type is dst_type:
return self
# C++ new statement
#
- # cppclass string c++ class to create
- # template_parameters None or [ExprNode] temlate parameters, if any
+ # cppclass node c++ class to create
+
+ type = None
def infer_type(self, env):
- entry = env.lookup(self.cppclass)
- if entry is None or not entry.is_cpp_class:
+ type = self.cppclass.analyse_as_type(env)
+ if type is None or not type.is_cpp_class:
error(self.pos, "new operator can only be applied to a C++ class")
+ self.type = error_type
return
self.cpp_check(env)
- if self.template_parameters is not None:
- template_types = [v.analyse_as_type(env) for v in self.template_parameters]
- type = entry.type.specialize_here(self.pos, template_types)
- else:
- type = entry.type
constructor = type.scope.lookup(u'<init>')
if constructor is None:
return_type = PyrexTypes.CFuncType(type, [])
return self.type
def analyse_types(self, env):
    # Type analysis is idempotent here: infer_type() caches into
    # self.type, so only run it on the first call.
    if self.type is None:
        self.infer_type(env)
+
def may_be_none(self):
    # The result of a C++ 'new' expression is never None.
    return False
+
def generate_result_code(self, code):
    # No code needed here; the construction is emitted at the call site.
    pass
else:
type = py_object_type
self.entry = env.declare_var(self.name, type, self.pos)
- env.control_flow.set_state(self.pos, (self.name, 'initalized'), True)
+ env.control_flow.set_state(self.pos, (self.name, 'initialized'), True)
env.control_flow.set_state(self.pos, (self.name, 'source'), 'assignment')
if self.entry.is_declared_generic:
self.result_ctype = py_object_type
elif entry.is_local and False:
# control flow not good enough yet
- assigned = entry.scope.control_flow.get_state((entry.name, 'initalized'), self.pos)
+ assigned = entry.scope.control_flow.get_state((entry.name, 'initialized'), self.pos)
if assigned is False:
error(self.pos, "local variable '%s' referenced before assignment" % entry.name)
elif not Options.init_local_none and assigned is None:
code.putln('if (%s == 0) { PyErr_SetString(PyExc_UnboundLocalError, "%s"); %s }' %
(entry.cname, entry.name, code.error_goto(self.pos)))
- entry.scope.control_flow.set_state(self.pos, (entry.name, 'initalized'), True)
+ entry.scope.control_flow.set_state(self.pos, (entry.name, 'initialized'), True)
def generate_assignment_code(self, rhs, code):
#print "NameNode.generate_assignment_code:", self.name ###
rhs.make_owned_reference(code)
if entry.is_cglobal:
code.put_gotref(self.py_result())
- if self.use_managed_ref and not self.lhs_of_first_assignment:
- if entry.is_local and not Options.init_local_none:
- initalized = entry.scope.control_flow.get_state((entry.name, 'initalized'), self.pos)
- if initalized is True:
+ if not self.lhs_of_first_assignment:
+ if entry.is_local and not Options.init_local_none:
+ initialized = entry.scope.control_flow.get_state((entry.name, 'initialized'), self.pos)
+ if initialized is True:
+ code.put_decref(self.result(), self.ctype())
+ elif initialized is None:
+ code.put_xdecref(self.result(), self.ctype())
+ else:
code.put_decref(self.result(), self.ctype())
- elif initalized is None:
- code.put_xdecref(self.result(), self.ctype())
- else:
- code.put_decref(self.result(), self.ctype())
- if self.use_managed_ref:
if entry.is_cglobal:
code.put_giveref(rhs.py_result())
- code.putln('%s = %s;' % (self.result(), rhs.result_as(self.ctype())))
+
+ code.putln('%s = %s;' % (self.result(),
+ rhs.result_as(self.ctype())))
if debug_disposal_code:
print("NameNode.generate_assignment_code:")
print("...generating post-assignment code for %s" % rhs)
return self.base.type_dependencies(env)
def infer_type(self, env):
    # Infer the result type of an indexing operation from the base type.
    if isinstance(self.base, BytesNode):
        # indexing a bytes literal gives different results in Py2/Py3
        return py_object_type
    base_type = self.base.infer_type(env)
    if base_type.is_ptr or base_type.is_array:
        return base_type.base_type
    elif base_type is unicode_type and self.index.infer_type(env).is_int:
        # Py_UNICODE will automatically coerce to a unicode string
        # if required, so this is safe. We only infer Py_UNICODE
        # when the index is a C integer type. Otherwise, we may
        # need to use normal Python item access, in which case
        # it's faster to return the one-char unicode string than
        # to receive it, throw it away, and potentially rebuild it
        # on a subsequent PyObject coercion.
        return PyrexTypes.c_py_unicode_type
    elif base_type in (str_type, unicode_type):
        # these types will always return themselves on Python indexing
        return base_type
    else:
        # TODO: Handle buffers (hopefully without too much redundancy).
        return py_object_type
else:
self.base.entry.buffer_aux.writable_needed = True
else:
+ base_type = self.base.type
if isinstance(self.index, TupleNode):
self.index.analyse_types(env, skip_children=skip_child_analysis)
elif not skip_child_analysis:
self.index.analyse_types(env)
self.original_index_type = self.index.type
- if self.base.type.is_pyobject:
+ if base_type is PyrexTypes.c_py_unicode_type:
+ # we infer Py_UNICODE for unicode strings in some
+ # cases, but indexing must still work for them
+ if self.index.constant_result in (0, -1):
+ # FIXME: we know that this node is redundant -
+ # currently, this needs to get handled in Optimize.py
+ pass
+ self.base = self.base.coerce_to_pyobject(env)
+ base_type = self.base.type
+ if base_type.is_pyobject:
if self.index.type.is_int:
if (not setting
- and (self.base.type is list_type or self.base.type is tuple_type)
+ and (base_type in (list_type, tuple_type, unicode_type))
and (not self.index.type.signed or isinstance(self.index, IntNode) and int(self.index.value) >= 0)
and not env.directives['boundscheck']):
self.is_temp = 0
else:
self.index = self.index.coerce_to_pyobject(env)
self.is_temp = 1
- self.type = py_object_type
+ if self.index.type.is_int and base_type is unicode_type:
+ # Py_UNICODE will automatically coerce to a unicode string
+ # if required, so this is fast and safe
+ self.type = PyrexTypes.c_py_unicode_type
+ else:
+ self.type = py_object_type
else:
- if self.base.type.is_ptr or self.base.type.is_array:
- self.type = self.base.type.base_type
+ if base_type.is_ptr or base_type.is_array:
+ self.type = base_type.base_type
if self.index.type.is_pyobject:
self.index = self.index.coerce_to(
PyrexTypes.c_py_ssize_t_type, env)
error(self.pos,
"Invalid index type '%s'" %
self.index.type)
- elif self.base.type.is_cpp_class:
+ elif base_type.is_cpp_class:
function = env.lookup_operator("[]", [self.base, self.index])
- function = self.base.type.scope.lookup("operator[]")
if function is None:
- error(self.pos, "Indexing '%s' not supported for index type '%s'" % (self.base.type, self.index.type))
+ error(self.pos, "Indexing '%s' not supported for index type '%s'" % (base_type, self.index.type))
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
self.index = self.index.coerce_to(func_type.args[0].type, env)
self.type = func_type.return_type
if setting and not func_type.return_type.is_reference:
- error(self.pos, "Can't set non-reference '%s'" % self.type)
+ error(self.pos, "Can't set non-reference result '%s'" % self.type)
else:
error(self.pos,
"Attempting to index non-array type '%s'" %
- self.base.type)
+ base_type)
self.type = PyrexTypes.error_type
+
gil_message = "Indexing Python object"
def nogil_check(self, env):
return "PyList_GET_ITEM(%s, %s)" % (self.base.result(), self.index.result())
elif self.base.type is tuple_type:
return "PyTuple_GET_ITEM(%s, %s)" % (self.base.result(), self.index.result())
+ elif self.base.type is unicode_type and self.type is PyrexTypes.c_py_unicode_type:
+ return "PyUnicode_AS_UNICODE(%s)[%s]" % (self.base.result(), self.index.result())
else:
return "(%s[%s])" % (
self.base.result(), self.index.result())
# is_temp is True, so must pull out value and incref it.
code.putln("%s = *%s;" % (self.result(), self.buffer_ptr_code))
code.putln("__Pyx_INCREF((PyObject*)%s);" % self.result())
- elif self.type.is_pyobject and self.is_temp:
- if self.index.type.is_int:
- index_code = self.index.result()
- if self.base.type is list_type:
- function = "__Pyx_GetItemInt_List"
- elif self.base.type is tuple_type:
- function = "__Pyx_GetItemInt_Tuple"
+ elif self.is_temp:
+ if self.type.is_pyobject:
+ if self.index.type.is_int:
+ index_code = self.index.result()
+ if self.base.type is list_type:
+ function = "__Pyx_GetItemInt_List"
+ elif self.base.type is tuple_type:
+ function = "__Pyx_GetItemInt_Tuple"
+ else:
+ function = "__Pyx_GetItemInt"
+ code.globalstate.use_utility_code(getitem_int_utility_code)
else:
- function = "__Pyx_GetItemInt"
- code.globalstate.use_utility_code(getitem_int_utility_code)
- else:
- function = "PyObject_GetItem"
- index_code = self.index.py_result()
- sign_code = ""
- code.putln(
- "%s = %s(%s, %s%s); if (!%s) %s" % (
- self.result(),
- function,
- self.base.py_result(),
- index_code,
- self.extra_index_params(),
- self.result(),
- code.error_goto(self.pos)))
- code.put_gotref(self.py_result())
+ index_code = self.index.py_result()
+ if self.base.type is dict_type:
+ function = "__Pyx_PyDict_GetItem"
+ code.globalstate.use_utility_code(getitem_dict_utility_code)
+ else:
+ function = "PyObject_GetItem"
+ code.putln(
+ "%s = %s(%s, %s%s); if (!%s) %s" % (
+ self.result(),
+ function,
+ self.base.py_result(),
+ index_code,
+ self.extra_index_params(),
+ self.result(),
+ code.error_goto(self.pos)))
+ code.put_gotref(self.py_result())
+ elif self.type is PyrexTypes.c_py_unicode_type and self.base.type is unicode_type:
+ assert self.index.type.is_int
+ index_code = self.index.result()
+ function = "__Pyx_GetItemInt_Unicode"
+ code.globalstate.use_utility_code(getitem_int_pyunicode_utility_code)
+ code.putln(
+ "%s = %s(%s, %s%s); if (unlikely(%s == (Py_UNICODE)-1)) %s;" % (
+ self.result(),
+ function,
+ self.base.py_result(),
+ index_code,
+ self.extra_index_params(),
+ self.result(),
+ code.error_goto(self.pos)))
def generate_setitem_code(self, value_code, code):
if self.index.type.is_int:
index_code = self.index.py_result()
if self.base.type is dict_type:
function = "PyDict_SetItem"
- # It would seem that we could specalized lists/tuples, but that
+ # It would seem that we could specialize lists/tuples, but that
# shouldn't happen here.
# Both PyList_SetItem PyTuple_SetItem and a Py_ssize_t as input,
# not a PyObject*, and bad conversion here would give the wrong
if self.base.type.is_string:
if self.stop is None:
code.putln(
- "%s = __Pyx_PyBytes_FromString(%s + %s); %s" % (
+ "%s = PyBytes_FromString(%s + %s); %s" % (
self.result(),
self.base.result(),
self.start_code(),
code.error_goto_if_null(self.result(), self.pos)))
else:
code.putln(
- "%s = __Pyx_PyBytes_FromStringAndSize(%s + %s, %s - %s); %s" % (
+ "%s = PyBytes_FromStringAndSize(%s + %s, %s - %s); %s" % (
self.result(),
self.base.result(),
self.start_code(),
self.base.py_result(),
self.start_code(),
self.stop_code(),
- rhs.result()))
+ rhs.py_result()))
else:
start_offset = ''
if self.start:
self.analyse_types(env)
self.coerce_to(type, env)
return True
+ elif type and type.is_cpp_class:
+ for arg in self.args:
+ arg.analyse_types(env)
+ constructor = type.scope.lookup("<init>")
+ self.function = RawCNameExprNode(self.function.pos, constructor.type)
+ self.function.entry = constructor
+ self.function.set_cname(type.declaration_code(""))
+ self.analyse_c_function_call(env)
+ return True
+
def is_lvalue(self):
    # A call result is assignable only when it is a C++ reference.
    return self.type.is_reference
def nogil_check(self, env):
func_type = self.function_type()
return func_type
def analyse_c_function_call(self, env):
+ if self.function.type is error_type:
+ self.type = error_type
+ return
if self.function.type.is_cpp_class:
- function = self.function.type.scope.lookup("operator()")
- if function is None:
+ overloaded_entry = self.function.type.scope.lookup("operator()")
+ if overloaded_entry is None:
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
+ elif hasattr(self.function, 'entry'):
+ overloaded_entry = self.function.entry
else:
- function = self.function.entry
- entry = PyrexTypes.best_match(self.args, function.all_alternatives(), self.pos)
- if not entry:
- self.type = PyrexTypes.error_type
- self.result_code = "<error>"
- return
- self.function.entry = entry
- self.function.type = entry.type
- func_type = self.function_type()
+ overloaded_entry = None
+ if overloaded_entry:
+ entry = PyrexTypes.best_match(self.args, overloaded_entry.all_alternatives(), self.pos)
+ if not entry:
+ self.type = PyrexTypes.error_type
+ self.result_code = "<error>"
+ return
+ self.function.entry = entry
+ self.function.type = entry.type
+ func_type = self.function_type()
+ else:
+ func_type = self.function_type()
+ if not func_type.is_cfunction:
+ error(self.pos, "Calling non-function type '%s'" % func_type)
+ self.type = PyrexTypes.error_type
+ self.result_code = "<error>"
+ return
# Check no. of args
max_nargs = len(func_type.args)
expected_nargs = max_nargs - func_type.optional_arg_count
self.type = tuple_type
self.is_temp = 1
def may_be_none(self):
    # A freshly constructed tuple object is never None.
    return False
+
nogil_check = Node.gil_error
gil_message = "Constructing Python tuple"
module_scope = self.obj.analyse_as_module(env)
if module_scope:
return module_scope.lookup_type(self.attribute)
+ if not isinstance(self.obj, (UnicodeNode, StringNode, BytesNode)):
+ base_type = self.obj.analyse_as_type(env)
+ if base_type and hasattr(base_type, 'scope'):
+ return base_type.scope.lookup_type(self.attribute)
return None
def analyse_as_extension_type(self, env):
self.type = py_object_type
self.is_temp = 1
def may_be_none(self):
    # A freshly constructed object of this node type is never None.
    return False
+
def analyse_target_types(self, env):
self.iterator = PyTempNode(self.pos, env)
self.unpacked_items = []
# allocates the temps in a rather hacky way -- the assignment
# is evaluated twice, within each if-block.
- code.globalstate.use_utility_code(unpacking_utility_code)
-
if rhs.type is tuple_type:
tuple_check = "likely(%s != Py_None)"
else:
rhs.py_result(), len(self.args)))
code.putln(code.error_goto(self.pos))
else:
+ code.globalstate.use_utility_code(unpacking_utility_code)
+
self.iterator.allocate(code)
code.putln(
"%s = PyObject_GetIter(%s); %s" % (
# generate_evaluation_code which will do that.
class ScopedExprNode(ExprNode):
    # Abstract base class for ExprNodes that have their own local
    # scope, such as generator expressions.
    #
    # expr_scope    Scope    the inner scope of the expression

    subexprs = []
    expr_scope = None

    def analyse_types(self, env):
        # nothing to do here, the children will be analysed separately
        pass

    def analyse_expressions(self, env):
        # nothing to do here, the children will be analysed separately
        pass

    def analyse_scoped_expressions(self, env):
        # this is called with the expr_scope as env
        pass

    def init_scope(self, outer_scope, expr_scope=None):
        # default: adopt the given scope (possibly None); subclasses
        # may create their own inner scope instead
        self.expr_scope = expr_scope
+
+class ComprehensionNode(ScopedExprNode):
subexprs = ["target"]
child_attrs = ["loop", "append"]
+ # different behaviour in Py2 and Py3: leak loop variables or not?
+ has_local_scope = False # Py2 behaviour as default
+
def infer_type(self, env):
    # The comprehension's type is that of its target container.
    return self.target.infer_type(env)
def analyse_declarations(self, env):
    self.append.target = self # this is used in the PyList_Append of the inner loop
    # set up the (optional) inner scope before analysing the loop
    self.init_scope(env)
    if self.expr_scope is not None:
        self.loop.analyse_declarations(self.expr_scope)
    else:
        self.loop.analyse_declarations(env)
+
def init_scope(self, outer_scope, expr_scope=None):
    # An explicitly supplied scope wins; otherwise Py3 semantics
    # (has_local_scope) get a fresh inner scope, while the Py2
    # default leaks loop variables into the outer scope.
    if expr_scope is not None:
        self.expr_scope = expr_scope
    elif self.has_local_scope:
        self.expr_scope = Symtab.GeneratorExpressionScope(outer_scope)
    else:
        self.expr_scope = None
def analyse_types(self, env):
    self.target.analyse_expressions(env)
    self.type = self.target.type
    # with a local scope, the loop is analysed later via
    # analyse_scoped_expressions() against the inner scope
    if not self.has_local_scope:
        self.loop.analyse_expressions(env)
+
def analyse_scoped_expressions(self, env):
    # called with self.expr_scope as env when a local scope is used
    if self.has_local_scope:
        self.loop.analyse_expressions(env)
+
def may_be_none(self):
    # A comprehension always builds a fresh container, never None.
    return False
def calculate_result_code(self):
    # The comprehension's C result is its target container's result.
    return self.target.result()
self.loop.annotate(code)
-class ComprehensionAppendNode(ExprNode):
+class ComprehensionAppendNode(Node):
# Need to be careful to avoid infinite recursion:
# target must not be in child_attrs/subexprs
- subexprs = ['expr']
+
+ child_attrs = ['expr']
type = PyrexTypes.c_int_type
def analyse_expressions(self, env):
    # Appending goes through a C API call, so the value being
    # appended must be a Python object.
    self.expr.analyse_expressions(env)
    if not self.expr.type.is_pyobject:
        self.expr = self.expr.coerce_to_pyobject(env)
- def generate_result_code(self, code):
+ def generate_execution_code(self, code):
if self.target.type is list_type:
function = "PyList_Append"
elif self.target.type is set_type:
else:
raise InternalError(
"Invalid type for comprehension node: %s" % self.target.type)
-
- code.putln("%s = %s(%s, (PyObject*)%s); %s" %
- (self.result(),
- function,
- self.target.result(),
- self.expr.result(),
- code.error_goto_if(self.result(), self.pos)))
+
+ self.expr.generate_evaluation_code(code)
+ code.putln(code.error_goto_if("%s(%s, (PyObject*)%s)" % (
+ function,
+ self.target.result(),
+ self.expr.result()
+ ), self.pos))
+ self.expr.generate_disposal_code(code)
+ self.expr.free_temps(code)
+
def generate_function_definitions(self, env, code):
    # Forward to the appended expression (it may contain lambdas etc.).
    self.expr.generate_function_definitions(env, code)
+
def annotate(self, code):
    # Annotation only covers the appended expression.
    self.expr.annotate(code)
class DictComprehensionAppendNode(ComprehensionAppendNode):
    # Sets a key/value pair in the dict being built by a dict
    # comprehension; both expressions must be Python objects.

    child_attrs = ['key_expr', 'value_expr']

    def analyse_expressions(self, env):
        self.key_expr.analyse_expressions(env)
        if not self.key_expr.type.is_pyobject:
            self.key_expr = self.key_expr.coerce_to_pyobject(env)
        self.value_expr.analyse_expressions(env)
        if not self.value_expr.type.is_pyobject:
            self.value_expr = self.value_expr.coerce_to_pyobject(env)

    def generate_execution_code(self, code):
        # evaluate key and value, store them, then dispose of both
        self.key_expr.generate_evaluation_code(code)
        self.value_expr.generate_evaluation_code(code)
        code.putln(code.error_goto_if("PyDict_SetItem(%s, (PyObject*)%s, (PyObject*)%s)" % (
            self.target.result(),
            self.key_expr.result(),
            self.value_expr.result()
            ), self.pos))
        self.key_expr.generate_disposal_code(code)
        self.key_expr.free_temps(code)
        self.value_expr.generate_disposal_code(code)
        self.value_expr.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.key_expr.generate_function_definitions(env, code)
        self.value_expr.generate_function_definitions(env, code)

    def annotate(self, code):
        self.key_expr.annotate(code)
        self.value_expr.annotate(code)
+
+
class GeneratorExpressionNode(ScopedExprNode):
    # A generator expression, e.g. (i for i in range(10))
    #
    # Result is a generator.
    #
    # loop      ForStatNode    the for-loop, containing a YieldExprNode

    child_attrs = ["loop"]

    type = py_object_type

    def analyse_declarations(self, env):
        self.init_scope(env)
        self.loop.analyse_declarations(self.expr_scope)

    def init_scope(self, outer_scope, expr_scope=None):
        # unlike comprehensions, generator expressions always get
        # their own inner scope
        if expr_scope is not None:
            self.expr_scope = expr_scope
        else:
            self.expr_scope = Symtab.GeneratorExpressionScope(outer_scope)

    def analyse_types(self, env):
        self.is_temp = True

    def analyse_scoped_expressions(self, env):
        # called with self.expr_scope as env
        self.loop.analyse_expressions(env)

    def may_be_none(self):
        return False

    def annotate(self, code):
        self.loop.annotate(code)
+
+
class InlinedGeneratorExpressionNode(GeneratorExpressionNode):
    # An inlined generator expression for which the result is
    # calculated inside of the loop.  This will only be created by
    # transforms when replacing builtin calls on generator
    # expressions.
    #
    # loop           ForStatNode      the for-loop, not containing any YieldExprNodes
    # result_node    ResultRefNode    the reference to the result value temp
    # orig_func      String           the name of the builtin function this node replaces

    child_attrs = ["loop"]

    def analyse_types(self, env):
        self.type = self.result_node.type
        self.is_temp = True

    def coerce_to(self, dst_type, env):
        if self.orig_func == 'sum' and dst_type.is_numeric:
            # we can optimise by dropping the aggregation variable into C
            self.result_node.type = self.type = dst_type
            return self
        return GeneratorExpressionNode.coerce_to(self, dst_type, env)

    def generate_result_code(self, code):
        # the loop fills the result temp; just point the result
        # reference at our own result slot and run the loop
        self.result_node.result_code = self.result()
        self.loop.generate_execution_code(code)
class SetNode(ExprNode):
self.type = set_type
self.is_temp = 1
def may_be_none(self):
    # A freshly constructed set object is never None.
    return False
+
def calculate_constant_result(self):
    # Fold a set literal whose elements are all constants.
    self.constant_result = set([
        arg.constant_result for arg in self.args])
item.analyse_types(env)
self.obj_conversion_errors = held_errors()
release_errors(ignore=True)
+
def may_be_none(self):
    # A freshly constructed dict object is never None.
    return False
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject:
self.is_temp = 1
env.use_utility_code(create_class_utility_code);
def may_be_none(self):
    # A freshly constructed class object is never None.
    return False
+
gil_message = "Constructing Python class"
def generate_result_code(self, code):
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class BoundMethodNode(ExprNode):
    # Helper class used in the implementation of Python
    # class definitions. Constructs a bound method
    # object from a class and a function.
    #
    # function       ExprNode    Function object
    # self_object    ExprNode    self object

    subexprs = ['function']

    def analyse_types(self, env):
        self.function.analyse_types(env)
        self.type = py_object_type
        self.is_temp = 1

    # fixed typo in the original message ("an bound method")
    gil_message = "Constructing a bound method"

    def generate_result_code(self, code):
        code.putln(
            "%s = PyMethod_New(%s, %s, (PyObject*)%s->ob_type); %s" % (
                self.result(),
                self.function.py_result(),
                self.self_object.py_result(),
                self.self_object.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class UnboundMethodNode(ExprNode):
# Helper class used in the implementation of Python
def analyse_types(self, env):
self.function.analyse_types(env)
def may_be_none(self):
    # A freshly constructed unbound method object is never None.
    return False
+
gil_message = "Constructing an unbound method"
def generate_result_code(self, code):
code.put_gotref(self.py_result())
class PyCFunctionNode(ExprNode):
    # Helper class used in the implementation of Python
    # class definitions. Constructs a PyCFunction object
    # from a PyMethodDef struct.
    #
    # pymethdef_cname    string              PyMethodDef structure
    # self_object        ExprNode or None    object the function is bound to
    # binding            bool                use the binding constructor

    subexprs = []
    self_object = None
    binding = False

    type = py_object_type
    is_temp = 1

    def analyse_types(self, env):
        if self.binding:
            env.use_utility_code(binding_cfunc_utility_code)

    def may_be_none(self):
        return False

    gil_message = "Constructing Python function"

    def self_result_code(self):
        # C expression for the function's bound object (NULL if unbound)
        if self.self_object is None:
            self_result = "NULL"
        else:
            self_result = self.self_object.py_result()
        return self_result

    def generate_result_code(self, code):
        if self.binding:
            constructor = "%s_New" % Naming.binding_cfunc
        else:
            constructor = "PyCFunction_New"
        code.putln(
            "%s = %s(&%s, %s); %s" % (
                self.result(),
                constructor,
                self.pymethdef_cname,
                self.self_result_code(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class InnerFunctionNode(PyCFunctionNode):
    # Special PyCFunctionNode that depends on a closure class

    binding = True

    def self_result_code(self):
        # the bound object is the enclosing closure scope instance
        return "((PyObject*)%s)" % Naming.cur_scope_cname
+
class LambdaNode(InnerFunctionNode):
    # Lambda expression node (only used as a function reference)
    #
    # args             [CArgDeclNode]           formal arguments
    # star_arg         PyArgDeclNode or None    * argument
    # starstar_arg     PyArgDeclNode or None    ** argument
    # lambda_name      string                   a module-globally unique lambda name
    # result_expr      ExprNode
    # def_node         DefNode                  the underlying function 'def' node

    child_attrs = ['def_node']

    def_node = None
    name = StringEncoding.EncodedString('<lambda>')

    def analyse_declarations(self, env):
        # delegate to the wrapped DefNode and register it with the module
        self.def_node.analyse_declarations(env)
        self.pymethdef_cname = self.def_node.entry.pymethdef_cname
        env.add_lambda_def(self.def_node)
+
class YieldExprNode(ExprNode):
    # Yield expression node
    #
    # arg           ExprNode    the value to return from the generator
    # label_name    string      name of the C label used for this yield

    subexprs = []
    type = py_object_type

    def analyse_types(self, env):
        self.is_temp = 1
        if self.arg is not None:
            self.arg.analyse_types(env)
            if not self.arg.type.is_pyobject:
                self.arg = self.arg.coerce_to_pyobject(env)
        # generators are not implemented yet; reject the construct
        error(self.pos, "Generators are not supported")

    def generate_result_code(self, code):
        # stub code generation: emits placeholder comments only,
        # since analyse_types() already reported an error
        self.label_name = code.new_label('resume_from_yield')
        code.use_label(self.label_name)
        code.putln("/* FIXME: save temporary variables */")
        code.putln("/* FIXME: return from function, yielding value */")
        code.put_label(self.label_name)
        code.putln("/* FIXME: restore temporary variables and */")
        code.putln("/* FIXME: extract sent value from closure */")
+
+
#-------------------------------------------------------------------
#
# Unary operator nodes
def is_cpp_operation(self):
type = self.operand.type
- return type.is_cpp_class or type.is_reference and type.base_type.is_cpp_class
+ return type.is_cpp_class
def coerce_operand_to_pyobject(self, env):
self.operand = self.operand.coerce_to_pyobject(env)
def analyse_cpp_operation(self, env):
type = self.operand.type
- if type.is_ptr or type.is_reference:
+ if type.is_ptr:
type = type.base_type
- entry = env.lookup(type.name)
- function = entry.type.scope.lookup("operator%s" % self.operator)
+ function = type.scope.lookup("operator%s" % self.operator)
if not function:
error(self.pos, "'%s' operator not defined for %s"
% (self.operator, type))
if from_py and not to_py and self.operand.is_ephemeral() and not self.type.is_numeric:
error(self.pos, "Casting temporary Python object to non-numeric non-Python type")
if to_py and not from_py:
- if self.operand.type.can_coerce_to_pyobject(env):
+ if self.type is bytes_type and self.operand.type.is_int:
+ # FIXME: the type cast node isn't needed in this case
+ # and can be dropped once analyse_types() can return a
+ # different node
+ self.operand = CoerceIntToBytesNode(self.operand, env)
+ elif self.operand.type.can_coerce_to_pyobject(env):
self.result_ctype = py_object_type
self.operand = self.operand.coerce_to_pyobject(env)
else:
self.pos, value=StringEncoding.EncodedString(str(self.operand.type)))
self.literal.analyse_types(env)
self.literal = self.literal.coerce_to_pyobject(env)
-
+
+ def may_be_none(self):
+ return False
+
def generate_evaluation_code(self, code):
self.literal.generate_evaluation_code(code)
self.operand2.analyse_types(env)
if self.is_py_operation():
self.coerce_operands_to_pyobjects(env)
- self.type = py_object_type
+ self.type = self.result_type(self.operand1.type,
+ self.operand2.type)
+ assert self.type.is_pyobject
self.is_temp = 1
elif self.is_cpp_operation():
self.analyse_cpp_operation(env)
return type1.is_pyobject or type2.is_pyobject
def is_cpp_operation(self):
- type1 = self.operand1.type
- type2 = self.operand2.type
- if type1.is_reference:
- type1 = type1.base_type
- if type2.is_reference:
- type2 = type2.base_type
- return (type1.is_cpp_class
- or type2.is_cpp_class)
+ return (self.operand1.type.is_cpp_class
+ or self.operand2.type.is_cpp_class)
def analyse_cpp_operation(self, env):
type1 = self.operand1.type
return type1
# multiplication of containers/numbers with an
# integer value always (?) returns the same type
- if type1.is_int:
- return type2
- elif type2.is_int:
+ if type2.is_int:
return type1
+ elif type2.is_builtin_type and type1.is_int and self.operator == '*':
+ # multiplication of containers/numbers with an
+ # integer value always (?) returns the same type
+ return type2
return py_object_type
else:
return self.compute_c_result_type(type1, type2)
self.operand1.result(),
self.operand2.result())
+ def is_py_operation_types(self, type1, type2):
+ return (type1 is PyrexTypes.c_py_unicode_type or
+ type2 is PyrexTypes.c_py_unicode_type or
+ BinopNode.is_py_operation_types(self, type1, type2))
+
def py_operation_function(self):
return self.py_functions[self.operator]
if not self.type.is_pyobject:
self.zerodivision_check = (
self.cdivision is None and not env.directives['cdivision']
- and (self.operand2.constant_result is not_a_constant or
+ and (not self.operand2.has_constant_result() or
self.operand2.constant_result == 0))
if self.zerodivision_check or env.directives['cdivision_warnings']:
# Need to check ahead of time to warn or raise zero division error
return "(%s / %s)" % (op1, op2)
else:
return "__Pyx_div_%s(%s, %s)" % (
- self.type.specalization_name(),
+ self.type.specialization_name(),
self.operand1.result(),
self.operand2.result())
self.operand2.result())
else:
return "__Pyx_mod_%s(%s, %s)" % (
- self.type.specalization_name(),
+ self.type.specialization_name(),
self.operand1.result(),
self.operand2.result())
def infer_type(self, env):
type1 = self.operand1.infer_type(env)
type2 = self.operand2.infer_type(env)
- return PyrexTypes.spanning_type(type1, type2)
+ return PyrexTypes.independent_spanning_type(type1, type2)
def calculate_constant_result(self):
if self.operator == 'and':
def analyse_types(self, env):
self.operand1.analyse_types(env)
self.operand2.analyse_types(env)
- self.type = PyrexTypes.spanning_type(self.operand1.type, self.operand2.type)
+ self.type = PyrexTypes.independent_spanning_type(self.operand1.type, self.operand2.type)
self.operand1 = self.operand1.coerce_to(self.type, env)
self.operand2 = self.operand2.coerce_to(self.type, env)
return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env)
def infer_type(self, env):
- return self.compute_result_type(self.true_val.infer_type(env),
- self.false_val.infer_type(env))
+ return PyrexTypes.independent_spanning_type(self.true_val.infer_type(env),
+ self.false_val.infer_type(env))
def calculate_constant_result(self):
if self.test.constant_result:
self.test = self.test.coerce_to_boolean(env)
self.true_val.analyse_types(env)
self.false_val.analyse_types(env)
- self.type = self.compute_result_type(self.true_val.type, self.false_val.type)
+ self.type = PyrexTypes.independent_spanning_type(self.true_val.type, self.false_val.type)
if self.true_val.type.is_pyobject or self.false_val.type.is_pyobject:
self.true_val = self.true_val.coerce_to(self.type, env)
self.false_val = self.false_val.coerce_to(self.type, env)
if self.type == PyrexTypes.error_type:
self.type_error()
- def compute_result_type(self, type1, type2):
- if type1 == type2:
- return type1
- elif type1.is_numeric and type2.is_numeric:
- return PyrexTypes.widest_numeric_type(type1, type2)
- elif type1.is_extension_type and type1.subtype_of_resolved_type(type2):
- return type2
- elif type2.is_extension_type and type2.subtype_of_resolved_type(type1):
- return type1
- elif type1.is_pyobject or type2.is_pyobject:
- return py_object_type
- elif type1.assignable_from(type2):
- return type1
- elif type2.assignable_from(type1):
- return type2
- else:
- return PyrexTypes.error_type
-
def type_error(self):
if not (self.true_val.type.is_error or self.false_val.type.is_error):
error(self.pos, "Incompatable types in conditional expression (%s; %s)" %
# Mixin class containing code common to PrimaryCmpNodes
# and CascadedCmpNodes.
- def infer_types(self, env):
+ def infer_type(self, env):
# TODO: Actually implement this (after merging with -unstable).
return py_object_type
func = compile_time_binary_operators[self.operator]
operand2_result = self.operand2.constant_result
result = func(operand1_result, operand2_result)
- if result and self.cascade:
- result = result and \
- self.cascade.cascaded_compile_time_value(operand2_result)
- self.constant_result = result
-
+ if self.cascade:
+ self.cascade.calculate_cascaded_constant_result(operand2_result)
+ if self.cascade.constant_result:
+ self.constant_result = result and self.cascade.constant_result
+ else:
+ self.constant_result = result
+
def cascaded_compile_time_value(self, operand1, denv):
func = get_compile_time_binop(self)
operand2 = self.operand2.compile_time_value(denv)
cascade = self.cascade
if cascade:
# FIXME: I bet this must call cascaded_compile_time_value()
- result = result and cascade.compile_time_value(operand2, denv)
+ result = result and cascade.cascaded_compile_time_value(operand2, denv)
return result
def is_cpp_comparison(self):
- type1 = self.operand1.type
- type2 = self.operand2.type
- if type1.is_reference:
- type1 = type1.base_type
- if type2.is_reference:
- type2 = type2.base_type
- return type1.is_cpp_class or type2.is_cpp_class
+ return self.operand1.type.is_cpp_class or self.operand2.type.is_cpp_class
def find_common_int_type(self, env, op, operand1, operand2):
# type1 != type2 and at least one of the types is not a C int
type1_can_be_int = False
type2_can_be_int = False
- if isinstance(operand1, (StringNode, BytesNode)) \
+ if isinstance(operand1, (StringNode, BytesNode, UnicodeNode)) \
and operand1.can_coerce_to_char_literal():
type1_can_be_int = True
- if isinstance(operand2, (StringNode, BytesNode)) \
+ if isinstance(operand2, (StringNode, BytesNode, UnicodeNode)) \
and operand2.can_coerce_to_char_literal():
type2_can_be_int = True
(op, operand1.type, operand2.type))
def is_python_comparison(self):
- return (self.has_python_operands()
- or (self.cascade and self.cascade.is_python_comparison())
- or self.operator in ('in', 'not_in'))
+ return not self.is_c_string_contains() and (
+ self.has_python_operands()
+ or (self.cascade and self.cascade.is_python_comparison())
+ or self.operator in ('in', 'not_in'))
def coerce_operands_to(self, dst_type, env):
operand2 = self.operand2
def is_python_result(self):
return ((self.has_python_operands() and
- self.operator not in ('is', 'is_not', 'in', 'not_in'))
+ self.operator not in ('is', 'is_not', 'in', 'not_in') and
+ not self.is_c_string_contains())
or (self.cascade and self.cascade.is_python_result()))
+ def is_c_string_contains(self):
+ return self.operator in ('in', 'not_in') and \
+ ((self.operand1.type.is_int
+ and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or
+ (self.operand1.type is PyrexTypes.c_py_unicode_type
+ and self.operand2.type is unicode_type))
+
def generate_operation_code(self, code, result_code,
operand1, op , operand2):
- if self.type is PyrexTypes.py_object_type:
+ if self.type.is_pyobject:
coerce_result = "__Pyx_PyBool_FromLong"
else:
coerce_result = ""
else:
negation = ""
if op == 'in' or op == 'not_in':
- code.globalstate.use_utility_code(contians_utility_code)
- if self.type is PyrexTypes.py_object_type:
+ code.globalstate.use_utility_code(contains_utility_code)
+ if self.type.is_pyobject:
coerce_result = "__Pyx_PyBoolOrNull_FromLong"
if op == 'not_in':
negation = "__Pyx_NegateNonNeg"
method = "PyDict_Contains"
else:
method = "PySequence_Contains"
- if self.type is PyrexTypes.py_object_type:
+ if self.type.is_pyobject:
error_clause = code.error_goto_if_null
got_ref = "__Pyx_XGOTREF(%s); " % result_code
else:
else:
return op
-contians_utility_code = UtilityCode(
+contains_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE long __Pyx_NegateNonNeg(long b) { return unlikely(b < 0) ? b : !b; }
static CYTHON_INLINE PyObject* __Pyx_PyBoolOrNull_FromLong(long b) {
}
""")
+char_in_bytes_utility_code = UtilityCode(
+proto="""
+static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character); /*proto*/
+""",
+impl="""
+static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character) {
+ const Py_ssize_t length = PyBytes_GET_SIZE(bytes);
+ char* char_start = PyBytes_AS_STRING(bytes);
+ char* pos;
+ for (pos=char_start; pos < char_start+length; pos++) {
+ if (character == pos[0]) return 1;
+ }
+ return 0;
+}
+""")
+
+pyunicode_in_unicode_utility_code = UtilityCode(
+proto="""
+static CYTHON_INLINE int __Pyx_UnicodeContains(PyObject* unicode, Py_UNICODE character); /*proto*/
+""",
+impl="""
+static CYTHON_INLINE int __Pyx_UnicodeContains(PyObject* unicode, Py_UNICODE character) {
+ const Py_ssize_t length = PyUnicode_GET_SIZE(unicode);
+ Py_UNICODE* char_start = PyUnicode_AS_UNICODE(unicode);
+ Py_UNICODE* pos;
+ for (pos=char_start; pos < char_start+length; pos++) {
+ if (character == pos[0]) return 1;
+ }
+ return 0;
+}
+""")
+
class PrimaryCmpNode(ExprNode, CmpNode):
# Non-cascaded comparison or first comparison of
return ()
def calculate_constant_result(self):
- self.constant_result = self.calculate_cascaded_constant_result(
- self.operand1.constant_result)
+ self.calculate_cascaded_constant_result(self.operand1.constant_result)
def compile_time_value(self, denv):
operand1 = self.operand1.compile_time_value(denv)
self.cascade.analyse_types(env)
if self.operator in ('in', 'not_in'):
- common_type = py_object_type
- self.is_pycmp = True
+ if self.is_c_string_contains():
+ self.is_pycmp = False
+ common_type = None
+ if self.cascade:
+ error(self.pos, "Cascading comparison not yet supported for 'int_val in string'.")
+ return
+ if self.operand2.type is unicode_type:
+ env.use_utility_code(pyunicode_in_unicode_utility_code)
+ else:
+ if self.operand1.type is PyrexTypes.c_uchar_type:
+ self.operand1 = self.operand1.coerce_to(PyrexTypes.c_char_type, env)
+ if self.operand2.type is not bytes_type:
+ self.operand2 = self.operand2.coerce_to(bytes_type, env)
+ env.use_utility_code(char_in_bytes_utility_code)
+ self.operand2 = self.operand2.as_none_safe_node(
+ "argument of type 'NoneType' is not iterable")
+ else:
+ common_type = py_object_type
+ self.is_pycmp = True
else:
common_type = self.find_common_type(env, self.operator, self.operand1)
self.is_pycmp = common_type.is_pyobject
- if not common_type.is_error:
+ if common_type is not None and not common_type.is_error:
if self.operand1.type != common_type:
self.operand1 = self.operand1.coerce_to(common_type, env)
self.coerce_operands_to(common_type, env)
self.operand1.type.binary_op('=='),
self.operand1.result(),
self.operand2.result())
+ elif self.is_c_string_contains():
+ if self.operand2.type is bytes_type:
+ method = "__Pyx_BytesContains"
+ else:
+ method = "__Pyx_UnicodeContains"
+ if self.operator == "not_in":
+ negation = "!"
+ else:
+ negation = ""
+ return "(%s%s(%s, %s))" % (
+ negation,
+ method,
+ self.operand2.result(),
+ self.operand1.result())
else:
return "(%s %s %s)" % (
self.operand1.result(),
def type_dependencies(self, env):
return ()
+ def has_constant_result(self):
+ return self.constant_result is not constant_value_not_set and \
+ self.constant_result is not not_a_constant
+
def analyse_types(self, env):
self.operand2.analyse_types(env)
if self.cascade:
def analyse_types(self, env):
pass
+ def may_be_none(self):
+ return False
+
def result_in_temp(self):
return self.arg.result_in_temp()
CoercionNode.__init__(self, arg)
if not arg.type.create_to_py_utility_code(env):
error(arg.pos,
- "Cannot convert '%s' to Python object" % arg.type)
+ "Cannot convert '%s' to Python object" % arg.type)
if type is not py_object_type:
self.type = py_object_type
elif arg.type.is_string:
- self.type = Builtin.bytes_type
+ self.type = bytes_type
+ elif arg.type is PyrexTypes.c_py_unicode_type:
+ self.type = unicode_type
gil_message = "Converting to Python object"
+ def may_be_none(self):
+ # FIXME: is this always safe?
+ return False
+
def coerce_to_boolean(self, env):
return self.arg.coerce_to_boolean(env).coerce_to_temp(env)
code.put_gotref(self.py_result())
+class CoerceIntToBytesNode(CoerceToPyTypeNode):
+ # This node converts a C int (range-checked to 0..255 at runtime)
+ # into a single-character Python bytes object.
+
+ is_temp = 1
+
+ def __init__(self, arg, env):
+ arg = arg.coerce_to_simple(env)
+ CoercionNode.__init__(self, arg)
+ self.type = Builtin.bytes_type
+
+ def generate_result_code(self, code):
+ arg = self.arg
+ arg_result = arg.result()
+ if arg.type not in (PyrexTypes.c_char_type,
+ PyrexTypes.c_uchar_type,
+ PyrexTypes.c_schar_type):
+ if arg.type.signed:
+ code.putln("if ((%s < 0) || (%s > 255)) {" % (
+ arg_result, arg_result))
+ else:
+ code.putln("if (%s > 255) {" % arg_result)
+ code.putln('PyErr_Format(PyExc_OverflowError, '
+ '"value too large to pack into a byte"); %s' % (
+ code.error_goto(self.pos)))
+ code.putln('}')
+ temp = None
+ if arg.type is not PyrexTypes.c_char_type:
+ temp = code.funcstate.allocate_temp(PyrexTypes.c_char_type, manage_ref=False)
+ code.putln("%s = (char)%s;" % (temp, arg_result))
+ arg_result = temp
+ code.putln('%s = PyBytes_FromStringAndSize(&%s, 1); %s' % (
+ self.result(),
+ arg_result,
+ code.error_goto_if_null(self.result(), self.pos)))
+ if temp is not None:
+ code.funcstate.release_temp(temp)
+ code.put_gotref(self.py_result())
+
+
class CoerceFromPyTypeNode(CoercionNode):
# This node is used to convert a Python object
# to a C data type.
self.is_temp = 1
if not result_type.create_from_py_utility_code(env):
error(arg.pos,
- "Cannot convert Python object to '%s'" % result_type)
+ "Cannot convert Python object to '%s'" % result_type)
if self.type.is_string and self.arg.is_ephemeral():
error(arg.pos,
- "Obtaining char * from temporary Python value")
+ "Obtaining char * from temporary Python value")
def analyse_types(self, env):
# The arg is always already analysed
# in a boolean context.
type = PyrexTypes.c_bint_type
-
+
+ _special_builtins = {
+ Builtin.list_type : 'PyList_GET_SIZE',
+ Builtin.tuple_type : 'PyTuple_GET_SIZE',
+ Builtin.bytes_type : 'PyBytes_GET_SIZE',
+ Builtin.unicode_type : 'PyUnicode_GET_SIZE',
+ }
+
def __init__(self, arg, env):
CoercionNode.__init__(self, arg)
if arg.type.is_pyobject:
self.is_temp = 1
def nogil_check(self, env):
- if self.arg.type.is_pyobject:
+ if self.arg.type.is_pyobject and self._special_builtins.get(self.arg.type) is None:
self.gil_error()
gil_message = "Truth-testing Python object"
return "(%s != 0)" % self.arg.result()
def generate_result_code(self, code):
- if self.arg.type.is_pyobject:
+ if not self.is_temp:
+ return
+ test_func = self._special_builtins.get(self.arg.type)
+ if test_func is not None:
+ code.putln("%s = (%s != Py_None) && (%s(%s) != 0);" % (
+ self.result(),
+ self.arg.py_result(),
+ test_func,
+ self.arg.py_result()))
+ else:
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
self.result(),
def __init__(self, arg, env):
CoercionNode.__init__(self, arg)
self.type = self.arg.type
+ self.constant_result = self.arg.constant_result
self.is_temp = 1
if self.type.is_pyobject:
self.result_ctype = py_object_type
def coerce_to_boolean(self, env):
self.arg = self.arg.coerce_to_boolean(env)
+ if self.arg.is_simple():
+ return self.arg
self.type = self.arg.type
self.result_ctype = self.type
return self
def analyse_types(self, env):
pass
+ def may_be_none(self):
+ return False
+
def calculate_result_code(self):
return Naming.module_cname
pass
def generate_result_code(self, code):
- code.putln('%s = __Pyx_GetAttrString(%s, "__doc__");' %
- (self.result(), self.body.result()))
+ code.putln('%s = __Pyx_GetAttrString(%s, "__doc__"); %s' % (
+ self.result(), self.body.result(),
+ code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
""",
impl = """
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list) {
- PyObject *__import__ = 0;
+ PyObject *py_import = 0;
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
- __import__ = __Pyx_GetAttrString(%(BUILTINS)s, "__import__");
- if (!__import__)
+ py_import = __Pyx_GetAttrString(%(BUILTINS)s, "__import__");
+ if (!py_import)
goto bad;
if (from_list)
list = from_list;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
- module = PyObject_CallFunctionObjArgs(__import__,
+ module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, NULL);
bad:
Py_XDECREF(empty_list);
- Py_XDECREF(__import__);
+ Py_XDECREF(py_import);
Py_XDECREF(empty_dict);
return module;
}
#------------------------------------------------------------------------------------
-# If the is_unsigned flag is set, we need to do some extra work to make
-# sure the index doesn't become negative.
+raise_noneattr_error_utility_code = UtilityCode(
+proto = """
+static CYTHON_INLINE void __Pyx_RaiseNoneAttributeError(const char* attrname);
+""",
+impl = '''
+static CYTHON_INLINE void __Pyx_RaiseNoneAttributeError(const char* attrname) {
+ PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%s'", attrname);
+}
+''')
+
+raise_noneindex_error_utility_code = UtilityCode(
+proto = """
+static CYTHON_INLINE void __Pyx_RaiseNoneIndexingError(void);
+""",
+impl = '''
+static CYTHON_INLINE void __Pyx_RaiseNoneIndexingError(void) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is unsubscriptable");
+}
+''')
+
+raise_none_iter_error_utility_code = UtilityCode(
+proto = """
+static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
+""",
+impl = '''
+static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
+}
+''')
+
+#------------------------------------------------------------------------------------
+
+getitem_dict_utility_code = UtilityCode(
+proto = """
+
+#if PY_MAJOR_VERSION >= 3
+static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
+ PyObject *value;
+ if (unlikely(d == Py_None)) {
+ __Pyx_RaiseNoneIndexingError();
+ return NULL;
+ }
+ value = PyDict_GetItemWithError(d, key);
+ if (unlikely(!value)) {
+ if (!PyErr_Occurred())
+ PyErr_SetObject(PyExc_KeyError, key);
+ return NULL;
+ }
+ Py_INCREF(value);
+ return value;
+}
+#else
+ #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
+#endif
+""",
+requires = [raise_noneindex_error_utility_code])
+
+#------------------------------------------------------------------------------------
+
+getitem_int_pyunicode_utility_code = UtilityCode(
+proto = '''
+#define __Pyx_GetItemInt_Unicode(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \\
+ __Pyx_GetItemInt_Unicode_Fast(o, i) : \\
+ __Pyx_GetItemInt_Unicode_Generic(o, to_py_func(i)))
+
+static CYTHON_INLINE Py_UNICODE __Pyx_GetItemInt_Unicode_Fast(PyObject* ustring, Py_ssize_t i) {
+ if (likely((0 <= i) & (i < PyUnicode_GET_SIZE(ustring)))) {
+ return PyUnicode_AS_UNICODE(ustring)[i];
+ } else if ((-PyUnicode_GET_SIZE(ustring) <= i) & (i < 0)) {
+ i += PyUnicode_GET_SIZE(ustring);
+ return PyUnicode_AS_UNICODE(ustring)[i];
+ } else {
+ PyErr_SetString(PyExc_IndexError, "string index out of range");
+ return (Py_UNICODE)-1;
+ }
+}
+
+static CYTHON_INLINE Py_UNICODE __Pyx_GetItemInt_Unicode_Generic(PyObject* ustring, PyObject* j) {
+ Py_UNICODE uchar;
+ PyObject *uchar_string;
+ if (!j) return (Py_UNICODE)-1;
+ uchar_string = PyObject_GetItem(ustring, j);
+ Py_DECREF(j);
+ if (!uchar_string) return (Py_UNICODE)-1;
+ uchar = PyUnicode_AS_UNICODE(uchar_string)[0];
+ Py_DECREF(uchar_string);
+ return uchar;
+}
+''')
getitem_int_utility_code = UtilityCode(
proto = """
""" + ''.join([
"""
-#define __Pyx_GetItemInt_%(type)s(o, i, size, to_py_func) ((size <= sizeof(Py_ssize_t)) ? \\
- __Pyx_GetItemInt_%(type)s_Fast(o, i, size <= sizeof(long)) : \\
+#define __Pyx_GetItemInt_%(type)s(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \\
+ __Pyx_GetItemInt_%(type)s_Fast(o, i) : \\
__Pyx_GetItemInt_Generic(o, to_py_func(i)))
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_%(type)s_Fast(PyObject *o, Py_ssize_t i, int fits_long) {
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_%(type)s_Fast(PyObject *o, Py_ssize_t i) {
if (likely(o != Py_None)) {
if (likely((0 <= i) & (i < Py%(type)s_GET_SIZE(o)))) {
PyObject *r = Py%(type)s_GET_ITEM(o, i);
return r;
}
}
- return __Pyx_GetItemInt_Generic(o, fits_long ? PyInt_FromLong(i) : PyLong_FromLongLong(i));
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
""" % {'type' : type_name} for type_name in ('List', 'Tuple')
]) + """
-#define __Pyx_GetItemInt(o, i, size, to_py_func) ((size <= sizeof(Py_ssize_t)) ? \\
- __Pyx_GetItemInt_Fast(o, i, size <= sizeof(long)) : \\
+#define __Pyx_GetItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \\
+ __Pyx_GetItemInt_Fast(o, i) : \\
__Pyx_GetItemInt_Generic(o, to_py_func(i)))
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int fits_long) {
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i) {
PyObject *r;
if (PyList_CheckExact(o) && ((0 <= i) & (i < PyList_GET_SIZE(o)))) {
r = PyList_GET_ITEM(o, i);
r = PySequence_GetItem(o, i);
}
else {
- r = __Pyx_GetItemInt_Generic(o, fits_long ? PyInt_FromLong(i) : PyLong_FromLongLong(i));
+ r = __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
return r;
}
setitem_int_utility_code = UtilityCode(
proto = """
-#define __Pyx_SetItemInt(o, i, v, size, to_py_func) ((size <= sizeof(Py_ssize_t)) ? \\
- __Pyx_SetItemInt_Fast(o, i, v, size <= sizeof(long)) : \\
+#define __Pyx_SetItemInt(o, i, v, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \\
+ __Pyx_SetItemInt_Fast(o, i, v) : \\
__Pyx_SetItemInt_Generic(o, to_py_func(i), v))
static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) {
return r;
}
-static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int fits_long) {
+static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v) {
if (PyList_CheckExact(o) && ((0 <= i) & (i < PyList_GET_SIZE(o)))) {
Py_INCREF(v);
Py_DECREF(PyList_GET_ITEM(o, i));
else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_ass_item && (likely(i >= 0)))
return PySequence_SetItem(o, i, v);
else {
- PyObject *j = fits_long ? PyInt_FromLong(i) : PyLong_FromLongLong(i);
+ PyObject *j = PyInt_FromSsize_t(i);
return __Pyx_SetItemInt_Generic(o, j, v);
}
}
delitem_int_utility_code = UtilityCode(
proto = """
-#define __Pyx_DelItemInt(o, i, size, to_py_func) ((size <= sizeof(Py_ssize_t)) ? \\
- __Pyx_DelItemInt_Fast(o, i, size <= sizeof(long)) : \\
+#define __Pyx_DelItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \\
+ __Pyx_DelItemInt_Fast(o, i) : \\
__Pyx_DelItem_Generic(o, to_py_func(i)))
static CYTHON_INLINE int __Pyx_DelItem_Generic(PyObject *o, PyObject *j) {
return r;
}
-static CYTHON_INLINE int __Pyx_DelItemInt_Fast(PyObject *o, Py_ssize_t i, int fits_long) {
+static CYTHON_INLINE int __Pyx_DelItemInt_Fast(PyObject *o, Py_ssize_t i) {
if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_ass_item && likely(i >= 0))
return PySequence_DelItem(o, i);
else {
- PyObject *j = fits_long ? PyInt_FromLong(i) : PyLong_FromLongLong(i);
+ PyObject *j = PyInt_FromSsize_t(i);
return __Pyx_DelItem_Generic(o, j);
}
}
#------------------------------------------------------------------------------------
-raise_noneattr_error_utility_code = UtilityCode(
-proto = """
-static CYTHON_INLINE void __Pyx_RaiseNoneAttributeError(const char* attrname);
-""",
-impl = '''
-static CYTHON_INLINE void __Pyx_RaiseNoneAttributeError(const char* attrname) {
- PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%s'", attrname);
-}
-''')
-
-raise_noneindex_error_utility_code = UtilityCode(
-proto = """
-static CYTHON_INLINE void __Pyx_RaiseNoneIndexingError(void);
-""",
-impl = '''
-static CYTHON_INLINE void __Pyx_RaiseNoneIndexingError(void) {
- PyErr_SetString(PyExc_TypeError, "'NoneType' object is unsubscriptable");
-}
-''')
-
-raise_none_iter_error_utility_code = UtilityCode(
-proto = """
-static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
-""",
-impl = '''
-static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
- PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
-}
-''')
-
raise_too_many_values_to_unpack = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(void);
#define UNARY_NEG_WOULD_OVERFLOW(x) \
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
""")
+
+
+binding_cfunc_utility_code = UtilityCode(
+proto="""
+#define %(binding_cfunc)s_USED 1
+
+typedef struct {
+ PyCFunctionObject func;
+} %(binding_cfunc)s_object;
+
+PyTypeObject %(binding_cfunc)s_type;
+PyTypeObject *%(binding_cfunc)s = NULL;
+
+PyObject *%(binding_cfunc)s_NewEx(PyMethodDef *ml, PyObject *self, PyObject *module); /* proto */
+#define %(binding_cfunc)s_New(ml, self) %(binding_cfunc)s_NewEx(ml, self, NULL)
+
+int %(binding_cfunc)s_init(void); /* proto */
+""" % Naming.__dict__,
+impl="""
+
+PyObject *%(binding_cfunc)s_NewEx(PyMethodDef *ml, PyObject *self, PyObject *module) {
+ %(binding_cfunc)s_object *op = PyObject_GC_New(%(binding_cfunc)s_object, %(binding_cfunc)s);
+ if (op == NULL)
+ return NULL;
+ op->func.m_ml = ml;
+ Py_XINCREF(self);
+ op->func.m_self = self;
+ Py_XINCREF(module);
+ op->func.m_module = module;
+ _PyObject_GC_TRACK(op);
+ return (PyObject *)op;
+}
+
+static void %(binding_cfunc)s_dealloc(%(binding_cfunc)s_object *m) {
+ _PyObject_GC_UNTRACK(m);
+ Py_XDECREF(m->func.m_self);
+ Py_XDECREF(m->func.m_module);
+ PyObject_GC_Del(m);
+}
+
+static PyObject *%(binding_cfunc)s_descr_get(PyObject *func, PyObject *obj, PyObject *type) {
+ if (obj == Py_None)
+ obj = NULL;
+ return PyMethod_New(func, obj, type);
+}
+
+int %(binding_cfunc)s_init(void) {
+ %(binding_cfunc)s_type = PyCFunction_Type;
+ %(binding_cfunc)s_type.tp_name = "cython_binding_builtin_function_or_method";
+ %(binding_cfunc)s_type.tp_dealloc = (destructor)%(binding_cfunc)s_dealloc;
+ %(binding_cfunc)s_type.tp_descr_get = %(binding_cfunc)s_descr_get;
+ if (PyType_Ready(&%(binding_cfunc)s_type) < 0) {
+ return -1;
+ }
+ %(binding_cfunc)s = &%(binding_cfunc)s_type;
+ return 0;
+
+}
+""" % Naming.__dict__)