Fix length calculation so it doesn't assume the length of the encoded
bytes is the same as the length of the unicode string.
[portage.git] / pym / portage / tests / ebuild / test_array_fromfile_eof.py
1 # Copyright 2009 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
3 # $Id$
4
5 import array
6 import pty
7 import tempfile
8
9 from portage import _unicode_decode
10 from portage import _unicode_encode
11 from portage.tests import TestCase
12
class ArrayFromfileEofTestCase(TestCase):
	"""
	Regression test for http://bugs.python.org/issue5334: on python-3.0,
	array.fromfile() discarded any partially-read data when it raised
	EOFError. This test writes a known string to a temp file, reads it
	back through array.fromfile() while deliberately over-requesting
	(len + 1 items, so EOF is always hit), and asserts no bytes were lost.
	"""

	def testArrayFromfileEof(self):
		input_data = "an arbitrary string"
		# Encode explicitly so the byte length is known exactly; it may
		# differ from len(input_data) for non-ASCII content.
		input_bytes = _unicode_encode(input_data,
			encoding='utf_8', errors='strict')

		f = tempfile.TemporaryFile()
		try:
			f.write(input_bytes)
			f.seek(0)

			data = []
			eof = False
			while not eof:
				a = array.array('B')
				try:
					# Request one more item than the file holds so that
					# fromfile() always terminates with an EOF condition.
					a.fromfile(f, len(input_bytes) + 1)
				except (EOFError, IOError):
					# python-3.0 lost the partially-read data here; some
					# interpreter versions signal the short read with
					# IOError instead of EOFError.
					eof = True

				if not a:
					eof = True
				else:
					# tostring() was removed in python-3.9; prefer its
					# replacement tobytes() (available since 3.2) and
					# fall back for older interpreters.
					if hasattr(a, 'tobytes'):
						chunk = a.tobytes()
					else:
						chunk = a.tostring()
					data.append(_unicode_decode(chunk,
						encoding='utf_8', errors='strict'))
		finally:
			# Close unconditionally so a failed assertion or decode error
			# does not leak the temp file descriptor.
			f.close()

		self.assertEqual(input_data, ''.join(data))