dirstate-item: implement `merged` in a simpler way...
marmoute - r48742:7c37d153 default
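
This commit simplifies the `merged` property of `DirstateItem`: rather than serializing the entry to its dirstate-v1 state and comparing against `b'm'`, the property now reads the two underlying flags directly. A minimal sketch of the resulting behavior, using constructors from this file (the session itself is illustrative):

    item = DirstateItem.new_merged()
    assert item.merged                # _wc_tracked and _merged are both True

    item.set_untracked()              # e.g. after `hg remove`
    assert not item.merged            # untracked entries are never "merged"
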
@@ -1,815 +1,815 @@
# parsers.py - Python implementation of parsers.c
#
# Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import struct
import zlib

from ..node import (
    nullrev,
    sha1nodeconstants,
)
from ..thirdparty import attr
from .. import (
    error,
    pycompat,
    revlogutils,
    util,
)

from ..revlogutils import nodemap as nodemaputil
from ..revlogutils import constants as revlog_constants

stringio = pycompat.bytesio


_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress


# a special value used internally for `size` if the file comes from the other parent
FROM_P2 = -2

# a special value used internally for `size` if the file is modified/merged/added
NONNORMAL = -1

# a special value used internally for `time` if the time is ambiguous
AMBIGUOUS_TIME = -1


@attr.s(slots=True, init=False)
class DirstateItem(object):
    """represent a dirstate entry

    It contains:

    - state (one of 'n', 'a', 'r', 'm')
    - mode,
    - size,
    - mtime,
    """

    _wc_tracked = attr.ib()
    _p1_tracked = attr.ib()
    _p2_tracked = attr.ib()
    # the three items above should probably be combined
    #
    # However it is unclear if they properly cover some of the most advanced
    # merge cases. So we should probably wait on this to be settled.
    _merged = attr.ib()
    _clean_p1 = attr.ib()
    _clean_p2 = attr.ib()
    _possibly_dirty = attr.ib()
    _mode = attr.ib()
    _size = attr.ib()
    _mtime = attr.ib()

    def __init__(
        self,
        wc_tracked=False,
        p1_tracked=False,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        self._wc_tracked = wc_tracked
        self._p1_tracked = p1_tracked
        self._p2_tracked = p2_tracked
        self._merged = merged
        self._clean_p1 = clean_p1
        self._clean_p2 = clean_p2
        self._possibly_dirty = possibly_dirty
        if parentfiledata is None:
            self._mode = None
            self._size = None
            self._mtime = None
        else:
            self._mode = parentfiledata[0]
            self._size = parentfiledata[1]
            self._mtime = parentfiledata[2]

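    # The `new_*` classmethods below wrap __init__ for callers that still
    # think in dirstate-v1 terms. A direct call spelling the flags out is
    # equivalent; e.g. a clean file tracked in p1 and the working copy
    # (the concrete mode/size/mtime values here are illustrative):
    #
    #   item = DirstateItem(
    #       wc_tracked=True,
    #       p1_tracked=True,
    #       parentfiledata=(0o644, 2048, 1630000000),  # (mode, size, mtime)
    #   )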
    @classmethod
    def new_added(cls):
        """constructor to help legacy API to build a new "added" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = False
        instance._p2_tracked = False
        return instance

    @classmethod
    def new_merged(cls):
        """constructor to help legacy API to build a new "merged" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True  # might not be True because of rename?
        instance._p2_tracked = True  # might not be True because of rename?
        instance._merged = True
        return instance

    @classmethod
    def new_from_p2(cls):
        """constructor to help legacy API to build a new "from_p2" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = False  # might actually be True
        instance._p2_tracked = True
        instance._clean_p2 = True
        return instance

    @classmethod
    def new_possibly_dirty(cls):
        """constructor to help legacy API to build a new "possibly_dirty" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True
        instance._possibly_dirty = True
        return instance

    @classmethod
    def new_normal(cls, mode, size, mtime):
        """constructor to help legacy API to build a new "normal" item

        Should eventually be removed
        """
        assert size != FROM_P2
        assert size != NONNORMAL
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True
        instance._mode = mode
        instance._size = size
        instance._mtime = mtime
        return instance

    @classmethod
    def from_v1_data(cls, state, mode, size, mtime):
        """Build a new DirstateItem object from V1 data

        Since the dirstate-v1 format is frozen, the signature of this function
        is not expected to change, unlike the __init__ one.
        """
        if state == b'm':
            return cls.new_merged()
        elif state == b'a':
            return cls.new_added()
        elif state == b'r':
            instance = cls()
            instance._wc_tracked = False
            if size == NONNORMAL:
                instance._merged = True
                instance._p1_tracked = (
                    True  # might not be True because of rename?
                )
                instance._p2_tracked = (
                    True  # might not be True because of rename?
                )
            elif size == FROM_P2:
                instance._clean_p2 = True
                instance._p1_tracked = (
                    False  # We actually don't know (file history)
                )
                instance._p2_tracked = True
            else:
                instance._p1_tracked = True
            return instance
        elif state == b'n':
            if size == FROM_P2:
                return cls.new_from_p2()
            elif size == NONNORMAL:
                return cls.new_possibly_dirty()
            elif mtime == AMBIGUOUS_TIME:
                instance = cls.new_normal(mode, size, 42)
                instance._mtime = None
                instance._possibly_dirty = True
                return instance
            else:
                return cls.new_normal(mode, size, mtime)
        else:
            raise RuntimeError(b'unknown state: %s' % state)
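        # Reference summary of the v1 decoding above (derived from this
        # method, not an independent spec of the frozen v1 format):
        #
        #   state  size                    -> resulting item
        #   b'm'   any                     -> new_merged()
        #   b'a'   any                     -> new_added()
        #   b'r'   NONNORMAL               -> untracked, was merged
        #   b'r'   FROM_P2                 -> untracked, was clean in p2
        #   b'r'   other                   -> untracked, was tracked in p1
        #   b'n'   FROM_P2                 -> new_from_p2()
        #   b'n'   NONNORMAL               -> new_possibly_dirty()
        #   b'n'   other, ambiguous mtime  -> normal, marked possibly dirty
        #   b'n'   other                   -> new_normal(mode, size, mtime)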

    def set_possibly_dirty(self):
        """Mark a file as "possibly dirty"

        This means the next status call will have to actually check its content
        to make sure it is correct.
        """
        self._possibly_dirty = True

    def set_untracked(self):
        """mark a file as untracked in the working copy

        This will ultimately be called by commands like `hg remove`.
        """
        # backup the previous state (useful for merge)
        self._wc_tracked = False
        self._mode = None
        self._size = None
        self._mtime = None

    @property
    def mode(self):
        return self.v1_mode()

    @property
    def size(self):
        return self.v1_size()

    @property
    def mtime(self):
        return self.v1_mtime()

    @property
    def state(self):
        """
        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        return self.v1_state()

    @property
    def tracked(self):
        """True if the file is tracked in the working copy"""
        return self._wc_tracked

    @property
    def added(self):
        """True if the file has been added"""
        return self._wc_tracked and not (self._p1_tracked or self._p2_tracked)

    @property
    def merged(self):
        """True if the file has been merged

        Should only be set if a merge is in progress in the dirstate
        """
-        return self.v1_state() == b'm'
+        return self._wc_tracked and self._merged
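        # For any entry that has a representable v1 state, this is
        # equivalent to the previous `self.v1_state() == b'm'`: v1_state()
        # (below) returns b'm' exactly when the entry is tracked in the
        # working copy and `_merged` is set, so reading the two attributes
        # directly avoids recomputing the whole v1 state.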

    @property
    def from_p2(self):
        """True if the file has been fetched from p2 during the current merge

        This is only True if the file is currently tracked.

        Should only be set if a merge is in progress in the dirstate
        """
        return self.v1_state() == b'n' and self.v1_size() == FROM_P2

    @property
    def from_p2_removed(self):
        """True if the file has been removed, but was "from_p2" initially

        This property seems like an abstraction leakage and should probably be
        dealt with in this class (or maybe the dirstatemap) directly.
        """
        return self.v1_state() == b'r' and self.v1_size() == FROM_P2

    @property
    def removed(self):
        """True if the file has been removed"""
        return self.v1_state() == b'r'

    @property
    def merged_removed(self):
        """True if the file has been removed, but was "merged" initially

        This property seems like an abstraction leakage and should probably be
        dealt with in this class (or maybe the dirstatemap) directly.
        """
        return self.v1_state() == b'r' and self.v1_size() == NONNORMAL

    @property
    def dm_nonnormal(self):
        """True if the entry is non-normal in the dirstatemap sense

        There is no reason for any code but the dirstatemap one to use this.
        """
        return self.v1_state() != b'n' or self.v1_mtime() == AMBIGUOUS_TIME

    @property
    def dm_otherparent(self):
        """True if the entry is `otherparent` in the dirstatemap sense

        There is no reason for any code but the dirstatemap one to use this.
        """
        return self.v1_size() == FROM_P2

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif not self._wc_tracked:
            return b'r'
        elif self._merged:
            return b'm'
        elif not (self._p1_tracked or self._p2_tracked) and self._wc_tracked:
            return b'a'
        elif self._clean_p2 and self._wc_tracked:
            return b'n'
        elif not self._p1_tracked and self._p2_tracked and self._wc_tracked:
            return b'n'
        elif self._possibly_dirty:
            return b'n'
        elif self._wc_tracked:
            return b'n'
        else:
            raise RuntimeError('unreachable')
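        # Compact summary of the branches above, most specific first:
        #
        #   nothing tracked at all       -> RuntimeError (nothing to record)
        #   not tracked in working copy  -> b'r' (removed)
        #   merged                       -> b'm' (needs merging)
        #   tracked in working copy only -> b'a' (added)
        #   any other tracked state      -> b'n' (normal)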

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode if self._mode is not None else 0

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif not self._wc_tracked:
            # File was deleted
            if self._merged:
                return NONNORMAL
            elif self._clean_p2:
                return FROM_P2
            else:
                return 0
        elif self._merged:
            return FROM_P2
        elif not (self._p1_tracked or self._p2_tracked) and self._wc_tracked:
            # Added
            return NONNORMAL
        elif self._clean_p2 and self._wc_tracked:
            return FROM_P2
        elif not self._p1_tracked and self._p2_tracked and self._wc_tracked:
            return FROM_P2
        elif self._possibly_dirty:
            if self._size is None:
                return NONNORMAL
            else:
                return self._size
        elif self._wc_tracked:
            return self._size
        else:
            raise RuntimeError('unreachable')

    def v1_mtime(self):
        """return an "mtime" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif not self._wc_tracked:
            return 0
        elif self._possibly_dirty:
            return AMBIGUOUS_TIME
        elif self._merged:
            return AMBIGUOUS_TIME
        elif not (self._p1_tracked or self._p2_tracked) and self._wc_tracked:
            return AMBIGUOUS_TIME
        elif self._clean_p2 and self._wc_tracked:
            return AMBIGUOUS_TIME
        elif not self._p1_tracked and self._p2_tracked and self._wc_tracked:
            return AMBIGUOUS_TIME
        elif self._wc_tracked:
            if self._mtime is None:
                return 0
            else:
                return self._mtime
        else:
            raise RuntimeError('unreachable')

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self.v1_state() == b'n' and self.v1_mtime() == now
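    # Rationale: v1 mtimes have one-second granularity, so a file whose
    # stored mtime equals `now` could still be modified later within the
    # same second without its mtime changing. pack_dirstate() (below) uses
    # this to invalidate such entries instead of trusting their stat data.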


def gettype(q):
    return int(q & 0xFFFF)


class BaseIndexObject(object):
    # Can I be passed to an algorithm implemented in Rust?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        return self.index_format.size

    @property
    def nodemap(self):
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exists in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p


class IndexObject(BaseIndexObject):
    def __init__(self, data):
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        return i * self.entry_size

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]


class PersistentNodeMapIndexObject(IndexObject):
    """a debug-oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None


class InlinedIndexObject(BaseIndexObject):
    def __init__(self, data, inline=0):
        self._data = data
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        return self._offsets[i]


def parse_index2(data, inline, revlogv2=False):
    if not inline:
        cls = IndexObject2 if revlogv2 else IndexObject
        return cls(data), None
    cls = InlinedIndexObject
    return cls(data, inline), (0, data)
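# A usage sketch for the parser entry point above (variable names are
# illustrative): for a non-inline revlog the second element of the returned
# tuple is None; for an inline revlog it is a (0, data) cache the caller
# can reuse:
#
#   index, cache = parse_index2(index_data, inline=0)
#   entry = index[0]  # a 12-item tuple, cf. BaseIndexObject.null_item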


def parse_index_cl_v2(data):
    return IndexChangelogV2(data), None


class IndexObject2(IndexObject):
    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)


class IndexChangelogV2(IndexObject2):
    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        items = self.index_format.unpack(data)
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)


def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always returns a PersistentNodeMapIndexObject"""
    return PersistentNodeMapIndexObject(data), None


def parse_dirstate(dmap, copymap, st):
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    format = b">cllll"
    e_size = struct.calcsize(format)
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        pos2 = pos1 + e_size
        e = _unpack(b">cllll", st[pos1:pos2])  # a literal here is faster
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if b'\0' in f:
            f, c = f.split(b'\0')
            copymap[f] = c
        dmap[f] = DirstateItem.from_v1_data(*e[:4])
    return parents


def pack_dirstate(dmap, copymap, pl, now):
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e.set_possibly_dirty()

        if f in copymap:
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(
            b">cllll",
            e.v1_state(),
            e.v1_mode(),
            e.v1_size(),
            e.v1_mtime(),
            len(f),
        )
        write(e)
        write(f)
    return cs.getvalue()
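
For context, the two module-level helpers above round-trip the dirstate-v1 format. A minimal sketch of their use (the on-disk path and the `time` call are illustrative assumptions, not part of this change):

    import time

    dmap, copymap = {}, {}
    with open('.hg/dirstate', 'rb') as fh:  # conventional location
        parents = parse_dirstate(dmap, copymap, fh.read())
    # dmap now maps file names to DirstateItem instances
    data = pack_dirstate(dmap, copymap, parents, int(time.time()))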