dirstate-item: add more logic to `from_p2`...
marmoute
r48747:97e9f3fd default
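
This changeset widens the `from_p2` property of `DirstateItem`: a file tracked in the working copy is now reported as coming from p2 not only when the `_clean_p2` flag is set, but also when it is tracked in p2 and not in p1. A minimal sketch of the old and new predicates follows; the standalone functions are illustrations written for this review, not part of the patch:

    def from_p2_old(wc_tracked, p1_tracked, p2_tracked, clean_p2):
        # pre-change logic: only the clean_p2 flag counted
        return wc_tracked and clean_p2

    def from_p2_new(wc_tracked, p1_tracked, p2_tracked, clean_p2):
        # post-change logic: a tracked file known only to p2 also qualifies
        if not wc_tracked:
            return False
        return clean_p2 or (not p1_tracked and p2_tracked)

    # a tracked file present in p2 but not p1, without the clean_p2 flag:
    assert not from_p2_old(True, False, True, False)
    assert from_p2_new(True, False, True, False)
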
@@ -1,815 +1,817 b''
# parsers.py - Python implementation of parsers.c
#
# Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import struct
import zlib

from ..node import (
    nullrev,
    sha1nodeconstants,
)
from ..thirdparty import attr
from .. import (
    error,
    pycompat,
    revlogutils,
    util,
)

from ..revlogutils import nodemap as nodemaputil
from ..revlogutils import constants as revlog_constants

stringio = pycompat.bytesio


_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress


# a special value used internally for `size` if the file comes from the other parent
FROM_P2 = -2

# a special value used internally for `size` if the file is modified/merged/added
NONNORMAL = -1

# a special value used internally for `time` if the time is ambiguous
AMBIGUOUS_TIME = -1


@attr.s(slots=True, init=False)
class DirstateItem(object):
    """represent a dirstate entry

    It contains:

    - state (one of 'n', 'a', 'r', 'm')
    - mode,
    - size,
    - mtime,
    """

    _wc_tracked = attr.ib()
    _p1_tracked = attr.ib()
    _p2_tracked = attr.ib()
    # the three items above should probably be combined
    #
    # However it is unclear if they properly cover some of the most advanced
    # merge cases. So we should probably wait on this to be settled.
    _merged = attr.ib()
    _clean_p1 = attr.ib()
    _clean_p2 = attr.ib()
    _possibly_dirty = attr.ib()
    _mode = attr.ib()
    _size = attr.ib()
    _mtime = attr.ib()

    def __init__(
        self,
        wc_tracked=False,
        p1_tracked=False,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        self._wc_tracked = wc_tracked
        self._p1_tracked = p1_tracked
        self._p2_tracked = p2_tracked
        self._merged = merged
        self._clean_p1 = clean_p1
        self._clean_p2 = clean_p2
        self._possibly_dirty = possibly_dirty
        if parentfiledata is None:
            self._mode = None
            self._size = None
            self._mtime = None
        else:
            self._mode = parentfiledata[0]
            self._size = parentfiledata[1]
            self._mtime = parentfiledata[2]

    @classmethod
    def new_added(cls):
        """constructor to help legacy API to build a new "added" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = False
        instance._p2_tracked = False
        return instance

    @classmethod
    def new_merged(cls):
        """constructor to help legacy API to build a new "merged" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True  # might not be True because of rename?
        instance._p2_tracked = True  # might not be True because of rename?
        instance._merged = True
        return instance

    @classmethod
    def new_from_p2(cls):
        """constructor to help legacy API to build a new "from_p2" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = False  # might actually be True
        instance._p2_tracked = True
        instance._clean_p2 = True
        return instance

    @classmethod
    def new_possibly_dirty(cls):
        """constructor to help legacy API to build a new "possibly_dirty" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True
        instance._possibly_dirty = True
        return instance

    @classmethod
    def new_normal(cls, mode, size, mtime):
        """constructor to help legacy API to build a new "normal" item

        Should eventually be removed
        """
        assert size != FROM_P2
        assert size != NONNORMAL
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True
        instance._mode = mode
        instance._size = size
        instance._mtime = mtime
        return instance

    @classmethod
    def from_v1_data(cls, state, mode, size, mtime):
        """Build a new DirstateItem object from V1 data

        Since the dirstate-v1 format is frozen, the signature of this function
        is not expected to change, unlike the __init__ one.
        """
        if state == b'm':
            return cls.new_merged()
        elif state == b'a':
            return cls.new_added()
        elif state == b'r':
            instance = cls()
            instance._wc_tracked = False
            if size == NONNORMAL:
                instance._merged = True
                instance._p1_tracked = (
                    True  # might not be True because of rename?
                )
                instance._p2_tracked = (
                    True  # might not be True because of rename?
                )
            elif size == FROM_P2:
                instance._clean_p2 = True
                instance._p1_tracked = (
                    False  # We actually don't know (file history)
                )
                instance._p2_tracked = True
            else:
                instance._p1_tracked = True
            return instance
        elif state == b'n':
            if size == FROM_P2:
                return cls.new_from_p2()
            elif size == NONNORMAL:
                return cls.new_possibly_dirty()
            elif mtime == AMBIGUOUS_TIME:
                instance = cls.new_normal(mode, size, 42)
                instance._mtime = None
                instance._possibly_dirty = True
                return instance
            else:
                return cls.new_normal(mode, size, mtime)
        else:
            raise RuntimeError(b'unknown state: %s' % state)

    def set_possibly_dirty(self):
        """Mark a file as "possibly dirty"

        This means the next status call will have to actually check its content
        to make sure it is correct.
        """
        self._possibly_dirty = True

    def set_untracked(self):
        """mark a file as untracked in the working copy

        This will ultimately be called by commands like `hg remove`.
        """
        # backup the previous state (useful for merge)
        self._wc_tracked = False
        self._mode = None
        self._size = None
        self._mtime = None

    @property
    def mode(self):
        return self.v1_mode()

    @property
    def size(self):
        return self.v1_size()

    @property
    def mtime(self):
        return self.v1_mtime()

    @property
    def state(self):
        """
        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        return self.v1_state()

    @property
    def tracked(self):
        """True if the file is tracked in the working copy"""
        return self._wc_tracked

    @property
    def added(self):
        """True if the file has been added"""
        return self._wc_tracked and not (self._p1_tracked or self._p2_tracked)

    @property
    def merged(self):
        """True if the file has been merged

        Should only be set if a merge is in progress in the dirstate
        """
        return self._wc_tracked and self._merged

    @property
    def from_p2(self):
        """True if the file has been fetched from p2 during the current merge

        This is only True if the file is currently tracked.

        Should only be set if a merge is in progress in the dirstate
        """
-        return self._wc_tracked and self._clean_p2
+        if not self._wc_tracked:
+            return False
+        return self._clean_p2 or (not self._p1_tracked and self._p2_tracked)

    @property
    def from_p2_removed(self):
        """True if the file has been removed, but was "from_p2" initially

        This property seems like an abstraction leakage and should probably be
        dealt with in this class (or maybe the dirstatemap) directly.
        """
        return self.removed and self._clean_p2

    @property
    def removed(self):
        """True if the file has been removed"""
        return not self._wc_tracked and (self._p1_tracked or self._p2_tracked)

    @property
    def merged_removed(self):
        """True if the file has been removed, but was "merged" initially

        This property seems like an abstraction leakage and should probably be
        dealt with in this class (or maybe the dirstatemap) directly.
        """
        return self.removed and self._merged

    @property
    def dm_nonnormal(self):
        """True if the entry is non-normal in the dirstatemap sense

        There is no reason for any code but the dirstatemap one to use this.
        """
        return self.v1_state() != b'n' or self.v1_mtime() == AMBIGUOUS_TIME

    @property
    def dm_otherparent(self):
        """True if the entry is `otherparent` in the dirstatemap sense

        There is no reason for any code but the dirstatemap one to use this.
        """
        return self.v1_size() == FROM_P2

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif not self._wc_tracked:
            return b'r'
        elif self._merged:
            return b'm'
        elif not (self._p1_tracked or self._p2_tracked) and self._wc_tracked:
            return b'a'
        elif self._clean_p2 and self._wc_tracked:
            return b'n'
        elif not self._p1_tracked and self._p2_tracked and self._wc_tracked:
            return b'n'
        elif self._possibly_dirty:
            return b'n'
        elif self._wc_tracked:
            return b'n'
        else:
            raise RuntimeError('unreachable')

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode if self._mode is not None else 0

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif not self._wc_tracked:
            # File was deleted
            if self._merged:
                return NONNORMAL
            elif self._clean_p2:
                return FROM_P2
            else:
                return 0
        elif self._merged:
            return FROM_P2
        elif not (self._p1_tracked or self._p2_tracked) and self._wc_tracked:
            # Added
            return NONNORMAL
        elif self._clean_p2 and self._wc_tracked:
            return FROM_P2
        elif not self._p1_tracked and self._p2_tracked and self._wc_tracked:
            return FROM_P2
        elif self._possibly_dirty:
            if self._size is None:
                return NONNORMAL
            else:
                return self._size
        elif self._wc_tracked:
            return self._size
        else:
            raise RuntimeError('unreachable')

    def v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif not self._wc_tracked:
            return 0
        elif self._possibly_dirty:
            return AMBIGUOUS_TIME
        elif self._merged:
            return AMBIGUOUS_TIME
        elif not (self._p1_tracked or self._p2_tracked) and self._wc_tracked:
            return AMBIGUOUS_TIME
        elif self._clean_p2 and self._wc_tracked:
            return AMBIGUOUS_TIME
        elif not self._p1_tracked and self._p2_tracked and self._wc_tracked:
            return AMBIGUOUS_TIME
        elif self._wc_tracked:
            if self._mtime is None:
                return 0
            else:
                return self._mtime
        else:
            raise RuntimeError('unreachable')

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self.v1_state() == b'n' and self.v1_mtime() == now


def gettype(q):
    return int(q & 0xFFFF)


class BaseIndexObject(object):
    # Can I be passed to an algorithm implemented in Rust?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        return self.index_format.size

    @property
    def nodemap(self):
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exists in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p


class IndexObject(BaseIndexObject):
    def __init__(self, data):
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        return i * self.entry_size

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]


class PersistentNodeMapIndexObject(IndexObject):
    """a debug-oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None


class InlinedIndexObject(BaseIndexObject):
    def __init__(self, data, inline=0):
        self._data = data
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        return self._offsets[i]


def parse_index2(data, inline, revlogv2=False):
    if not inline:
        cls = IndexObject2 if revlogv2 else IndexObject
        return cls(data), None
    cls = InlinedIndexObject
    return cls(data, inline), (0, data)


def parse_index_cl_v2(data):
    return IndexChangelogV2(data), None


class IndexObject2(IndexObject):
    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)


class IndexChangelogV2(IndexObject2):
    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        items = self.index_format.unpack(data)
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)


def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always returns a PersistentNodeMapIndexObject"""
    return PersistentNodeMapIndexObject(data), None


def parse_dirstate(dmap, copymap, st):
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    format = b">cllll"
    e_size = struct.calcsize(format)
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        pos2 = pos1 + e_size
        e = _unpack(b">cllll", st[pos1:pos2])  # a literal here is faster
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if b'\0' in f:
            f, c = f.split(b'\0')
            copymap[f] = c
        dmap[f] = DirstateItem.from_v1_data(*e[:4])
    return parents


def pack_dirstate(dmap, copymap, pl, now):
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e.set_possibly_dirty()

        if f in copymap:
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(
            b">cllll",
            e.v1_state(),
            e.v1_mode(),
            e.v1_size(),
            e.v1_mtime(),
            len(f),
        )
        write(e)
        write(f)
    return cs.getvalue()
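
For readers unfamiliar with the v1 encoding consumed by `from_v1_data` above, here is a short, hedged usage sketch. It assumes a Mercurial checkout in which this module is importable as `mercurial.pure.parsers` (the import path is an assumption; the diff does not show it), and uses only names defined in the file:

    from mercurial.pure.parsers import DirstateItem, FROM_P2

    # dirstate-v1 encodes "fetched from the other parent" as size == FROM_P2
    # (-2), so a normal-state entry with that size becomes a from_p2 item
    item = DirstateItem.from_v1_data(b'n', 0o644, FROM_P2, 0)
    assert item.tracked and item.from_p2
    assert (item.v1_state(), item.v1_size()) == (b'n', FROM_P2)

    # once the file is removed, the information survives only through
    # from_p2_removed; from_p2 itself requires a tracked working-copy file
    gone = DirstateItem.from_v1_data(b'r', 0, FROM_P2, 0)
    assert not gone.from_p2
    assert gone.from_p2_removed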