dirstate-item: point out that `merged` is set only with p1_tracked...
marmoute
r48925:e2da3ec9 default
@@ -1,841 +1,842 @@
# parsers.py - Python implementation of parsers.c
#
# Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import struct
import zlib

from ..node import (
    nullrev,
    sha1nodeconstants,
)
from ..thirdparty import attr
from .. import (
    error,
    pycompat,
    revlogutils,
    util,
)

from ..revlogutils import nodemap as nodemaputil
from ..revlogutils import constants as revlog_constants

stringio = pycompat.bytesio


_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress


# a special value used internally for `size` if the file comes from the other parent
FROM_P2 = -2

# a special value used internally for `size` if the file is modified/merged/added
NONNORMAL = -1

# a special value used internally for `time` if the time is ambiguous
AMBIGUOUS_TIME = -1


@attr.s(slots=True, init=False)
class DirstateItem(object):
    """represent a dirstate entry

    It holds multiple attributes

    # about file tracking
    - wc_tracked: is the file tracked by the working copy
    - p1_tracked: is the file tracked in the working copy's first parent
    - p2_tracked: is the file tracked in the working copy's second parent

    # about the possible merge action related to this file
    - clean_p1: merge picked the file content from p1
    - clean_p2: merge picked the file content from p2
    - merged: the file gathers changes from both sides.

    # about the file state expected from p1 manifest:
    - mode: the file mode in p1
    - size: the file size in p1

    # about the file state on disk last time we saw it:
    - mtime: the last known clean mtime for the file.

    The last three items (mode, size and mtime) can be None if no meaningful
    (or trusted) value exists.

    """

    _wc_tracked = attr.ib()
    _p1_tracked = attr.ib()
    _p2_tracked = attr.ib()
    # the three items above should probably be combined
    #
    # However it is unclear if they properly cover some of the most advanced
    # merge cases. So we should probably wait on this to be settled.
    _merged = attr.ib()
    _clean_p1 = attr.ib()
    _clean_p2 = attr.ib()
    _possibly_dirty = attr.ib()
    _mode = attr.ib()
    _size = attr.ib()
    _mtime = attr.ib()

    def __init__(
        self,
        wc_tracked=False,
        p1_tracked=False,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        assert not (merged and not p1_tracked)
        self._wc_tracked = wc_tracked
        self._p1_tracked = p1_tracked
        self._p2_tracked = p2_tracked
        self._merged = merged
        self._clean_p1 = clean_p1
        self._clean_p2 = clean_p2
        self._possibly_dirty = possibly_dirty
        if parentfiledata is None:
            self._mode = None
            self._size = None
            self._mtime = None
        else:
            self._mode = parentfiledata[0]
            self._size = parentfiledata[1]
            self._mtime = parentfiledata[2]

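    # A minimal usage sketch of the constructor above, assuming a merge is in
    # progress; `parentfiledata` carries (mode, size, mtime) when known. The
    # concrete values are illustrative only:
    #
    #   >>> item = DirstateItem(wc_tracked=True, p1_tracked=True, merged=True)
    #   >>> item.merged
    #   True
    #   >>> DirstateItem(wc_tracked=True, parentfiledata=(0o644, 12, 0)).mode
    #   420
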
    @classmethod
    def new_added(cls):
        """constructor to help legacy API to build a new "added" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = False
        instance._p2_tracked = False
        return instance

    @classmethod
    def new_merged(cls):
        """constructor to help legacy API to build a new "merged" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True  # might not be True because of rename ?
        instance._p2_tracked = True  # might not be True because of rename ?
        instance._merged = True
        return instance

    @classmethod
    def new_from_p2(cls):
        """constructor to help legacy API to build a new "from_p2" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = False  # might actually be True
        instance._p2_tracked = True
        instance._clean_p2 = True
        return instance

    @classmethod
    def new_possibly_dirty(cls):
        """constructor to help legacy API to build a new "possibly_dirty" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True
        instance._possibly_dirty = True
        return instance

    @classmethod
    def new_normal(cls, mode, size, mtime):
        """constructor to help legacy API to build a new "normal" item

        Should eventually be removed
        """
        assert size != FROM_P2
        assert size != NONNORMAL
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True
        instance._mode = mode
        instance._size = size
        instance._mtime = mtime
        return instance

    @classmethod
    def from_v1_data(cls, state, mode, size, mtime):
        """Build a new DirstateItem object from V1 data

        Since the dirstate-v1 format is frozen, the signature of this function
        is not expected to change, unlike the __init__ one.
        """
        if state == b'm':
            return cls.new_merged()
        elif state == b'a':
            return cls.new_added()
        elif state == b'r':
            instance = cls()
            instance._wc_tracked = False
            if size == NONNORMAL:
                instance._merged = True
                instance._p1_tracked = (
                    True  # might not be True because of rename ?
                )
                instance._p2_tracked = (
                    True  # might not be True because of rename ?
                )
            elif size == FROM_P2:
                instance._clean_p2 = True
                instance._p1_tracked = (
                    False  # We actually don't know (file history)
                )
                instance._p2_tracked = True
            else:
                instance._p1_tracked = True
            return instance
        elif state == b'n':
            if size == FROM_P2:
                return cls.new_from_p2()
            elif size == NONNORMAL:
                return cls.new_possibly_dirty()
            elif mtime == AMBIGUOUS_TIME:
                instance = cls.new_normal(mode, size, 42)
                instance._mtime = None
                instance._possibly_dirty = True
                return instance
            else:
                return cls.new_normal(mode, size, mtime)
        else:
            raise RuntimeError(b'unknown state: %s' % state)

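    # An illustrative sketch of how a few dirstate-v1 tuples map to
    # DirstateItem semantics through `from_v1_data`, assuming the sentinel
    # constants above; the mode/size/mtime values are made-up examples:
    #
    #   >>> DirstateItem.from_v1_data(b'a', 0, NONNORMAL, AMBIGUOUS_TIME).added
    #   True
    #   >>> DirstateItem.from_v1_data(b'r', 0, NONNORMAL, 0).merged
    #   False
    #   >>> item = DirstateItem.from_v1_data(b'n', 0o644, 12, 1234567890)
    #   >>> (item.tracked, item.maybe_clean)
    #   (True, True)
    #
    # Note the second case: a removed-while-merged entry records `_merged`
    # internally, but the `merged` property also requires `wc_tracked`.
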
    def set_possibly_dirty(self):
        """Mark a file as "possibly dirty"

        This means the next status call will have to actually check its content
        to make sure it is correct.
        """
        self._possibly_dirty = True

    def set_clean(self, mode, size, mtime):
        """mark a file as "clean", cancelling any earlier "possibly dirty" call

        Note: this function is a descendant of `dirstate.normal` and is
        currently expected to be called on "normal" entries only. There is no
        reason for this not to change in the future, as long as the code is
        updated to preserve the proper state of the non-normal files.
        """
        self._wc_tracked = True
        self._p1_tracked = True
        self._p2_tracked = False  # this might be wrong
        self._merged = False
        self._clean_p2 = False
        self._possibly_dirty = False
        self._mode = mode
        self._size = size
        self._mtime = mtime

    def set_tracked(self):
        """mark a file as tracked in the working copy

        This will ultimately be called by commands like `hg add`.
        """
        self._wc_tracked = True
        # `set_tracked` is replacing various `normallookup` calls. So we set
        # "possibly dirty" to stay on the safe side.
        #
        # Consider dropping this in the future in favor of something less broad.
        self._possibly_dirty = True

    def set_untracked(self):
        """mark a file as untracked in the working copy

        This will ultimately be called by commands like `hg remove`.
        """
        self._wc_tracked = False
        self._mode = None
        self._size = None
        self._mtime = None

    def drop_merge_data(self):
        """remove all "merge-only" data from a DirstateItem

        This is to be called by the dirstatemap code when the second parent is dropped
        """
        if not (self.merged or self.from_p2):
            return
        self._p1_tracked = self.merged  # why is this not already properly set ?

        self._merged = False
        self._clean_p1 = False
        self._clean_p2 = False
        self._p2_tracked = False
        self._possibly_dirty = True
        self._mode = None
        self._size = None
        self._mtime = None

    @property
    def mode(self):
        return self.v1_mode()

    @property
    def size(self):
        return self.v1_size()

    @property
    def mtime(self):
        return self.v1_mtime()

    @property
    def state(self):
        """
        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        if not self.any_tracked:
            return b'?'
        return self.v1_state()

    @property
    def tracked(self):
        """True if the file is tracked in the working copy"""
        return self._wc_tracked

    @property
    def any_tracked(self):
        """True if the file is tracked anywhere (wc or parents)"""
        return self._wc_tracked or self._p1_tracked or self._p2_tracked

    @property
    def added(self):
        """True if the file has been added"""
        return self._wc_tracked and not (self._p1_tracked or self._p2_tracked)

    @property
    def maybe_clean(self):
        """True if the file has a chance to be in the "clean" state"""
        if not self._wc_tracked:
            return False
        elif self.added:
            return False
        elif self._merged:
            return False
        elif self._clean_p2:
            return False
        return True

    @property
    def merged(self):
        """True if the file has been merged

        Should only be set if a merge is in progress in the dirstate
        """
        return self._wc_tracked and self._merged

    @property
    def from_p2(self):
        """True if the file has been fetched from p2 during the current merge

        This is only True if the file is currently tracked.

        Should only be set if a merge is in progress in the dirstate
        """
        if not self._wc_tracked:
            return False
        return self._clean_p2

    @property
    def removed(self):
        """True if the file has been removed"""
        return not self._wc_tracked and (self._p1_tracked or self._p2_tracked)

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.removed:
            return b'r'
        elif self.merged:
            return b'm'
        elif self.added:
            return b'a'
        else:
            return b'n'

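    # A sketch of the v1 state precedence above ("removed" wins over
    # "merged", which wins over "added"), using assumed example items:
    #
    #   >>> DirstateItem(p1_tracked=True).v1_state()  # tracked in p1 only
    #   b'r'
    #   >>> DirstateItem(wc_tracked=True, p1_tracked=True, merged=True).v1_state()
    #   b'm'
    #   >>> DirstateItem(wc_tracked=True).v1_state()
    #   b'a'
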
    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode if self._mode is not None else 0

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        if not self.any_tracked:
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.removed and self._merged:
            return NONNORMAL
        elif self.removed and self._clean_p2:
            return FROM_P2
        elif self.removed:
            return 0
        elif self.merged:
            return FROM_P2
        elif self.added:
            return NONNORMAL
        elif self.from_p2:
            return FROM_P2
        elif self._possibly_dirty:
            return self._size if self._size is not None else NONNORMAL
        else:
            return self._size

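    # Quick reference, drawn from the branches above: in v1 the "size" field
    # doubles as a flag channel, so two sentinel values never collide with a
    # real file size:
    #
    #   NONNORMAL (-1)  removed+merged, added, or size unknown while dirty
    #   FROM_P2   (-2)  removed+clean_p2, merged, or fetched from p2
    #   0               plain removed entries
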
    def v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        if not self.any_tracked:
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.removed:
            return 0
        elif self._possibly_dirty:
            return AMBIGUOUS_TIME
        elif self.merged:
            return AMBIGUOUS_TIME
        elif self.added:
            return AMBIGUOUS_TIME
        elif self.from_p2:
            return AMBIGUOUS_TIME
        else:
            return self._mtime if self._mtime is not None else 0

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self.v1_state() == b'n' and self.v1_mtime() == now

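# A sketch of why `need_delay` matters, with assumed example values: an entry
# whose recorded mtime equals "now" cannot be told apart from a file modified
# later within the same second, so pack_dirstate() marks it possibly dirty:
#
#   >>> item = DirstateItem.new_normal(0o644, 12, 42)
#   >>> item.need_delay(42)
#   True
#   >>> item.need_delay(43)
#   False
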
def gettype(q):
    return int(q & 0xFFFF)

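# A small sketch of the packing that `gettype` undoes: the first field of an
# index entry stores the byte offset in the high bits and the type flags in
# the low 16 bits (see `revlogutils.offset_type` and its use in
# `BaseIndexObject.__getitem__`). With an assumed packed value, where 0x8000
# stands for a hypothetical flag bit:
#
#   >>> q = (4096 << 16) | 0x8000
#   >>> gettype(q)
#   32768
#   >>> q >> 16  # the remaining high bits hold the offset
#   4096
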
class BaseIndexObject(object):
    # Can I be passed to an algorithm implemented in Rust ?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        return self.index_format.size

    @property
    def nodemap(self):
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exists in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p

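# Sketch of the in-memory entry tuple the index classes trade in; the field
# order is inferred from the code in this file (positions 8-11 are padded
# with zeros and COMP_MODE_INLINE for v1 indexes, and rewritten by
# `replace_sidedata_info` for v2):
#
#   ( offset_flags,       # 0: byte offset << 16 | type flags
#     comp_len,           # 1: compressed length
#     uncomp_len,         # 2: uncompressed length
#     base_rev,           # 3: delta base revision
#     link_rev,           # 4: linked changelog revision
#     p1_rev, p2_rev,     # 5, 6: parent revisions
#     node,               # 7: nodeid, see `self[r][7]` in `_nodemap`
#     sd_offset, sd_len,  # 8, 9: sidedata location (v2 only)
#     data_comp,          # 10: data compression mode
#     sidedata_comp )     # 11: sidedata compression mode
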
class IndexObject(BaseIndexObject):
    def __init__(self, data):
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        return i * self.entry_size

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]


class PersistentNodeMapIndexObject(IndexObject):
    """a debug-oriented class to test the persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to the persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide a full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None


class InlinedIndexObject(BaseIndexObject):
    def __init__(self, data, inline=0):
        self._data = data
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        return self._offsets[i]

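# Sketch of the inline layout `_inline_scan` walks: each index entry is
# immediately followed by that revision's compressed data, whose length `s`
# is read from the entry itself (the `>i` field right after the first
# `big_int_size` bytes):
#
#   |entry 0|data 0 (s0 bytes)|entry 1|data 1 (s1 bytes)|...
#
# so the offset of entry i+1 is offset(i) + entry_size + s_i, which is
# exactly the `off += self.entry_size + s` step above.
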
def parse_index2(data, inline, revlogv2=False):
    if not inline:
        cls = IndexObject2 if revlogv2 else IndexObject
        return cls(data), None
    cls = InlinedIndexObject
    return cls(data, inline), (0, data)


def parse_index_cl_v2(data):
    return IndexChangelogV2(data), None


class IndexObject2(IndexObject):
    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)


class IndexChangelogV2(IndexObject2):
    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        items = self.index_format.unpack(data)
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)


def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always returns a PersistentNodeMapIndexObject"""
    return PersistentNodeMapIndexObject(data), None


def parse_dirstate(dmap, copymap, st):
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    format = b">cllll"
    e_size = struct.calcsize(format)
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        pos2 = pos1 + e_size
        e = _unpack(b">cllll", st[pos1:pos2])  # a literal here is faster
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if b'\0' in f:
            f, c = f.split(b'\0')
            copymap[f] = c
        dmap[f] = DirstateItem.from_v1_data(*e[:4])
    return parents

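# A minimal, self-contained sketch of the dirstate-v1 record layout that
# `parse_dirstate` consumes: two 20-byte parents, then for each file a
# `>cllll` header (state, mode, size, mtime, filename length) followed by the
# filename, with an optional `\0` + copy source appended. The values below
# are made up for illustration:
#
#   >>> import struct
#   >>> name = b'foo.txt'
#   >>> record = struct.pack(b'>cllll', b'n', 0o644, 12, 0, len(name)) + name
#   >>> dmap, copymap = {}, {}
#   >>> parents = parse_dirstate(dmap, copymap, b'\x00' * 40 + record)
#   >>> dmap[b'foo.txt'].state
#   b'n'
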
def pack_dirstate(dmap, copymap, pl, now):
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e.set_possibly_dirty()

        if f in copymap:
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(
            b">cllll",
            e.v1_state(),
            e.v1_mode(),
            e.v1_size(),
            e.v1_mtime(),
            len(f),
        )
        write(e)
        write(f)
    return cs.getvalue()
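
# A round-trip sketch tying the two functions together, with assumed example
# values (the mtime of 42 differs from `now`, so need_delay() does not
# trigger and the entry keeps its recorded time):
#
#   >>> dmap = {b'foo.txt': DirstateItem.new_normal(0o644, 12, 42)}
#   >>> blob = pack_dirstate(dmap, {}, [b'\x00' * 20, b'\x00' * 20], 100)
#   >>> dmap2, copymap2 = {}, {}
#   >>> parse_dirstate(dmap2, copymap2, blob)[0] == b'\x00' * 20
#   True
#   >>> dmap2[b'foo.txt'].v1_size()
#   12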