dirstate-v2: Use attributes as intended instead of properties in v2_data()...
Simon Sapin
r49043:db589732 default
@@ -1,789 +1,789 b''
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11 import struct
11 import struct
12 import zlib
12 import zlib
13
13
14 from ..node import (
14 from ..node import (
15 nullrev,
15 nullrev,
16 sha1nodeconstants,
16 sha1nodeconstants,
17 )
17 )
18 from ..thirdparty import attr
18 from ..thirdparty import attr
19 from .. import (
19 from .. import (
20 error,
20 error,
21 pycompat,
21 pycompat,
22 revlogutils,
22 revlogutils,
23 util,
23 util,
24 )
24 )
25
25
26 from ..revlogutils import nodemap as nodemaputil
26 from ..revlogutils import nodemap as nodemaputil
27 from ..revlogutils import constants as revlog_constants
27 from ..revlogutils import constants as revlog_constants
28
28
29 stringio = pycompat.bytesio
29 stringio = pycompat.bytesio
30
30
31
31
32 _pack = struct.pack
32 _pack = struct.pack
33 _unpack = struct.unpack
33 _unpack = struct.unpack
34 _compress = zlib.compress
34 _compress = zlib.compress
35 _decompress = zlib.decompress
35 _decompress = zlib.decompress
36
36
37
37
38 # a special value used internally for `size` if the file comes from the other parent
38 # a special value used internally for `size` if the file comes from the other parent
39 FROM_P2 = -2
39 FROM_P2 = -2
40
40
41 # a special value used internally for `size` if the file is modified/merged/added
41 # a special value used internally for `size` if the file is modified/merged/added
42 NONNORMAL = -1
42 NONNORMAL = -1
43
43
44 # a special value used internally for `time` if the time is ambiguous
44 # a special value used internally for `time` if the time is ambiguous
45 AMBIGUOUS_TIME = -1
45 AMBIGUOUS_TIME = -1
46
46
47 # Bits of the `flags` byte inside a node in the file format
47 # Bits of the `flags` byte inside a node in the file format
48 DIRSTATE_V2_WDIR_TRACKED = 1 << 0
48 DIRSTATE_V2_WDIR_TRACKED = 1 << 0
49 DIRSTATE_V2_P1_TRACKED = 1 << 1
49 DIRSTATE_V2_P1_TRACKED = 1 << 1
50 DIRSTATE_V2_P2_INFO = 1 << 2
50 DIRSTATE_V2_P2_INFO = 1 << 2
51 DIRSTATE_V2_HAS_MODE_AND_SIZE = 1 << 3
51 DIRSTATE_V2_HAS_MODE_AND_SIZE = 1 << 3
52 DIRSTATE_V2_HAS_MTIME = 1 << 4
52 DIRSTATE_V2_HAS_MTIME = 1 << 4
53 DIRSTATE_V2_MODE_EXEC_PERM = 1 << 5
53 DIRSTATE_V2_MODE_EXEC_PERM = 1 << 5
54 DIRSTATE_V2_MODE_IS_SYMLINK = 1 << 6
54 DIRSTATE_V2_MODE_IS_SYMLINK = 1 << 6
55
55
56
56
57 @attr.s(slots=True, init=False)
57 @attr.s(slots=True, init=False)
58 class DirstateItem(object):
58 class DirstateItem(object):
59 """represent a dirstate entry
59 """represent a dirstate entry
60
60
61 It holds multiple attributes
61 It holds multiple attributes
62
62
63 # about file tracking
63 # about file tracking
64 - wc_tracked: is the file tracked by the working copy
64 - wc_tracked: is the file tracked by the working copy
65 - p1_tracked: is the file tracked in the working copy's first parent
65 - p1_tracked: is the file tracked in the working copy's first parent
66 - p2_info: the file has been involved in some merge operation. Either
66 - p2_info: the file has been involved in some merge operation. Either
67 because it was actually merged, or because the p2 version was
67 because it was actually merged, or because the p2 version was
68 ahead, or because some rename moved it there. In any case
68 ahead, or because some rename moved it there. In any case
69 `hg status` will want it displayed as modified.
69 `hg status` will want it displayed as modified.
70
70
71 # about the file state expected from p1 manifest:
71 # about the file state expected from p1 manifest:
72 - mode: the file mode in p1
72 - mode: the file mode in p1
73 - size: the file size in p1
73 - size: the file size in p1
74
74
75 These values can be set to None, which means we don't have a meaningful value
75 These values can be set to None, which means we don't have a meaningful value
76 to compare with, either because we don't really care about them as their
76 to compare with, either because we don't really care about them as their
77 `status` is known without having to look at the disk, or because we don't
77 `status` is known without having to look at the disk, or because we don't
78 know them right now and a full comparison will be needed to find out if
78 know them right now and a full comparison will be needed to find out if
79 the file is clean.
79 the file is clean.
80
80
81 # about the file state on disk last time we saw it:
81 # about the file state on disk last time we saw it:
82 - mtime: the last known clean mtime for the file.
82 - mtime: the last known clean mtime for the file.
83
83
84 This value can be set to None if no cacheable state exists, either because we
84 This value can be set to None if no cacheable state exists, either because we
85 do not care (see previous section) or because we could not cache something
85 do not care (see previous section) or because we could not cache something
86 yet.
86 yet.
87 """
87 """
88
88
89 _wc_tracked = attr.ib()
89 _wc_tracked = attr.ib()
90 _p1_tracked = attr.ib()
90 _p1_tracked = attr.ib()
91 _p2_info = attr.ib()
91 _p2_info = attr.ib()
92 _mode = attr.ib()
92 _mode = attr.ib()
93 _size = attr.ib()
93 _size = attr.ib()
94 _mtime = attr.ib()
94 _mtime = attr.ib()
95
95
96 def __init__(
96 def __init__(
97 self,
97 self,
98 wc_tracked=False,
98 wc_tracked=False,
99 p1_tracked=False,
99 p1_tracked=False,
100 p2_info=False,
100 p2_info=False,
101 has_meaningful_data=True,
101 has_meaningful_data=True,
102 has_meaningful_mtime=True,
102 has_meaningful_mtime=True,
103 parentfiledata=None,
103 parentfiledata=None,
104 ):
104 ):
105 self._wc_tracked = wc_tracked
105 self._wc_tracked = wc_tracked
106 self._p1_tracked = p1_tracked
106 self._p1_tracked = p1_tracked
107 self._p2_info = p2_info
107 self._p2_info = p2_info
108
108
109 self._mode = None
109 self._mode = None
110 self._size = None
110 self._size = None
111 self._mtime = None
111 self._mtime = None
112 if parentfiledata is None:
112 if parentfiledata is None:
113 has_meaningful_mtime = False
113 has_meaningful_mtime = False
114 has_meaningful_data = False
114 has_meaningful_data = False
115 if has_meaningful_data:
115 if has_meaningful_data:
116 self._mode = parentfiledata[0]
116 self._mode = parentfiledata[0]
117 self._size = parentfiledata[1]
117 self._size = parentfiledata[1]
118 if has_meaningful_mtime:
118 if has_meaningful_mtime:
119 self._mtime = parentfiledata[2]
119 self._mtime = parentfiledata[2]
120
120
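
For illustration, a minimal sketch of how callers might build these items (not part of parsers.py; it assumes the module is importable as `mercurial.pure.parsers`). `parentfiledata` is the `(mode, size, mtime)` tuple, and the two `has_meaningful_*` flags decide which parts are kept:

from mercurial.pure import parsers

# a clean, tracked file known in p1: mode/size/mtime are all cached
clean = parsers.DirstateItem(
    wc_tracked=True,
    p1_tracked=True,
    parentfiledata=(0o644, 12, 1633000000),  # arbitrary example values
)

# a freshly added file: no parent data, so mode/size/mtime stay None
added = parsers.DirstateItem(wc_tracked=True)

assert clean.maybe_clean and not added.maybe_clean
assert added.added and not clean.added
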
121 @classmethod
121 @classmethod
122 def from_v2_data(cls, flags, size, mtime):
122 def from_v2_data(cls, flags, size, mtime):
123 """Build a new DirstateItem object from V2 data"""
123 """Build a new DirstateItem object from V2 data"""
124 has_mode_size = bool(flags & DIRSTATE_V2_HAS_MODE_AND_SIZE)
124 has_mode_size = bool(flags & DIRSTATE_V2_HAS_MODE_AND_SIZE)
125 mode = None
125 mode = None
126 if has_mode_size:
126 if has_mode_size:
127 assert stat.S_IXUSR == 0o100
127 assert stat.S_IXUSR == 0o100
128 if flags & DIRSTATE_V2_MODE_EXEC_PERM:
128 if flags & DIRSTATE_V2_MODE_EXEC_PERM:
129 mode = 0o755
129 mode = 0o755
130 else:
130 else:
131 mode = 0o644
131 mode = 0o644
132 if flags & DIRSTATE_V2_MODE_IS_SYMLINK:
132 if flags & DIRSTATE_V2_MODE_IS_SYMLINK:
133 mode |= stat.S_IFLNK
133 mode |= stat.S_IFLNK
134 else:
134 else:
135 mode |= stat.S_IFREG
135 mode |= stat.S_IFREG
136 return cls(
136 return cls(
137 wc_tracked=bool(flags & DIRSTATE_V2_WDIR_TRACKED),
137 wc_tracked=bool(flags & DIRSTATE_V2_WDIR_TRACKED),
138 p1_tracked=bool(flags & DIRSTATE_V2_P1_TRACKED),
138 p1_tracked=bool(flags & DIRSTATE_V2_P1_TRACKED),
139 p2_info=bool(flags & DIRSTATE_V2_P2_INFO),
139 p2_info=bool(flags & DIRSTATE_V2_P2_INFO),
140 has_meaningful_data=has_mode_size,
140 has_meaningful_data=has_mode_size,
141 has_meaningful_mtime=bool(flags & DIRSTATE_V2_HAS_MTIME),
141 has_meaningful_mtime=bool(flags & DIRSTATE_V2_HAS_MTIME),
142 parentfiledata=(mode, size, mtime),
142 parentfiledata=(mode, size, mtime),
143 )
143 )
144
144
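
For illustration, a sketch of how the flag byte defined above drives `from_v2_data` (same import assumption as earlier; the size and mtime values are arbitrary):

from mercurial.pure import parsers
import stat

flags = (
    parsers.DIRSTATE_V2_WDIR_TRACKED
    | parsers.DIRSTATE_V2_P1_TRACKED
    | parsers.DIRSTATE_V2_HAS_MODE_AND_SIZE
    | parsers.DIRSTATE_V2_HAS_MTIME
    | parsers.DIRSTATE_V2_MODE_EXEC_PERM
)
item = parsers.DirstateItem.from_v2_data(flags, 2048, 1633000000)
assert item.tracked and item.p1_tracked
# EXEC_PERM set and IS_SYMLINK unset -> a regular file with permission bits 0755
assert stat.S_IMODE(item.mode) == 0o755
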
145 @classmethod
145 @classmethod
146 def from_v1_data(cls, state, mode, size, mtime):
146 def from_v1_data(cls, state, mode, size, mtime):
147 """Build a new DirstateItem object from V1 data
147 """Build a new DirstateItem object from V1 data
148
148
149 Since the dirstate-v1 format is frozen, the signature of this function
149 Since the dirstate-v1 format is frozen, the signature of this function
150 is not expected to change, unlike the __init__ one.
150 is not expected to change, unlike the __init__ one.
151 """
151 """
152 if state == b'm':
152 if state == b'm':
153 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
153 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
154 elif state == b'a':
154 elif state == b'a':
155 return cls(wc_tracked=True)
155 return cls(wc_tracked=True)
156 elif state == b'r':
156 elif state == b'r':
157 if size == NONNORMAL:
157 if size == NONNORMAL:
158 p1_tracked = True
158 p1_tracked = True
159 p2_info = True
159 p2_info = True
160 elif size == FROM_P2:
160 elif size == FROM_P2:
161 p1_tracked = False
161 p1_tracked = False
162 p2_info = True
162 p2_info = True
163 else:
163 else:
164 p1_tracked = True
164 p1_tracked = True
165 p2_info = False
165 p2_info = False
166 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
166 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
167 elif state == b'n':
167 elif state == b'n':
168 if size == FROM_P2:
168 if size == FROM_P2:
169 return cls(wc_tracked=True, p2_info=True)
169 return cls(wc_tracked=True, p2_info=True)
170 elif size == NONNORMAL:
170 elif size == NONNORMAL:
171 return cls(wc_tracked=True, p1_tracked=True)
171 return cls(wc_tracked=True, p1_tracked=True)
172 elif mtime == AMBIGUOUS_TIME:
172 elif mtime == AMBIGUOUS_TIME:
173 return cls(
173 return cls(
174 wc_tracked=True,
174 wc_tracked=True,
175 p1_tracked=True,
175 p1_tracked=True,
176 has_meaningful_mtime=False,
176 has_meaningful_mtime=False,
177 parentfiledata=(mode, size, 42),
177 parentfiledata=(mode, size, 42),
178 )
178 )
179 else:
179 else:
180 return cls(
180 return cls(
181 wc_tracked=True,
181 wc_tracked=True,
182 p1_tracked=True,
182 p1_tracked=True,
183 parentfiledata=(mode, size, mtime),
183 parentfiledata=(mode, size, mtime),
184 )
184 )
185 else:
185 else:
186 raise RuntimeError(b'unknown state: %s' % state)
186 raise RuntimeError(b'unknown state: %s' % state)
187
187
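
For illustration, how the four v1 states map onto the new attributes (arguments other than the state byte are arbitrary here):

from mercurial.pure import parsers

normal = parsers.DirstateItem.from_v1_data(b'n', 0o644, 5, 1633000000)
merged = parsers.DirstateItem.from_v1_data(b'm', 0, 0, 0)
added = parsers.DirstateItem.from_v1_data(b'a', 0, 0, 0)
removed = parsers.DirstateItem.from_v1_data(b'r', 0, 0, 0)

assert normal.v1_state() == b'n'
assert merged.p2_info and merged.tracked
assert added.added
assert removed.removed and not removed.tracked
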
188 def set_possibly_dirty(self):
188 def set_possibly_dirty(self):
189 """Mark a file as "possibly dirty"
189 """Mark a file as "possibly dirty"
190
190
191 This means the next status call will have to actually check its content
191 This means the next status call will have to actually check its content
192 to make sure it is correct.
192 to make sure it is correct.
193 """
193 """
194 self._mtime = None
194 self._mtime = None
195
195
196 def set_clean(self, mode, size, mtime):
196 def set_clean(self, mode, size, mtime):
197 """mark a file as "clean", cancelling a potential "possibly dirty" call
197 """mark a file as "clean", cancelling a potential "possibly dirty" call
198
198
199 Note: this function is a descendant of `dirstate.normal` and is
199 Note: this function is a descendant of `dirstate.normal` and is
200 currently expected to be called on "normal" entries only. There is no
200 currently expected to be called on "normal" entries only. There is no
201 reason for this not to change in the future, as long as the code is
201 reason for this not to change in the future, as long as the code is
202 updated to preserve the proper state of the non-normal files.
202 updated to preserve the proper state of the non-normal files.
203 """
203 """
204 self._wc_tracked = True
204 self._wc_tracked = True
205 self._p1_tracked = True
205 self._p1_tracked = True
206 self._mode = mode
206 self._mode = mode
207 self._size = size
207 self._size = size
208 self._mtime = mtime
208 self._mtime = mtime
209
209
210 def set_tracked(self):
210 def set_tracked(self):
211 """mark a file as tracked in the working copy
211 """mark a file as tracked in the working copy
212
212
213 This will ultimately be called by commands like `hg add`.
213 This will ultimately be called by commands like `hg add`.
214 """
214 """
215 self._wc_tracked = True
215 self._wc_tracked = True
216 # `set_tracked` is replacing various `normallookup` calls. So we mark
216 # `set_tracked` is replacing various `normallookup` calls. So we mark
217 # the file as needing lookup
217 # the file as needing lookup
218 #
218 #
219 # Consider dropping this in the future in favor of something less broad.
219 # Consider dropping this in the future in favor of something less broad.
220 self._mtime = None
220 self._mtime = None
221
221
222 def set_untracked(self):
222 def set_untracked(self):
223 """mark a file as untracked in the working copy
223 """mark a file as untracked in the working copy
224
224
225 This will ultimately be called by commands like `hg remove`.
225 This will ultimately be called by commands like `hg remove`.
226 """
226 """
227 self._wc_tracked = False
227 self._wc_tracked = False
228 self._mode = None
228 self._mode = None
229 self._size = None
229 self._size = None
230 self._mtime = None
230 self._mtime = None
231
231
232 def drop_merge_data(self):
232 def drop_merge_data(self):
233 """remove all "merge-only" information from a DirstateItem
233 """remove all "merge-only" information from a DirstateItem
234
234
235 This is to be called by the dirstatemap code when the second parent is dropped
235 This is to be called by the dirstatemap code when the second parent is dropped
236 """
236 """
237 if self._p2_info:
237 if self._p2_info:
238 self._p2_info = False
238 self._p2_info = False
239 self._mode = None
239 self._mode = None
240 self._size = None
240 self._size = None
241 self._mtime = None
241 self._mtime = None
242
242
243 @property
243 @property
244 def mode(self):
244 def mode(self):
245 return self.v1_mode()
245 return self.v1_mode()
246
246
247 @property
247 @property
248 def size(self):
248 def size(self):
249 return self.v1_size()
249 return self.v1_size()
250
250
251 @property
251 @property
252 def mtime(self):
252 def mtime(self):
253 return self.v1_mtime()
253 return self.v1_mtime()
254
254
255 @property
255 @property
256 def state(self):
256 def state(self):
257 """
257 """
258 States are:
258 States are:
259 n normal
259 n normal
260 m needs merging
260 m needs merging
261 r marked for removal
261 r marked for removal
262 a marked for addition
262 a marked for addition
263
263
264 XXX This "state" is a bit obscure and mostly a direct expression of the
264 XXX This "state" is a bit obscure and mostly a direct expression of the
265 dirstatev1 format. It would make sense to ultimately deprecate it in
265 dirstatev1 format. It would make sense to ultimately deprecate it in
266 favor of the more "semantic" attributes.
266 favor of the more "semantic" attributes.
267 """
267 """
268 if not self.any_tracked:
268 if not self.any_tracked:
269 return b'?'
269 return b'?'
270 return self.v1_state()
270 return self.v1_state()
271
271
272 @property
272 @property
273 def tracked(self):
273 def tracked(self):
274 """True if the file is tracked in the working copy"""
274 """True if the file is tracked in the working copy"""
275 return self._wc_tracked
275 return self._wc_tracked
276
276
277 @property
277 @property
278 def any_tracked(self):
278 def any_tracked(self):
279 """True if the file is tracked anywhere (wc or parents)"""
279 """True if the file is tracked anywhere (wc or parents)"""
280 return self._wc_tracked or self._p1_tracked or self._p2_info
280 return self._wc_tracked or self._p1_tracked or self._p2_info
281
281
282 @property
282 @property
283 def added(self):
283 def added(self):
284 """True if the file has been added"""
284 """True if the file has been added"""
285 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
285 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
286
286
287 @property
287 @property
288 def maybe_clean(self):
288 def maybe_clean(self):
289 """True if the file has a chance to be in the "clean" state"""
289 """True if the file has a chance to be in the "clean" state"""
290 if not self._wc_tracked:
290 if not self._wc_tracked:
291 return False
291 return False
292 elif not self._p1_tracked:
292 elif not self._p1_tracked:
293 return False
293 return False
294 elif self._p2_info:
294 elif self._p2_info:
295 return False
295 return False
296 return True
296 return True
297
297
298 @property
298 @property
299 def p1_tracked(self):
299 def p1_tracked(self):
300 """True if the file is tracked in the first parent manifest"""
300 """True if the file is tracked in the first parent manifest"""
301 return self._p1_tracked
301 return self._p1_tracked
302
302
303 @property
303 @property
304 def p2_info(self):
304 def p2_info(self):
305 """True if the file needed to merge or apply any input from p2
305 """True if the file needed to merge or apply any input from p2
306
306
307 See the class documentation for details.
307 See the class documentation for details.
308 """
308 """
309 return self._wc_tracked and self._p2_info
309 return self._wc_tracked and self._p2_info
310
310
311 @property
311 @property
312 def removed(self):
312 def removed(self):
313 """True if the file has been removed"""
313 """True if the file has been removed"""
314 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
314 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
315
315
316 def v2_data(self):
316 def v2_data(self):
317 """Returns (flags, size, mtime) for v2 serialization; the mode is encoded in flags"""
317 """Returns (flags, size, mtime) for v2 serialization; the mode is encoded in flags"""
318 flags = 0
318 flags = 0
319 if self._wc_tracked:
319 if self._wc_tracked:
320 flags |= DIRSTATE_V2_WDIR_TRACKED
320 flags |= DIRSTATE_V2_WDIR_TRACKED
321 if self._p1_tracked:
321 if self._p1_tracked:
322 flags |= DIRSTATE_V2_P1_TRACKED
322 flags |= DIRSTATE_V2_P1_TRACKED
323 if self._p2_info:
323 if self._p2_info:
324 flags |= DIRSTATE_V2_P2_INFO
324 flags |= DIRSTATE_V2_P2_INFO
325 if self.mode is not None and self.size is not None:
325 if self._mode is not None and self._size is not None:
326 flags |= DIRSTATE_V2_HAS_MODE_AND_SIZE
326 flags |= DIRSTATE_V2_HAS_MODE_AND_SIZE
327 if self.mode & stat.S_IXUSR:
327 if self.mode & stat.S_IXUSR:
328 flags |= DIRSTATE_V2_MODE_EXEC_PERM
328 flags |= DIRSTATE_V2_MODE_EXEC_PERM
329 if stat.S_ISLNK(self.mode):
329 if stat.S_ISLNK(self.mode):
330 flags |= DIRSTATE_V2_MODE_IS_SYMLINK
330 flags |= DIRSTATE_V2_MODE_IS_SYMLINK
331 if self.mtime is not None:
331 if self._mtime is not None:
332 flags |= DIRSTATE_V2_HAS_MTIME
332 flags |= DIRSTATE_V2_HAS_MTIME
333 return (flags, self.size or 0, self.mtime or 0)
333 return (flags, self._size or 0, self._mtime or 0)
334
334
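
This is the method the changeset touches: it now reads the private `_mode`/`_size`/`_mtime` attributes directly instead of the v1-oriented properties, so an item whose cached data has been dropped serializes without the HAS_MODE_AND_SIZE / HAS_MTIME bits. A hedged round-trip sketch (not part of the module; note that the mode is folded into the flag byte, so the tuple is `(flags, size, mtime)`):

from mercurial.pure import parsers

item = parsers.DirstateItem(
    wc_tracked=True,
    p1_tracked=True,
    parentfiledata=(0o644, 7, 1633000000),
)
flags, size, mtime = item.v2_data()
assert flags & parsers.DIRSTATE_V2_HAS_MODE_AND_SIZE
assert flags & parsers.DIRSTATE_V2_HAS_MTIME

# the same entry can be rebuilt from the v2 triple
copy = parsers.DirstateItem.from_v2_data(flags, size, mtime)
assert (copy.v1_state(), copy.v1_size()) == (item.v1_state(), item.v1_size())

# dropping the cached mtime clears the HAS_MTIME bit on the next serialization
item.set_possibly_dirty()
flags, size, mtime = item.v2_data()
assert not flags & parsers.DIRSTATE_V2_HAS_MTIME and mtime == 0
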
335 def v1_state(self):
335 def v1_state(self):
336 """return a "state" suitable for v1 serialization"""
336 """return a "state" suitable for v1 serialization"""
337 if not self.any_tracked:
337 if not self.any_tracked:
338 # the object has no state to record, this is -currently-
338 # the object has no state to record, this is -currently-
339 # unsupported
339 # unsupported
340 raise RuntimeError('untracked item')
340 raise RuntimeError('untracked item')
341 elif self.removed:
341 elif self.removed:
342 return b'r'
342 return b'r'
343 elif self._p1_tracked and self._p2_info:
343 elif self._p1_tracked and self._p2_info:
344 return b'm'
344 return b'm'
345 elif self.added:
345 elif self.added:
346 return b'a'
346 return b'a'
347 else:
347 else:
348 return b'n'
348 return b'n'
349
349
350 def v1_mode(self):
350 def v1_mode(self):
351 """return a "mode" suitable for v1 serialization"""
351 """return a "mode" suitable for v1 serialization"""
352 return self._mode if self._mode is not None else 0
352 return self._mode if self._mode is not None else 0
353
353
354 def v1_size(self):
354 def v1_size(self):
355 """return a "size" suitable for v1 serialization"""
355 """return a "size" suitable for v1 serialization"""
356 if not self.any_tracked:
356 if not self.any_tracked:
357 # the object has no state to record, this is -currently-
357 # the object has no state to record, this is -currently-
358 # unsupported
358 # unsupported
359 raise RuntimeError('untracked item')
359 raise RuntimeError('untracked item')
360 elif self.removed and self._p1_tracked and self._p2_info:
360 elif self.removed and self._p1_tracked and self._p2_info:
361 return NONNORMAL
361 return NONNORMAL
362 elif self._p2_info:
362 elif self._p2_info:
363 return FROM_P2
363 return FROM_P2
364 elif self.removed:
364 elif self.removed:
365 return 0
365 return 0
366 elif self.added:
366 elif self.added:
367 return NONNORMAL
367 return NONNORMAL
368 elif self._size is None:
368 elif self._size is None:
369 return NONNORMAL
369 return NONNORMAL
370 else:
370 else:
371 return self._size
371 return self._size
372
372
373 def v1_mtime(self):
373 def v1_mtime(self):
374 """return a "mtime" suitable for v1 serialization"""
374 """return a "mtime" suitable for v1 serialization"""
375 if not self.any_tracked:
375 if not self.any_tracked:
376 # the object has no state to record, this is -currently-
376 # the object has no state to record, this is -currently-
377 # unsupported
377 # unsupported
378 raise RuntimeError('untracked item')
378 raise RuntimeError('untracked item')
379 elif self.removed:
379 elif self.removed:
380 return 0
380 return 0
381 elif self._mtime is None:
381 elif self._mtime is None:
382 return AMBIGUOUS_TIME
382 return AMBIGUOUS_TIME
383 elif self._p2_info:
383 elif self._p2_info:
384 return AMBIGUOUS_TIME
384 return AMBIGUOUS_TIME
385 elif not self._p1_tracked:
385 elif not self._p1_tracked:
386 return AMBIGUOUS_TIME
386 return AMBIGUOUS_TIME
387 else:
387 else:
388 return self._mtime
388 return self._mtime
389
389
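
Taken together, the v1_* helpers above rebuild the legacy `(state, mode, size, mtime)` tuple that `pack_dirstate` writes. A short illustrative sketch:

from mercurial.pure import parsers

item = parsers.DirstateItem.from_v1_data(b'n', 0o644, 5, 1633000000)
v1 = (item.v1_state(), item.v1_mode(), item.v1_size(), item.v1_mtime())
assert v1 == (b'n', 0o644, 5, 1633000000)

# an entry with p2 involvement comes back with the special v1 markers
merged = parsers.DirstateItem.from_v1_data(b'm', 0, 0, 0)
assert merged.v1_size() == parsers.FROM_P2
assert merged.v1_mtime() == parsers.AMBIGUOUS_TIME
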
390 def need_delay(self, now):
390 def need_delay(self, now):
391 """True if the stored mtime would be ambiguous with the current time"""
391 """True if the stored mtime would be ambiguous with the current time"""
392 return self.v1_state() == b'n' and self.v1_mtime() == now
392 return self.v1_state() == b'n' and self.v1_mtime() == now
393
393
394
394
395 def gettype(q):
395 def gettype(q):
396 return int(q & 0xFFFF)
396 return int(q & 0xFFFF)
397
397
398
398
399 class BaseIndexObject(object):
399 class BaseIndexObject(object):
400 # Can I be passed to an algorithm implemented in Rust?
400 # Can I be passed to an algorithm implemented in Rust?
401 rust_ext_compat = 0
401 rust_ext_compat = 0
402 # Format of an index entry according to Python's `struct` language
402 # Format of an index entry according to Python's `struct` language
403 index_format = revlog_constants.INDEX_ENTRY_V1
403 index_format = revlog_constants.INDEX_ENTRY_V1
404 # Size of a C unsigned long long int, platform independent
404 # Size of a C unsigned long long int, platform independent
405 big_int_size = struct.calcsize(b'>Q')
405 big_int_size = struct.calcsize(b'>Q')
406 # Size of a C long int, platform independent
406 # Size of a C long int, platform independent
407 int_size = struct.calcsize(b'>i')
407 int_size = struct.calcsize(b'>i')
408 # An empty index entry, used as a default value to be overridden, or nullrev
408 # An empty index entry, used as a default value to be overridden, or nullrev
409 null_item = (
409 null_item = (
410 0,
410 0,
411 0,
411 0,
412 0,
412 0,
413 -1,
413 -1,
414 -1,
414 -1,
415 -1,
415 -1,
416 -1,
416 -1,
417 sha1nodeconstants.nullid,
417 sha1nodeconstants.nullid,
418 0,
418 0,
419 0,
419 0,
420 revlog_constants.COMP_MODE_INLINE,
420 revlog_constants.COMP_MODE_INLINE,
421 revlog_constants.COMP_MODE_INLINE,
421 revlog_constants.COMP_MODE_INLINE,
422 )
422 )
423
423
424 @util.propertycache
424 @util.propertycache
425 def entry_size(self):
425 def entry_size(self):
426 return self.index_format.size
426 return self.index_format.size
427
427
428 @property
428 @property
429 def nodemap(self):
429 def nodemap(self):
430 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
430 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
431 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
431 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
432 return self._nodemap
432 return self._nodemap
433
433
434 @util.propertycache
434 @util.propertycache
435 def _nodemap(self):
435 def _nodemap(self):
436 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
436 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
437 for r in range(0, len(self)):
437 for r in range(0, len(self)):
438 n = self[r][7]
438 n = self[r][7]
439 nodemap[n] = r
439 nodemap[n] = r
440 return nodemap
440 return nodemap
441
441
442 def has_node(self, node):
442 def has_node(self, node):
443 """return True if the node exists in the index"""
443 """return True if the node exists in the index"""
444 return node in self._nodemap
444 return node in self._nodemap
445
445
446 def rev(self, node):
446 def rev(self, node):
447 """return a revision for a node
447 """return a revision for a node
448
448
449 If the node is unknown, raise a RevlogError"""
449 If the node is unknown, raise a RevlogError"""
450 return self._nodemap[node]
450 return self._nodemap[node]
451
451
452 def get_rev(self, node):
452 def get_rev(self, node):
453 """return a revision for a node
453 """return a revision for a node
454
454
455 If the node is unknown, return None"""
455 If the node is unknown, return None"""
456 return self._nodemap.get(node)
456 return self._nodemap.get(node)
457
457
458 def _stripnodes(self, start):
458 def _stripnodes(self, start):
459 if '_nodemap' in vars(self):
459 if '_nodemap' in vars(self):
460 for r in range(start, len(self)):
460 for r in range(start, len(self)):
461 n = self[r][7]
461 n = self[r][7]
462 del self._nodemap[n]
462 del self._nodemap[n]
463
463
464 def clearcaches(self):
464 def clearcaches(self):
465 self.__dict__.pop('_nodemap', None)
465 self.__dict__.pop('_nodemap', None)
466
466
467 def __len__(self):
467 def __len__(self):
468 return self._lgt + len(self._extra)
468 return self._lgt + len(self._extra)
469
469
470 def append(self, tup):
470 def append(self, tup):
471 if '_nodemap' in vars(self):
471 if '_nodemap' in vars(self):
472 self._nodemap[tup[7]] = len(self)
472 self._nodemap[tup[7]] = len(self)
473 data = self._pack_entry(len(self), tup)
473 data = self._pack_entry(len(self), tup)
474 self._extra.append(data)
474 self._extra.append(data)
475
475
476 def _pack_entry(self, rev, entry):
476 def _pack_entry(self, rev, entry):
477 assert entry[8] == 0
477 assert entry[8] == 0
478 assert entry[9] == 0
478 assert entry[9] == 0
479 return self.index_format.pack(*entry[:8])
479 return self.index_format.pack(*entry[:8])
480
480
481 def _check_index(self, i):
481 def _check_index(self, i):
482 if not isinstance(i, int):
482 if not isinstance(i, int):
483 raise TypeError(b"expecting int indexes")
483 raise TypeError(b"expecting int indexes")
484 if i < 0 or i >= len(self):
484 if i < 0 or i >= len(self):
485 raise IndexError
485 raise IndexError
486
486
487 def __getitem__(self, i):
487 def __getitem__(self, i):
488 if i == -1:
488 if i == -1:
489 return self.null_item
489 return self.null_item
490 self._check_index(i)
490 self._check_index(i)
491 if i >= self._lgt:
491 if i >= self._lgt:
492 data = self._extra[i - self._lgt]
492 data = self._extra[i - self._lgt]
493 else:
493 else:
494 index = self._calculate_index(i)
494 index = self._calculate_index(i)
495 data = self._data[index : index + self.entry_size]
495 data = self._data[index : index + self.entry_size]
496 r = self._unpack_entry(i, data)
496 r = self._unpack_entry(i, data)
497 if self._lgt and i == 0:
497 if self._lgt and i == 0:
498 offset = revlogutils.offset_type(0, gettype(r[0]))
498 offset = revlogutils.offset_type(0, gettype(r[0]))
499 r = (offset,) + r[1:]
499 r = (offset,) + r[1:]
500 return r
500 return r
501
501
502 def _unpack_entry(self, rev, data):
502 def _unpack_entry(self, rev, data):
503 r = self.index_format.unpack(data)
503 r = self.index_format.unpack(data)
504 r = r + (
504 r = r + (
505 0,
505 0,
506 0,
506 0,
507 revlog_constants.COMP_MODE_INLINE,
507 revlog_constants.COMP_MODE_INLINE,
508 revlog_constants.COMP_MODE_INLINE,
508 revlog_constants.COMP_MODE_INLINE,
509 )
509 )
510 return r
510 return r
511
511
512 def pack_header(self, header):
512 def pack_header(self, header):
513 """pack header information as binary"""
513 """pack header information as binary"""
514 v_fmt = revlog_constants.INDEX_HEADER
514 v_fmt = revlog_constants.INDEX_HEADER
515 return v_fmt.pack(header)
515 return v_fmt.pack(header)
516
516
517 def entry_binary(self, rev):
517 def entry_binary(self, rev):
518 """return the raw binary string representing a revision"""
518 """return the raw binary string representing a revision"""
519 entry = self[rev]
519 entry = self[rev]
520 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
520 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
521 if rev == 0:
521 if rev == 0:
522 p = p[revlog_constants.INDEX_HEADER.size :]
522 p = p[revlog_constants.INDEX_HEADER.size :]
523 return p
523 return p
524
524
525
525
526 class IndexObject(BaseIndexObject):
526 class IndexObject(BaseIndexObject):
527 def __init__(self, data):
527 def __init__(self, data):
528 assert len(data) % self.entry_size == 0, (
528 assert len(data) % self.entry_size == 0, (
529 len(data),
529 len(data),
530 self.entry_size,
530 self.entry_size,
531 len(data) % self.entry_size,
531 len(data) % self.entry_size,
532 )
532 )
533 self._data = data
533 self._data = data
534 self._lgt = len(data) // self.entry_size
534 self._lgt = len(data) // self.entry_size
535 self._extra = []
535 self._extra = []
536
536
537 def _calculate_index(self, i):
537 def _calculate_index(self, i):
538 return i * self.entry_size
538 return i * self.entry_size
539
539
540 def __delitem__(self, i):
540 def __delitem__(self, i):
541 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
541 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
542 raise ValueError(b"deleting slices only supports a:-1 with step 1")
542 raise ValueError(b"deleting slices only supports a:-1 with step 1")
543 i = i.start
543 i = i.start
544 self._check_index(i)
544 self._check_index(i)
545 self._stripnodes(i)
545 self._stripnodes(i)
546 if i < self._lgt:
546 if i < self._lgt:
547 self._data = self._data[: i * self.entry_size]
547 self._data = self._data[: i * self.entry_size]
548 self._lgt = i
548 self._lgt = i
549 self._extra = []
549 self._extra = []
550 else:
550 else:
551 self._extra = self._extra[: i - self._lgt]
551 self._extra = self._extra[: i - self._lgt]
552
552
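
For illustration, a hedged sketch of driving this class directly (real callers go through the revlog machinery). It packs a single entry with the class's own `index_format` and `null_item`, so no on-disk layout is hard-coded here; the node value is made up:

from mercurial.pure import parsers

node = b'\x11' * 20
entry = parsers.IndexObject.null_item[:7] + (node,)
data = parsers.IndexObject.index_format.pack(*entry)
idx = parsers.IndexObject(data)

assert len(idx) == 1
assert idx.has_node(node) and idx.rev(node) == 0
assert idx.get_rev(b'\x22' * 20) is None

# only `a:-1` slices are supported: this strips revision 0 and everything after it
del idx[0:-1]
assert len(idx) == 0
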
553
553
554 class PersistentNodeMapIndexObject(IndexObject):
554 class PersistentNodeMapIndexObject(IndexObject):
555 """a debug-oriented class to test the persistent nodemap
555 """a debug-oriented class to test the persistent nodemap
556
556
557 We need a simple python object to test API and higher level behavior. See
557 We need a simple python object to test API and higher level behavior. See
558 the Rust implementation for more serious usage. This should be used only
558 the Rust implementation for more serious usage. This should be used only
559 through the dedicated `devel.persistent-nodemap` config.
559 through the dedicated `devel.persistent-nodemap` config.
560 """
560 """
561
561
562 def nodemap_data_all(self):
562 def nodemap_data_all(self):
563 """Return bytes containing a full serialization of a nodemap
563 """Return bytes containing a full serialization of a nodemap
564
564
565 The nodemap should be valid for the full set of revisions in the
565 The nodemap should be valid for the full set of revisions in the
566 index."""
566 index."""
567 return nodemaputil.persistent_data(self)
567 return nodemaputil.persistent_data(self)
568
568
569 def nodemap_data_incremental(self):
569 def nodemap_data_incremental(self):
570 """Return bytes containing an incremental update to the persistent nodemap
570 """Return bytes containing an incremental update to the persistent nodemap
571
571
572 This contains the data for an append-only update of the data provided
572 This contains the data for an append-only update of the data provided
573 in the last call to `update_nodemap_data`.
573 in the last call to `update_nodemap_data`.
574 """
574 """
575 if self._nm_root is None:
575 if self._nm_root is None:
576 return None
576 return None
577 docket = self._nm_docket
577 docket = self._nm_docket
578 changed, data = nodemaputil.update_persistent_data(
578 changed, data = nodemaputil.update_persistent_data(
579 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
579 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
580 )
580 )
581
581
582 self._nm_root = self._nm_max_idx = self._nm_docket = None
582 self._nm_root = self._nm_max_idx = self._nm_docket = None
583 return docket, changed, data
583 return docket, changed, data
584
584
585 def update_nodemap_data(self, docket, nm_data):
585 def update_nodemap_data(self, docket, nm_data):
586 """provide full block of persisted binary data for a nodemap
586 """provide full block of persisted binary data for a nodemap
587
587
588 The data are expected to come from disk. See `nodemap_data_all` for a
588 The data are expected to come from disk. See `nodemap_data_all` for a
589 producer of such data."""
589 producer of such data."""
590 if nm_data is not None:
590 if nm_data is not None:
591 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
591 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
592 if self._nm_root:
592 if self._nm_root:
593 self._nm_docket = docket
593 self._nm_docket = docket
594 else:
594 else:
595 self._nm_root = self._nm_max_idx = self._nm_docket = None
595 self._nm_root = self._nm_max_idx = self._nm_docket = None
596
596
597
597
598 class InlinedIndexObject(BaseIndexObject):
598 class InlinedIndexObject(BaseIndexObject):
599 def __init__(self, data, inline=0):
599 def __init__(self, data, inline=0):
600 self._data = data
600 self._data = data
601 self._lgt = self._inline_scan(None)
601 self._lgt = self._inline_scan(None)
602 self._inline_scan(self._lgt)
602 self._inline_scan(self._lgt)
603 self._extra = []
603 self._extra = []
604
604
605 def _inline_scan(self, lgt):
605 def _inline_scan(self, lgt):
606 off = 0
606 off = 0
607 if lgt is not None:
607 if lgt is not None:
608 self._offsets = [0] * lgt
608 self._offsets = [0] * lgt
609 count = 0
609 count = 0
610 while off <= len(self._data) - self.entry_size:
610 while off <= len(self._data) - self.entry_size:
611 start = off + self.big_int_size
611 start = off + self.big_int_size
612 (s,) = struct.unpack(
612 (s,) = struct.unpack(
613 b'>i',
613 b'>i',
614 self._data[start : start + self.int_size],
614 self._data[start : start + self.int_size],
615 )
615 )
616 if lgt is not None:
616 if lgt is not None:
617 self._offsets[count] = off
617 self._offsets[count] = off
618 count += 1
618 count += 1
619 off += self.entry_size + s
619 off += self.entry_size + s
620 if off != len(self._data):
620 if off != len(self._data):
621 raise ValueError(b"corrupted data")
621 raise ValueError(b"corrupted data")
622 return count
622 return count
623
623
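
The scan above walks the inline layout used by inlined revlogs: each index entry is immediately followed by its revision chunk, whose length is read from the 4-byte field sitting `big_int_size` bytes into the entry. A hedged sketch with zero-length chunks, so offsets advance by exactly one entry (node values are made up):

from mercurial.pure import parsers

entry_a = parsers.InlinedIndexObject.index_format.pack(
    *(parsers.InlinedIndexObject.null_item[:7] + (b'\x33' * 20,))
)
entry_b = parsers.InlinedIndexObject.index_format.pack(
    *(parsers.InlinedIndexObject.null_item[:7] + (b'\x44' * 20,))
)

# two entries, each followed by a zero-length revision chunk
idx = parsers.InlinedIndexObject(entry_a + entry_b, inline=1)
assert len(idx) == 2
assert idx[1][7] == b'\x44' * 20
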
624 def __delitem__(self, i):
624 def __delitem__(self, i):
625 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
625 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
626 raise ValueError(b"deleting slices only supports a:-1 with step 1")
626 raise ValueError(b"deleting slices only supports a:-1 with step 1")
627 i = i.start
627 i = i.start
628 self._check_index(i)
628 self._check_index(i)
629 self._stripnodes(i)
629 self._stripnodes(i)
630 if i < self._lgt:
630 if i < self._lgt:
631 self._offsets = self._offsets[:i]
631 self._offsets = self._offsets[:i]
632 self._lgt = i
632 self._lgt = i
633 self._extra = []
633 self._extra = []
634 else:
634 else:
635 self._extra = self._extra[: i - self._lgt]
635 self._extra = self._extra[: i - self._lgt]
636
636
637 def _calculate_index(self, i):
637 def _calculate_index(self, i):
638 return self._offsets[i]
638 return self._offsets[i]
639
639
640
640
641 def parse_index2(data, inline, revlogv2=False):
641 def parse_index2(data, inline, revlogv2=False):
642 if not inline:
642 if not inline:
643 cls = IndexObject2 if revlogv2 else IndexObject
643 cls = IndexObject2 if revlogv2 else IndexObject
644 return cls(data), None
644 return cls(data), None
645 cls = InlinedIndexObject
645 cls = InlinedIndexObject
646 return cls(data, inline), (0, data)
646 return cls(data, inline), (0, data)
647
647
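
A minimal usage sketch of the dispatch above (real callers pass the revlog index file contents): non-inline data yields a bare index object and no cached chunk, while inline data hands the buffer back so the caller can slice revision chunks out of it.

from mercurial.pure import parsers

index, cache = parsers.parse_index2(b'', inline=False)
assert isinstance(index, parsers.IndexObject) and cache is None

index, cache = parsers.parse_index2(b'', inline=True)
assert isinstance(index, parsers.InlinedIndexObject)
assert cache == (0, b'')
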
648
648
649 def parse_index_cl_v2(data):
649 def parse_index_cl_v2(data):
650 return IndexChangelogV2(data), None
650 return IndexChangelogV2(data), None
651
651
652
652
653 class IndexObject2(IndexObject):
653 class IndexObject2(IndexObject):
654 index_format = revlog_constants.INDEX_ENTRY_V2
654 index_format = revlog_constants.INDEX_ENTRY_V2
655
655
656 def replace_sidedata_info(
656 def replace_sidedata_info(
657 self,
657 self,
658 rev,
658 rev,
659 sidedata_offset,
659 sidedata_offset,
660 sidedata_length,
660 sidedata_length,
661 offset_flags,
661 offset_flags,
662 compression_mode,
662 compression_mode,
663 ):
663 ):
664 """
664 """
665 Replace an existing index entry's sidedata offset and length with new
665 Replace an existing index entry's sidedata offset and length with new
666 ones.
666 ones.
667 This cannot be used outside of the context of sidedata rewriting,
667 This cannot be used outside of the context of sidedata rewriting,
668 inside the transaction that creates the revision `rev`.
668 inside the transaction that creates the revision `rev`.
669 """
669 """
670 if rev < 0:
670 if rev < 0:
671 raise KeyError
671 raise KeyError
672 self._check_index(rev)
672 self._check_index(rev)
673 if rev < self._lgt:
673 if rev < self._lgt:
674 msg = b"cannot rewrite entries outside of this transaction"
674 msg = b"cannot rewrite entries outside of this transaction"
675 raise KeyError(msg)
675 raise KeyError(msg)
676 else:
676 else:
677 entry = list(self[rev])
677 entry = list(self[rev])
678 entry[0] = offset_flags
678 entry[0] = offset_flags
679 entry[8] = sidedata_offset
679 entry[8] = sidedata_offset
680 entry[9] = sidedata_length
680 entry[9] = sidedata_length
681 entry[11] = compression_mode
681 entry[11] = compression_mode
682 entry = tuple(entry)
682 entry = tuple(entry)
683 new = self._pack_entry(rev, entry)
683 new = self._pack_entry(rev, entry)
684 self._extra[rev - self._lgt] = new
684 self._extra[rev - self._lgt] = new
685
685
686 def _unpack_entry(self, rev, data):
686 def _unpack_entry(self, rev, data):
687 data = self.index_format.unpack(data)
687 data = self.index_format.unpack(data)
688 entry = data[:10]
688 entry = data[:10]
689 data_comp = data[10] & 3
689 data_comp = data[10] & 3
690 sidedata_comp = (data[10] & (3 << 2)) >> 2
690 sidedata_comp = (data[10] & (3 << 2)) >> 2
691 return entry + (data_comp, sidedata_comp)
691 return entry + (data_comp, sidedata_comp)
692
692
693 def _pack_entry(self, rev, entry):
693 def _pack_entry(self, rev, entry):
694 data = entry[:10]
694 data = entry[:10]
695 data_comp = entry[10] & 3
695 data_comp = entry[10] & 3
696 sidedata_comp = (entry[11] & 3) << 2
696 sidedata_comp = (entry[11] & 3) << 2
697 data += (data_comp | sidedata_comp,)
697 data += (data_comp | sidedata_comp,)
698
698
699 return self.index_format.pack(*data)
699 return self.index_format.pack(*data)
700
700
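
In the v2 entry the two compression modes share one byte: the revision-data mode sits in the low two bits and the sidedata mode in the next two, as `_pack_entry` and `_unpack_entry` above show. A standalone sketch of that packing (hypothetical helper names, not part of the module):

def pack_comp_modes(data_comp, sidedata_comp):
    # low two bits: revision data compression; bits 2-3: sidedata compression
    return (data_comp & 3) | ((sidedata_comp & 3) << 2)

def unpack_comp_modes(value):
    return value & 3, (value >> 2) & 3

packed = pack_comp_modes(1, 2)
assert unpack_comp_modes(packed) == (1, 2)
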
701 def entry_binary(self, rev):
701 def entry_binary(self, rev):
702 """return the raw binary string representing a revision"""
702 """return the raw binary string representing a revision"""
703 entry = self[rev]
703 entry = self[rev]
704 return self._pack_entry(rev, entry)
704 return self._pack_entry(rev, entry)
705
705
706 def pack_header(self, header):
706 def pack_header(self, header):
707 """pack header information as binary"""
707 """pack header information as binary"""
708 msg = 'version header should go in the docket, not the index: %d'
708 msg = 'version header should go in the docket, not the index: %d'
709 msg %= header
709 msg %= header
710 raise error.ProgrammingError(msg)
710 raise error.ProgrammingError(msg)
711
711
712
712
713 class IndexChangelogV2(IndexObject2):
713 class IndexChangelogV2(IndexObject2):
714 index_format = revlog_constants.INDEX_ENTRY_CL_V2
714 index_format = revlog_constants.INDEX_ENTRY_CL_V2
715
715
716 def _unpack_entry(self, rev, data, r=True):
716 def _unpack_entry(self, rev, data, r=True):
717 items = self.index_format.unpack(data)
717 items = self.index_format.unpack(data)
718 entry = items[:3] + (rev, rev) + items[3:8]
718 entry = items[:3] + (rev, rev) + items[3:8]
719 data_comp = items[8] & 3
719 data_comp = items[8] & 3
720 sidedata_comp = (items[8] >> 2) & 3
720 sidedata_comp = (items[8] >> 2) & 3
721 return entry + (data_comp, sidedata_comp)
721 return entry + (data_comp, sidedata_comp)
722
722
723 def _pack_entry(self, rev, entry):
723 def _pack_entry(self, rev, entry):
724 assert entry[3] == rev, entry[3]
724 assert entry[3] == rev, entry[3]
725 assert entry[4] == rev, entry[4]
725 assert entry[4] == rev, entry[4]
726 data = entry[:3] + entry[5:10]
726 data = entry[:3] + entry[5:10]
727 data_comp = entry[10] & 3
727 data_comp = entry[10] & 3
728 sidedata_comp = (entry[11] & 3) << 2
728 sidedata_comp = (entry[11] & 3) << 2
729 data += (data_comp | sidedata_comp,)
729 data += (data_comp | sidedata_comp,)
730 return self.index_format.pack(*data)
730 return self.index_format.pack(*data)
731
731
732
732
733 def parse_index_devel_nodemap(data, inline):
733 def parse_index_devel_nodemap(data, inline):
734 """like parse_index2, but always returns a PersistentNodeMapIndexObject"""
734 """like parse_index2, but always returns a PersistentNodeMapIndexObject"""
735 return PersistentNodeMapIndexObject(data), None
735 return PersistentNodeMapIndexObject(data), None
736
736
737
737
738 def parse_dirstate(dmap, copymap, st):
738 def parse_dirstate(dmap, copymap, st):
739 parents = [st[:20], st[20:40]]
739 parents = [st[:20], st[20:40]]
740 # dereference fields so they will be local in loop
740 # dereference fields so they will be local in loop
741 format = b">cllll"
741 format = b">cllll"
742 e_size = struct.calcsize(format)
742 e_size = struct.calcsize(format)
743 pos1 = 40
743 pos1 = 40
744 l = len(st)
744 l = len(st)
745
745
746 # the inner loop
746 # the inner loop
747 while pos1 < l:
747 while pos1 < l:
748 pos2 = pos1 + e_size
748 pos2 = pos1 + e_size
749 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
749 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
750 pos1 = pos2 + e[4]
750 pos1 = pos2 + e[4]
751 f = st[pos2:pos1]
751 f = st[pos2:pos1]
752 if b'\0' in f:
752 if b'\0' in f:
753 f, c = f.split(b'\0')
753 f, c = f.split(b'\0')
754 copymap[f] = c
754 copymap[f] = c
755 dmap[f] = DirstateItem.from_v1_data(*e[:4])
755 dmap[f] = DirstateItem.from_v1_data(*e[:4])
756 return parents
756 return parents
757
757
758
758
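
For illustration, the v1 wire layout this parser walks: two 20-byte parent hashes, then for each file a `>cllll` header (state, mode, size, mtime, name length) followed by the name, with an optional `\0`-separated copy source appended to the name. A small hedged sketch (file name and values are made up):

import struct
from mercurial.pure import parsers

name = b'a.txt'
record = struct.pack(b'>cllll', b'n', 0o644, 5, 1633000000, len(name)) + name
st = b'\x11' * 20 + b'\x22' * 20 + record

dmap, copymap = {}, {}
p1, p2 = parsers.parse_dirstate(dmap, copymap, st)
assert (p1, p2) == (b'\x11' * 20, b'\x22' * 20)
assert dmap[b'a.txt'].v1_state() == b'n'
assert copymap == {}
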
759 def pack_dirstate(dmap, copymap, pl, now):
759 def pack_dirstate(dmap, copymap, pl, now):
760 now = int(now)
760 now = int(now)
761 cs = stringio()
761 cs = stringio()
762 write = cs.write
762 write = cs.write
763 write(b"".join(pl))
763 write(b"".join(pl))
764 for f, e in pycompat.iteritems(dmap):
764 for f, e in pycompat.iteritems(dmap):
765 if e.need_delay(now):
765 if e.need_delay(now):
766 # The file was last modified "simultaneously" with the current
766 # The file was last modified "simultaneously" with the current
767 # write to dirstate (i.e. within the same second for file-
767 # write to dirstate (i.e. within the same second for file-
768 # systems with a granularity of 1 sec). This commonly happens
768 # systems with a granularity of 1 sec). This commonly happens
769 # for at least a couple of files on 'update'.
769 # for at least a couple of files on 'update'.
770 # The user could change the file without changing its size
770 # The user could change the file without changing its size
771 # within the same second. Invalidate the file's mtime in
771 # within the same second. Invalidate the file's mtime in
772 # dirstate, forcing future 'status' calls to compare the
772 # dirstate, forcing future 'status' calls to compare the
773 # contents of the file if the size is the same. This prevents
773 # contents of the file if the size is the same. This prevents
774 # mistakenly treating such files as clean.
774 # mistakenly treating such files as clean.
775 e.set_possibly_dirty()
775 e.set_possibly_dirty()
776
776
777 if f in copymap:
777 if f in copymap:
778 f = b"%s\0%s" % (f, copymap[f])
778 f = b"%s\0%s" % (f, copymap[f])
779 e = _pack(
779 e = _pack(
780 b">cllll",
780 b">cllll",
781 e.v1_state(),
781 e.v1_state(),
782 e.v1_mode(),
782 e.v1_mode(),
783 e.v1_size(),
783 e.v1_size(),
784 e.v1_mtime(),
784 e.v1_mtime(),
785 len(f),
785 len(f),
786 )
786 )
787 write(e)
787 write(e)
788 write(f)
788 write(f)
789 return cs.getvalue()
789 return cs.getvalue()
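
The `need_delay` check above is what prevents a file written in the same second as the dirstate from keeping a trustworthy mtime: such entries are flushed through `set_possibly_dirty()`, so the next status call re-reads their content. A hedged round-trip sketch (arbitrary parents and timestamps):

from mercurial.pure import parsers

pl = [b'\x11' * 20, b'\x22' * 20]

# `now` is later than the stored mtime: the entry keeps its mtime
dmap = {b'a.txt': parsers.DirstateItem.from_v1_data(b'n', 0o644, 5, 1000)}
st = parsers.pack_dirstate(dmap, {}, pl, 2000)
out = {}
parsers.parse_dirstate(out, {}, st)
assert out[b'a.txt'].v1_mtime() == 1000

# `now` equal to the stored mtime is ambiguous: the mtime is invalidated
dmap = {b'a.txt': parsers.DirstateItem.from_v1_data(b'n', 0o644, 5, 1000)}
st = parsers.pack_dirstate(dmap, {}, pl, 1000)
out = {}
parsers.parse_dirstate(out, {}, st)
assert out[b'a.txt'].v1_mtime() == parsers.AMBIGUOUS_TIME
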