##// END OF EJS Templates
dirstate-entry: turn dirstate tuple into a real object (like in C)...
marmoute -
r48296:d4c79557 default
parent child Browse files
Show More
@@ -1,437 +1,456 b''
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11 import zlib
11 import zlib
12
12
13 from ..node import (
13 from ..node import (
14 nullrev,
14 nullrev,
15 sha1nodeconstants,
15 sha1nodeconstants,
16 )
16 )
17 from .. import (
17 from .. import (
18 error,
18 error,
19 pycompat,
19 pycompat,
20 revlogutils,
20 revlogutils,
21 util,
21 util,
22 )
22 )
23
23
24 from ..revlogutils import nodemap as nodemaputil
24 from ..revlogutils import nodemap as nodemaputil
25 from ..revlogutils import constants as revlog_constants
25 from ..revlogutils import constants as revlog_constants
26
26
27 stringio = pycompat.bytesio
27 stringio = pycompat.bytesio
28
28
29
29
30 _pack = struct.pack
30 _pack = struct.pack
31 _unpack = struct.unpack
31 _unpack = struct.unpack
32 _compress = zlib.compress
32 _compress = zlib.compress
33 _decompress = zlib.decompress
33 _decompress = zlib.decompress
34
34
class dirstatetuple(object):
    """represent a dirstate entry

    It contains:

    - state (one of 'n', 'a', 'r', 'm')
    - mode,
    - size,
    - mtime,

    The object is indexable like the plain 4-tuple it replaces, including
    negative indexes, so existing tuple-style callers keep working.
    """

    # NOTE: must be ``__slots__`` (plural); the misspelled ``__slot__`` is an
    # ordinary class attribute and instances would silently grow a __dict__.
    __slots__ = ('_state', '_mode', '_size', '_mtime')

    def __init__(self, state, mode, size, mtime):
        self._state = state
        self._mode = mode
        self._size = size
        self._mtime = mtime

    def __getitem__(self, idx):
        # emulate the historical 4-tuple layout, negative indexes included
        if idx == 0 or idx == -4:
            return self._state
        elif idx == 1 or idx == -3:
            return self._mode
        elif idx == 2 or idx == -2:
            return self._size
        elif idx == 3 or idx == -1:
            return self._mtime
        else:
            raise IndexError(idx)
47
66
48
67
def gettype(q):
    """Return the revision type encoded in the low 16 bits of *q*."""
    return int(q) & 0xFFFF
51
70
52
71
class BaseIndexObject(object):
    """Shared behavior for the pure-python revlog index objects."""

    # Can I be passed to an algorithme implemented in Rust ?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        # byte length of one packed entry, derived from the struct format
        return self.index_format.size

    @property
    def nodemap(self):
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # lazily built node -> rev mapping (entry index 7 holds the node)
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for rev in range(len(self)):
            nodemap[self[rev][7]] = rev
        return nodemap

    def has_node(self, node):
        """return True if the node exist in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        # keep the cached nodemap (if it was built) in sync with truncation
        if '_nodemap' in vars(self):
            for rev in range(start, len(self)):
                del self._nodemap[self[rev][7]]

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        packed = self._pack_entry(len(self), tup)
        self._extra.append(packed)

    def _pack_entry(self, rev, entry):
        # v1 has no sidedata: those fields must be zero before dropping them
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        entry = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            # rev 0 stores the version header where the offset would live;
            # rebuild a clean offset that preserves only the type bits
            offset = revlogutils.offset_type(0, gettype(entry[0]))
            entry = (offset,) + entry[1:]
        return entry

    def _unpack_entry(self, rev, data):
        # pad v1 entries with the sidedata/compression fields newer formats
        # carry, so every entry tuple has the same shape
        unpacked = self.index_format.unpack(data)
        return unpacked + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            # the on-disk rev 0 entry overlaps the index header
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
178
197
179
198
class IndexObject(BaseIndexObject):
    """Index backed by a non-inline data buffer of fixed-size entries."""

    def __init__(self, data):
        # the buffer must contain a whole number of entries
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        # fixed-size entries: byte offset is a plain multiplication
        return i * self.entry_size

    def __delitem__(self, i):
        # only truncation (del index[start:-1]) is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]
206
225
207
226
class PersistentNodeMapIndexObject(IndexObject):
    """a Debug oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        # the cached parse state is only valid for one incremental round-trip
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None
250
269
251
270
class InlinedIndexObject(BaseIndexObject):
    """Index interleaved with revision data ("inline" revlog layout)."""

    def __init__(self, data, inline=0):
        self._data = data
        # first pass counts entries, second pass records their offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        """walk the inline buffer; return the number of entries found

        When *lgt* is None only count entries; otherwise also populate
        ``self._offsets`` with the byte offset of each entry.
        """
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            # the compressed-data length lives right after the 8-byte offset
            start = off + self.big_int_size
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            # skip over the entry and its trailing revision data
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        # only truncation (del index[start:-1]) is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        return self._offsets[i]
293
312
294
313
def parse_index2(data, inline, revlogv2=False):
    """Build an index object for *data*.

    Returns an ``(index, cache)`` pair; the cache is only populated for
    inline revlogs, where the data buffer must be kept around.
    """
    if inline:
        return InlinedIndexObject(data, inline), (0, data)
    cls = IndexObject2 if revlogv2 else IndexObject
    return cls(data), None
301
320
302
321
def parse_index_cl_v2(data):
    """Build a changelog-v2 index object; the cache slot is always None."""
    return IndexChangelogV2(data), None
305
324
306
325
class IndexObject2(IndexObject):
    """Non-inline index using the revlog-v2 entry format."""

    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            # entries below _lgt live in the immutable on-disk buffer
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        # the last packed field multiplexes data (bits 0-1) and sidedata
        # (bits 2-3) compression modes
        fields = self.index_format.unpack(data)
        entry = fields[:10]
        data_comp = fields[10] & 3
        sidedata_comp = (fields[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
365
384
366
385
class IndexChangelogV2(IndexObject2):
    """Changelog-v2 index; parent fields are implicit (always == rev)."""

    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        items = self.index_format.unpack(data)
        # changelog entries omit the linkrev/p1 slots; both are `rev` itself
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        # sanity-check the implicit fields before dropping them on disk
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)
385
404
386
405
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
    # debug-only entry point: the `inline` flag is intentionally ignored
    return PersistentNodeMapIndexObject(data), None
390
409
391
410
def parse_dirstate(dmap, copymap, st):
    """Parse the binary dirstate *st* into *dmap* and *copymap*.

    The first 40 bytes hold the two parent nodes (returned as a list);
    each following record is a ``>cllll`` header (state, mode, size,
    mtime, filename length) followed by the filename. A NUL inside the
    filename separates it from the copy source, recorded in *copymap*.
    """
    parents = [st[:20], st[20:40]]
    header_fmt = b">cllll"
    header_size = struct.calcsize(header_fmt)
    # hoist lookups out of the loop; this function is performance sensitive
    unpack = struct.unpack
    end = len(st)
    pos = 40

    while pos < end:
        name_start = pos + header_size
        fields = unpack(b">cllll", st[pos:name_start])
        pos = name_start + fields[4]
        fname = st[name_start:pos]
        if b'\0' in fname:
            fname, copy_source = fname.split(b'\0')
            copymap[fname] = copy_source
        dmap[fname] = fields[:4]
    return parents
411
430
412
431
def pack_dirstate(dmap, copymap, pl, now):
    """Serialize *dmap*/*copymap* to the binary dirstate format.

    *pl* is the pair of parent nodes written first; *now* is the current
    wall-clock time, used to invalidate racy mtimes. NOTE: entries whose
    mtime equals *now* are rewritten (in *dmap* too) with mtime -1.
    """
    now = int(now)
    buf = stringio()
    write = buf.write
    write(b"".join(pl))
    for fname, ent in pycompat.iteritems(dmap):
        if ent[0] == b'n' and ent[3] == now:
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            ent = dirstatetuple(ent[0], ent[1], ent[2], -1)
            dmap[fname] = ent

        if fname in copymap:
            # copy source is appended to the filename after a NUL byte
            fname = b"%s\0%s" % (fname, copymap[fname])
        write(_pack(b">cllll", ent[0], ent[1], ent[2], ent[3], len(fname)))
        write(fname)
    return buf.getvalue()
General Comments 0
You need to be logged in to leave comments. Login now