##// END OF EJS Templates
revlog: simplify the replace_sidedata_info code...
marmoute -
r48018:78230d03 default
parent child Browse files
Show More
@@ -1,404 +1,404 b''
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11 import zlib
11 import zlib
12
12
13 from ..node import (
13 from ..node import (
14 nullrev,
14 nullrev,
15 sha1nodeconstants,
15 sha1nodeconstants,
16 )
16 )
17 from .. import (
17 from .. import (
18 error,
18 error,
19 pycompat,
19 pycompat,
20 util,
20 util,
21 )
21 )
22
22
23 from ..revlogutils import nodemap as nodemaputil
23 from ..revlogutils import nodemap as nodemaputil
24 from ..revlogutils import constants as revlog_constants
24 from ..revlogutils import constants as revlog_constants
25
25
26 stringio = pycompat.bytesio
26 stringio = pycompat.bytesio
27
27
28
28
29 _pack = struct.pack
29 _pack = struct.pack
30 _unpack = struct.unpack
30 _unpack = struct.unpack
31 _compress = zlib.compress
31 _compress = zlib.compress
32 _decompress = zlib.decompress
32 _decompress = zlib.decompress
33
33
# Some code below makes tuples directly because it's more convenient. However,
# code outside this module should always use dirstatetuple.
def dirstatetuple(*x):
    """Return the positional arguments packed as a plain tuple.

    Pure-Python stand-in for the dirstate tuple type provided by parsers.c.
    """
    # *x is already a tuple thanks to argument packing.
    return x
39
39
40
40
def gettype(q):
    """Extract the revision type stored in the low 16 bits of *q*."""
    return int(q & 0xFFFF)
43
43
44
44
def offset_type(offset, type):
    """Pack *offset* and *type* into one integer.

    The offset occupies the high bits; the type the low 16 bits
    (the inverse of ``gettype``).
    """
    return int(int(offset) << 16 | type)
47
47
48
48
class BaseIndexObject(object):
    """Pure-Python base implementation of a revlog index."""

    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (0, 0, 0, -1, -1, -1, -1, sha1nodeconstants.nullid, 0, 0)

    @util.propertycache
    def entry_size(self):
        """Size in bytes of one serialized index entry."""
        return self.index_format.size

    @property
    def nodemap(self):
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # Built lazily on first use: map every known node to its revision,
        # seeding the null node with nullrev.
        mapping = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for rev in range(0, len(self)):
            mapping[self[rev][7]] = rev
        return mapping

    def has_node(self, node):
        """return True if the node exist in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        # Keep the cached nodemap (if it exists) consistent when entries
        # from `start` onward are about to be removed.
        if '_nodemap' in vars(self):
            for rev in range(start, len(self)):
                del self._nodemap[self[rev][7]]

    def clearcaches(self):
        """Drop lazily computed caches (currently only the nodemap)."""
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        self._extra.append(self._pack_entry(tup))

    def _pack_entry(self, entry):
        # v1 entries carry no sidedata; the last two tuple fields must be 0
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            raw = self._extra[i - self._lgt]
        else:
            offset = self._calculate_index(i)
            raw = self._data[offset : offset + self.entry_size]
        entry = self._unpack_entry(raw)
        if self._lgt and i == 0:
            # on disk, the first entry's offset field doubles as the version
            # header; mask that out before handing the entry back
            entry = (offset_type(0, gettype(entry[0])),) + entry[1:]
        return entry

    def _unpack_entry(self, data):
        # pad v1 entries with zeroed sidedata offset/length fields
        return self.index_format.unpack(data) + (0, 0)

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            # the first entry shares its leading bytes with the version header
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
153
153
154
154
class IndexObject(BaseIndexObject):
    """Index backed by a contiguous (non-inline) buffer of fixed-size entries."""

    def __init__(self, data):
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        # entries have a fixed size, so the byte offset is a multiplication
        return i * self.entry_size

    def __delitem__(self, i):
        # only truncation from some revision to the end is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            # truncating into the on-disk portion drops all in-memory extras
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]
181
181
182
182
class PersistentNodeMapIndexObject(IndexObject):
    """a Debug oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )
        # the incremental state is single-use: reset it after consumption
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None
225
225
226
226
class InlinedIndexObject(BaseIndexObject):
    """Index whose entries are interleaved with revision data (v1 inline)."""

    def __init__(self, data, inline=0):
        self._data = data
        # first scan counts the entries; second scan records their offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        """Walk the interleaved data and return the number of entries.

        When *lgt* is not None, also fill ``self._offsets`` with the byte
        position of each entry.
        """
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            # the compressed data length lives right after the 8-byte offset
            start = off + self.big_int_size
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            # skip over this entry plus its trailing revision data
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        # only truncation from some revision to the end is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        # offsets are irregular (data interleaved), so use the scan results
        return self._offsets[i]
268
268
269
269
def parse_index2(data, inline, revlogv2=False):
    """Build an index object for *data*.

    Returns a ``(index, cache)`` pair; the cache is only populated for
    inline indexes, where the data buffer must be retained.
    """
    if not inline:
        cls = IndexObject2 if revlogv2 else IndexObject
        return cls(data), None
    cls = InlinedIndexObject2 if revlogv2 else InlinedIndexObject
    return cls(data, inline), (0, data)
276
276
277
277
class Index2Mixin(object):
    """Mixin switching an index class to the revlog v2 entry format."""

    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self, rev, sidedata_offset, sidedata_length, offset_flags
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            # on-disk entries are immutable; only entries added during the
            # current transaction (stored in self._extra) may be rewritten
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            # Rebuild the entry tuple and re-pack it instead of splicing raw
            # bytes at hard-coded offsets, which was fragile against any
            # change in the entry format.
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry = tuple(entry)
            new = self._pack_entry(entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, data):
        # v2 entries natively carry sidedata fields; no padding needed
        return self.index_format.unpack(data)

    def _pack_entry(self, entry):
        return self.index_format.pack(*entry)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V2.pack(*entry)
        return p

    def pack_header(self, header):
        """pack header information as binary"""
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
322
322
323
323
class IndexObject2(Index2Mixin, IndexObject):
    """Non-inline revlog index using the v2 entry format."""
326
326
327
327
class InlinedIndexObject2(Index2Mixin, InlinedIndexObject):
    """Inline revlog index using the v2 entry format."""

    def _inline_scan(self, lgt):
        # byte position of the sidedata length within a v2 entry
        sidedata_length_pos = 72
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            (data_size,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            start = off + sidedata_length_pos
            (side_data_size,) = struct.unpack(
                b'>i', self._data[start : start + self.int_size]
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            # v2 inline entries are followed by both revision and sidedata
            off += self.entry_size + data_size + side_data_size
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count
352
352
353
353
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    return PersistentNodeMapIndexObject(data), None
357
357
358
358
def parse_dirstate(dmap, copymap, st):
    """Parse the raw dirstate content *st*.

    Fills *dmap* with ``filename -> (state, mode, size, mtime)`` and
    *copymap* with ``filename -> copy source``.  Returns the two parent
    nodes stored in the first 40 bytes.
    """
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in the loop
    unpack = struct.unpack
    e_size = struct.calcsize(b">cllll")
    cursor = 40
    total = len(st)

    # the inner loop
    while cursor < total:
        rec_end = cursor + e_size
        e = unpack(b">cllll", st[cursor:rec_end])  # a literal here is faster
        # e[4] is the length of the filename (plus copy source, if any)
        cursor = rec_end + e[4]
        f = st[rec_end:cursor]
        if b'\0' in f:
            # a NUL separates the filename from its copy source
            f, c = f.split(b'\0')
            copymap[f] = c
        dmap[f] = e[:4]
    return parents
378
378
379
379
def pack_dirstate(dmap, copymap, pl, now):
    """Serialize *dmap*/*copymap* into the dirstate wire format.

    *pl* holds the two parent nodes written first; *now* is the current
    wall-clock time used to invalidate racy mtimes.  Note: entries in
    *dmap* may be mutated (mtime set to -1) as a side effect.
    """
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e[0] == b'n' and e[3] == now:
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e = dirstatetuple(e[0], e[1], e[2], -1)
            dmap[f] = e

        if f in copymap:
            # copy source is appended to the filename after a NUL byte
            f = b"%s\0%s" % (f, copymap[f])
        record = _pack(b">cllll", e[0], e[1], e[2], e[3], len(f))
        write(record)
        write(f)
    return cs.getvalue()
General Comments 0
You need to be logged in to leave comments. Login now