##// END OF EJS Templates
dirstate: remove an update_file special case for tracked files with p2 data...
marmoute -
r48920:d4e715d2 default
parent child Browse files
Show More
@@ -1,926 +1,917 b''
1 # dirstatemap.py
1 # dirstatemap.py
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import errno
8 import errno
9
9
10 from .i18n import _
10 from .i18n import _
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 pathutil,
14 pathutil,
15 policy,
15 policy,
16 pycompat,
16 pycompat,
17 txnutil,
17 txnutil,
18 util,
18 util,
19 )
19 )
20
20
21 from .dirstateutils import (
21 from .dirstateutils import (
22 docket as docketmod,
22 docket as docketmod,
23 )
23 )
24
24
25 parsers = policy.importmod('parsers')
25 parsers = policy.importmod('parsers')
26 rustmod = policy.importrust('dirstate')
26 rustmod = policy.importrust('dirstate')
27
27
28 propertycache = util.propertycache
28 propertycache = util.propertycache
29
29
30 if rustmod is None:
30 if rustmod is None:
31 DirstateItem = parsers.DirstateItem
31 DirstateItem = parsers.DirstateItem
32 else:
32 else:
33 DirstateItem = rustmod.DirstateItem
33 DirstateItem = rustmod.DirstateItem
34
34
35 rangemask = 0x7FFFFFFF
35 rangemask = 0x7FFFFFFF
36
36
37
37
38 class dirstatemap(object):
38 class dirstatemap(object):
39 """Map encapsulating the dirstate's contents.
39 """Map encapsulating the dirstate's contents.
40
40
41 The dirstate contains the following state:
41 The dirstate contains the following state:
42
42
43 - `identity` is the identity of the dirstate file, which can be used to
43 - `identity` is the identity of the dirstate file, which can be used to
44 detect when changes have occurred to the dirstate file.
44 detect when changes have occurred to the dirstate file.
45
45
46 - `parents` is a pair containing the parents of the working copy. The
46 - `parents` is a pair containing the parents of the working copy. The
47 parents are updated by calling `setparents`.
47 parents are updated by calling `setparents`.
48
48
49 - the state map maps filenames to tuples of (state, mode, size, mtime),
49 - the state map maps filenames to tuples of (state, mode, size, mtime),
50 where state is a single character representing 'normal', 'added',
50 where state is a single character representing 'normal', 'added',
51 'removed', or 'merged'. It is read by treating the dirstate as a
51 'removed', or 'merged'. It is read by treating the dirstate as a
52 dict. File state is updated by calling various methods (see each
52 dict. File state is updated by calling various methods (see each
53 documentation for details):
53 documentation for details):
54
54
55 - `reset_state`,
55 - `reset_state`,
56 - `set_tracked`
56 - `set_tracked`
57 - `set_untracked`
57 - `set_untracked`
58 - `set_clean`
58 - `set_clean`
59 - `set_possibly_dirty`
59 - `set_possibly_dirty`
60
60
61 - `copymap` maps destination filenames to their source filename.
61 - `copymap` maps destination filenames to their source filename.
62
62
63 The dirstate also provides the following views onto the state:
63 The dirstate also provides the following views onto the state:
64
64
65 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
65 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
66 form that they appear as in the dirstate.
66 form that they appear as in the dirstate.
67
67
68 - `dirfoldmap` is a dict mapping normalized directory names to the
68 - `dirfoldmap` is a dict mapping normalized directory names to the
69 denormalized form that they appear as in the dirstate.
69 denormalized form that they appear as in the dirstate.
70 """
70 """
71
71
72 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
72 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
73 self._ui = ui
73 self._ui = ui
74 self._opener = opener
74 self._opener = opener
75 self._root = root
75 self._root = root
76 self._filename = b'dirstate'
76 self._filename = b'dirstate'
77 self._nodelen = 20
77 self._nodelen = 20
78 self._nodeconstants = nodeconstants
78 self._nodeconstants = nodeconstants
79 assert (
79 assert (
80 not use_dirstate_v2
80 not use_dirstate_v2
81 ), "should have detected unsupported requirement"
81 ), "should have detected unsupported requirement"
82
82
83 self._parents = None
83 self._parents = None
84 self._dirtyparents = False
84 self._dirtyparents = False
85
85
86 # for consistent view between _pl() and _read() invocations
86 # for consistent view between _pl() and _read() invocations
87 self._pendingmode = None
87 self._pendingmode = None
88
88
89 @propertycache
89 @propertycache
90 def _map(self):
90 def _map(self):
91 self._map = {}
91 self._map = {}
92 self.read()
92 self.read()
93 return self._map
93 return self._map
94
94
95 @propertycache
95 @propertycache
96 def copymap(self):
96 def copymap(self):
97 self.copymap = {}
97 self.copymap = {}
98 self._map
98 self._map
99 return self.copymap
99 return self.copymap
100
100
101 def clear(self):
101 def clear(self):
102 self._map.clear()
102 self._map.clear()
103 self.copymap.clear()
103 self.copymap.clear()
104 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
104 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
105 util.clearcachedproperty(self, b"_dirs")
105 util.clearcachedproperty(self, b"_dirs")
106 util.clearcachedproperty(self, b"_alldirs")
106 util.clearcachedproperty(self, b"_alldirs")
107 util.clearcachedproperty(self, b"filefoldmap")
107 util.clearcachedproperty(self, b"filefoldmap")
108 util.clearcachedproperty(self, b"dirfoldmap")
108 util.clearcachedproperty(self, b"dirfoldmap")
109
109
110 def items(self):
110 def items(self):
111 return pycompat.iteritems(self._map)
111 return pycompat.iteritems(self._map)
112
112
113 # forward for python2,3 compat
113 # forward for python2,3 compat
114 iteritems = items
114 iteritems = items
115
115
116 def debug_iter(self, all):
116 def debug_iter(self, all):
117 """
117 """
118 Return an iterator of (filename, state, mode, size, mtime) tuples
118 Return an iterator of (filename, state, mode, size, mtime) tuples
119
119
120 `all` is unused when Rust is not enabled
120 `all` is unused when Rust is not enabled
121 """
121 """
122 for (filename, item) in self.items():
122 for (filename, item) in self.items():
123 yield (filename, item.state, item.mode, item.size, item.mtime)
123 yield (filename, item.state, item.mode, item.size, item.mtime)
124
124
125 def __len__(self):
125 def __len__(self):
126 return len(self._map)
126 return len(self._map)
127
127
128 def __iter__(self):
128 def __iter__(self):
129 return iter(self._map)
129 return iter(self._map)
130
130
131 def get(self, key, default=None):
131 def get(self, key, default=None):
132 return self._map.get(key, default)
132 return self._map.get(key, default)
133
133
134 def __contains__(self, key):
134 def __contains__(self, key):
135 return key in self._map
135 return key in self._map
136
136
137 def __getitem__(self, key):
137 def __getitem__(self, key):
138 return self._map[key]
138 return self._map[key]
139
139
140 def keys(self):
140 def keys(self):
141 return self._map.keys()
141 return self._map.keys()
142
142
143 def preload(self):
143 def preload(self):
144 """Loads the underlying data, if it's not already loaded"""
144 """Loads the underlying data, if it's not already loaded"""
145 self._map
145 self._map
146
146
147 def _dirs_incr(self, filename, old_entry=None):
147 def _dirs_incr(self, filename, old_entry=None):
148 """incremente the dirstate counter if applicable"""
148 """incremente the dirstate counter if applicable"""
149 if (
149 if (
150 old_entry is None or old_entry.removed
150 old_entry is None or old_entry.removed
151 ) and "_dirs" in self.__dict__:
151 ) and "_dirs" in self.__dict__:
152 self._dirs.addpath(filename)
152 self._dirs.addpath(filename)
153 if old_entry is None and "_alldirs" in self.__dict__:
153 if old_entry is None and "_alldirs" in self.__dict__:
154 self._alldirs.addpath(filename)
154 self._alldirs.addpath(filename)
155
155
156 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
156 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
157 """decremente the dirstate counter if applicable"""
157 """decremente the dirstate counter if applicable"""
158 if old_entry is not None:
158 if old_entry is not None:
159 if "_dirs" in self.__dict__ and not old_entry.removed:
159 if "_dirs" in self.__dict__ and not old_entry.removed:
160 self._dirs.delpath(filename)
160 self._dirs.delpath(filename)
161 if "_alldirs" in self.__dict__ and not remove_variant:
161 if "_alldirs" in self.__dict__ and not remove_variant:
162 self._alldirs.delpath(filename)
162 self._alldirs.delpath(filename)
163 elif remove_variant and "_alldirs" in self.__dict__:
163 elif remove_variant and "_alldirs" in self.__dict__:
164 self._alldirs.addpath(filename)
164 self._alldirs.addpath(filename)
165 if "filefoldmap" in self.__dict__:
165 if "filefoldmap" in self.__dict__:
166 normed = util.normcase(filename)
166 normed = util.normcase(filename)
167 self.filefoldmap.pop(normed, None)
167 self.filefoldmap.pop(normed, None)
168
168
169 def set_possibly_dirty(self, filename):
169 def set_possibly_dirty(self, filename):
170 """record that the current state of the file on disk is unknown"""
170 """record that the current state of the file on disk is unknown"""
171 self[filename].set_possibly_dirty()
171 self[filename].set_possibly_dirty()
172
172
173 def set_clean(self, filename, mode, size, mtime):
173 def set_clean(self, filename, mode, size, mtime):
174 """mark a file as back to a clean state"""
174 """mark a file as back to a clean state"""
175 entry = self[filename]
175 entry = self[filename]
176 mtime = mtime & rangemask
176 mtime = mtime & rangemask
177 size = size & rangemask
177 size = size & rangemask
178 entry.set_clean(mode, size, mtime)
178 entry.set_clean(mode, size, mtime)
179 self.copymap.pop(filename, None)
179 self.copymap.pop(filename, None)
180
180
181 def reset_state(
181 def reset_state(
182 self,
182 self,
183 filename,
183 filename,
184 wc_tracked=False,
184 wc_tracked=False,
185 p1_tracked=False,
185 p1_tracked=False,
186 p2_tracked=False,
186 p2_tracked=False,
187 merged=False,
187 merged=False,
188 clean_p1=False,
188 clean_p1=False,
189 clean_p2=False,
189 clean_p2=False,
190 possibly_dirty=False,
190 possibly_dirty=False,
191 parentfiledata=None,
191 parentfiledata=None,
192 ):
192 ):
193 """Set a entry to a given state, diregarding all previous state
193 """Set a entry to a given state, diregarding all previous state
194
194
195 This is to be used by the part of the dirstate API dedicated to
195 This is to be used by the part of the dirstate API dedicated to
196 adjusting the dirstate after a update/merge.
196 adjusting the dirstate after a update/merge.
197
197
198 note: calling this might result to no entry existing at all if the
198 note: calling this might result to no entry existing at all if the
199 dirstate map does not see any point at having one for this file
199 dirstate map does not see any point at having one for this file
200 anymore.
200 anymore.
201 """
201 """
202 if merged and (clean_p1 or clean_p2):
202 if merged and (clean_p1 or clean_p2):
203 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
203 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
204 raise error.ProgrammingError(msg)
204 raise error.ProgrammingError(msg)
205 # copy information are now outdated
205 # copy information are now outdated
206 # (maybe new information should be in directly passed to this function)
206 # (maybe new information should be in directly passed to this function)
207 self.copymap.pop(filename, None)
207 self.copymap.pop(filename, None)
208
208
209 if not (p1_tracked or p2_tracked or wc_tracked):
209 if not (p1_tracked or p2_tracked or wc_tracked):
210 old_entry = self._map.pop(filename, None)
210 old_entry = self._map.pop(filename, None)
211 self._dirs_decr(filename, old_entry=old_entry)
211 self._dirs_decr(filename, old_entry=old_entry)
212 self.copymap.pop(filename, None)
212 self.copymap.pop(filename, None)
213 return
213 return
214 elif merged:
214 elif merged:
215 # XXX might be merged and removed ?
215 # XXX might be merged and removed ?
216 entry = self.get(filename)
216 entry = self.get(filename)
217 if entry is None or not entry.tracked:
217 if entry is None or not entry.tracked:
218 # XXX mostly replicate dirstate.other parent. We should get
218 # XXX mostly replicate dirstate.other parent. We should get
219 # the higher layer to pass us more reliable data where `merged`
219 # the higher layer to pass us more reliable data where `merged`
220 # actually mean merged. Dropping this clause will show failure
220 # actually mean merged. Dropping this clause will show failure
221 # in `test-graft.t`
221 # in `test-graft.t`
222 merged = False
222 merged = False
223 clean_p2 = True
223 clean_p2 = True
224 elif not (p1_tracked or p2_tracked) and wc_tracked:
224 elif not (p1_tracked or p2_tracked) and wc_tracked:
225 pass # file is added, nothing special to adjust
225 pass # file is added, nothing special to adjust
226 elif (p1_tracked or p2_tracked) and not wc_tracked:
226 elif (p1_tracked or p2_tracked) and not wc_tracked:
227 pass
227 pass
228 elif clean_p2 and wc_tracked:
228 elif clean_p2 and wc_tracked:
229 if p1_tracked or self.get(filename) is not None:
229 pass
230 # XXX the `self.get` call is catching some case in
231 # `test-merge-remove.t` where the file is tracked in p1, the
232 # p1_tracked argument is False.
233 #
234 # In addition, this seems to be a case where the file is marked
235 # as merged without actually being the result of a merge
236 # action. So thing are not ideal here.
237 merged = True
238 clean_p2 = False
239 elif not p1_tracked and p2_tracked and wc_tracked:
230 elif not p1_tracked and p2_tracked and wc_tracked:
240 clean_p2 = True
231 clean_p2 = True
241 elif possibly_dirty:
232 elif possibly_dirty:
242 pass
233 pass
243 elif wc_tracked:
234 elif wc_tracked:
244 # this is a "normal" file
235 # this is a "normal" file
245 if parentfiledata is None:
236 if parentfiledata is None:
246 msg = b'failed to pass parentfiledata for a normal file: %s'
237 msg = b'failed to pass parentfiledata for a normal file: %s'
247 msg %= filename
238 msg %= filename
248 raise error.ProgrammingError(msg)
239 raise error.ProgrammingError(msg)
249 else:
240 else:
250 assert False, 'unreachable'
241 assert False, 'unreachable'
251
242
252 old_entry = self._map.get(filename)
243 old_entry = self._map.get(filename)
253 self._dirs_incr(filename, old_entry)
244 self._dirs_incr(filename, old_entry)
254 entry = DirstateItem(
245 entry = DirstateItem(
255 wc_tracked=wc_tracked,
246 wc_tracked=wc_tracked,
256 p1_tracked=p1_tracked,
247 p1_tracked=p1_tracked,
257 p2_tracked=p2_tracked,
248 p2_tracked=p2_tracked,
258 merged=merged,
249 merged=merged,
259 clean_p1=clean_p1,
250 clean_p1=clean_p1,
260 clean_p2=clean_p2,
251 clean_p2=clean_p2,
261 possibly_dirty=possibly_dirty,
252 possibly_dirty=possibly_dirty,
262 parentfiledata=parentfiledata,
253 parentfiledata=parentfiledata,
263 )
254 )
264 self._map[filename] = entry
255 self._map[filename] = entry
265
256
266 def set_tracked(self, filename):
257 def set_tracked(self, filename):
267 new = False
258 new = False
268 entry = self.get(filename)
259 entry = self.get(filename)
269 if entry is None:
260 if entry is None:
270 self._dirs_incr(filename)
261 self._dirs_incr(filename)
271 entry = DirstateItem(
262 entry = DirstateItem(
272 p1_tracked=False,
263 p1_tracked=False,
273 p2_tracked=False,
264 p2_tracked=False,
274 wc_tracked=True,
265 wc_tracked=True,
275 merged=False,
266 merged=False,
276 clean_p1=False,
267 clean_p1=False,
277 clean_p2=False,
268 clean_p2=False,
278 possibly_dirty=False,
269 possibly_dirty=False,
279 parentfiledata=None,
270 parentfiledata=None,
280 )
271 )
281 self._map[filename] = entry
272 self._map[filename] = entry
282 new = True
273 new = True
283 elif not entry.tracked:
274 elif not entry.tracked:
284 self._dirs_incr(filename, entry)
275 self._dirs_incr(filename, entry)
285 entry.set_tracked()
276 entry.set_tracked()
286 new = True
277 new = True
287 else:
278 else:
288 # XXX This is probably overkill for more case, but we need this to
279 # XXX This is probably overkill for more case, but we need this to
289 # fully replace the `normallookup` call with `set_tracked` one.
280 # fully replace the `normallookup` call with `set_tracked` one.
290 # Consider smoothing this in the future.
281 # Consider smoothing this in the future.
291 self.set_possibly_dirty(filename)
282 self.set_possibly_dirty(filename)
292 return new
283 return new
293
284
294 def set_untracked(self, f):
285 def set_untracked(self, f):
295 """Mark a file as no longer tracked in the dirstate map"""
286 """Mark a file as no longer tracked in the dirstate map"""
296 entry = self.get(f)
287 entry = self.get(f)
297 if entry is None:
288 if entry is None:
298 return False
289 return False
299 else:
290 else:
300 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
291 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
301 if not entry.merged:
292 if not entry.merged:
302 self.copymap.pop(f, None)
293 self.copymap.pop(f, None)
303 if entry.added:
294 if entry.added:
304 self._map.pop(f, None)
295 self._map.pop(f, None)
305 else:
296 else:
306 entry.set_untracked()
297 entry.set_untracked()
307 return True
298 return True
308
299
309 @propertycache
300 @propertycache
310 def filefoldmap(self):
301 def filefoldmap(self):
311 """Returns a dictionary mapping normalized case paths to their
302 """Returns a dictionary mapping normalized case paths to their
312 non-normalized versions.
303 non-normalized versions.
313 """
304 """
314 try:
305 try:
315 makefilefoldmap = parsers.make_file_foldmap
306 makefilefoldmap = parsers.make_file_foldmap
316 except AttributeError:
307 except AttributeError:
317 pass
308 pass
318 else:
309 else:
319 return makefilefoldmap(
310 return makefilefoldmap(
320 self._map, util.normcasespec, util.normcasefallback
311 self._map, util.normcasespec, util.normcasefallback
321 )
312 )
322
313
323 f = {}
314 f = {}
324 normcase = util.normcase
315 normcase = util.normcase
325 for name, s in pycompat.iteritems(self._map):
316 for name, s in pycompat.iteritems(self._map):
326 if not s.removed:
317 if not s.removed:
327 f[normcase(name)] = name
318 f[normcase(name)] = name
328 f[b'.'] = b'.' # prevents useless util.fspath() invocation
319 f[b'.'] = b'.' # prevents useless util.fspath() invocation
329 return f
320 return f
330
321
331 def hastrackeddir(self, d):
322 def hastrackeddir(self, d):
332 """
323 """
333 Returns True if the dirstate contains a tracked (not removed) file
324 Returns True if the dirstate contains a tracked (not removed) file
334 in this directory.
325 in this directory.
335 """
326 """
336 return d in self._dirs
327 return d in self._dirs
337
328
338 def hasdir(self, d):
329 def hasdir(self, d):
339 """
330 """
340 Returns True if the dirstate contains a file (tracked or removed)
331 Returns True if the dirstate contains a file (tracked or removed)
341 in this directory.
332 in this directory.
342 """
333 """
343 return d in self._alldirs
334 return d in self._alldirs
344
335
345 @propertycache
336 @propertycache
346 def _dirs(self):
337 def _dirs(self):
347 return pathutil.dirs(self._map, only_tracked=True)
338 return pathutil.dirs(self._map, only_tracked=True)
348
339
349 @propertycache
340 @propertycache
350 def _alldirs(self):
341 def _alldirs(self):
351 return pathutil.dirs(self._map)
342 return pathutil.dirs(self._map)
352
343
353 def _opendirstatefile(self):
344 def _opendirstatefile(self):
354 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
345 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
355 if self._pendingmode is not None and self._pendingmode != mode:
346 if self._pendingmode is not None and self._pendingmode != mode:
356 fp.close()
347 fp.close()
357 raise error.Abort(
348 raise error.Abort(
358 _(b'working directory state may be changed parallelly')
349 _(b'working directory state may be changed parallelly')
359 )
350 )
360 self._pendingmode = mode
351 self._pendingmode = mode
361 return fp
352 return fp
362
353
363 def parents(self):
354 def parents(self):
364 if not self._parents:
355 if not self._parents:
365 try:
356 try:
366 fp = self._opendirstatefile()
357 fp = self._opendirstatefile()
367 st = fp.read(2 * self._nodelen)
358 st = fp.read(2 * self._nodelen)
368 fp.close()
359 fp.close()
369 except IOError as err:
360 except IOError as err:
370 if err.errno != errno.ENOENT:
361 if err.errno != errno.ENOENT:
371 raise
362 raise
372 # File doesn't exist, so the current state is empty
363 # File doesn't exist, so the current state is empty
373 st = b''
364 st = b''
374
365
375 l = len(st)
366 l = len(st)
376 if l == self._nodelen * 2:
367 if l == self._nodelen * 2:
377 self._parents = (
368 self._parents = (
378 st[: self._nodelen],
369 st[: self._nodelen],
379 st[self._nodelen : 2 * self._nodelen],
370 st[self._nodelen : 2 * self._nodelen],
380 )
371 )
381 elif l == 0:
372 elif l == 0:
382 self._parents = (
373 self._parents = (
383 self._nodeconstants.nullid,
374 self._nodeconstants.nullid,
384 self._nodeconstants.nullid,
375 self._nodeconstants.nullid,
385 )
376 )
386 else:
377 else:
387 raise error.Abort(
378 raise error.Abort(
388 _(b'working directory state appears damaged!')
379 _(b'working directory state appears damaged!')
389 )
380 )
390
381
391 return self._parents
382 return self._parents
392
383
393 def setparents(self, p1, p2, fold_p2=False):
384 def setparents(self, p1, p2, fold_p2=False):
394 self._parents = (p1, p2)
385 self._parents = (p1, p2)
395 self._dirtyparents = True
386 self._dirtyparents = True
396 copies = {}
387 copies = {}
397 if fold_p2:
388 if fold_p2:
398 for f, s in pycompat.iteritems(self._map):
389 for f, s in pycompat.iteritems(self._map):
399 # Discard "merged" markers when moving away from a merge state
390 # Discard "merged" markers when moving away from a merge state
400 if s.merged or s.from_p2:
391 if s.merged or s.from_p2:
401 source = self.copymap.pop(f, None)
392 source = self.copymap.pop(f, None)
402 if source:
393 if source:
403 copies[f] = source
394 copies[f] = source
404 s.drop_merge_data()
395 s.drop_merge_data()
405 return copies
396 return copies
406
397
407 def read(self):
398 def read(self):
408 # ignore HG_PENDING because identity is used only for writing
399 # ignore HG_PENDING because identity is used only for writing
409 self.identity = util.filestat.frompath(
400 self.identity = util.filestat.frompath(
410 self._opener.join(self._filename)
401 self._opener.join(self._filename)
411 )
402 )
412
403
413 try:
404 try:
414 fp = self._opendirstatefile()
405 fp = self._opendirstatefile()
415 try:
406 try:
416 st = fp.read()
407 st = fp.read()
417 finally:
408 finally:
418 fp.close()
409 fp.close()
419 except IOError as err:
410 except IOError as err:
420 if err.errno != errno.ENOENT:
411 if err.errno != errno.ENOENT:
421 raise
412 raise
422 return
413 return
423 if not st:
414 if not st:
424 return
415 return
425
416
426 if util.safehasattr(parsers, b'dict_new_presized'):
417 if util.safehasattr(parsers, b'dict_new_presized'):
427 # Make an estimate of the number of files in the dirstate based on
418 # Make an estimate of the number of files in the dirstate based on
428 # its size. This trades wasting some memory for avoiding costly
419 # its size. This trades wasting some memory for avoiding costly
429 # resizes. Each entry have a prefix of 17 bytes followed by one or
420 # resizes. Each entry have a prefix of 17 bytes followed by one or
430 # two path names. Studies on various large-scale real-world repositories
421 # two path names. Studies on various large-scale real-world repositories
431 # found 54 bytes a reasonable upper limit for the average path names.
422 # found 54 bytes a reasonable upper limit for the average path names.
432 # Copy entries are ignored for the sake of this estimate.
423 # Copy entries are ignored for the sake of this estimate.
433 self._map = parsers.dict_new_presized(len(st) // 71)
424 self._map = parsers.dict_new_presized(len(st) // 71)
434
425
435 # Python's garbage collector triggers a GC each time a certain number
426 # Python's garbage collector triggers a GC each time a certain number
436 # of container objects (the number being defined by
427 # of container objects (the number being defined by
437 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
428 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
438 # for each file in the dirstate. The C version then immediately marks
429 # for each file in the dirstate. The C version then immediately marks
439 # them as not to be tracked by the collector. However, this has no
430 # them as not to be tracked by the collector. However, this has no
440 # effect on when GCs are triggered, only on what objects the GC looks
431 # effect on when GCs are triggered, only on what objects the GC looks
441 # into. This means that O(number of files) GCs are unavoidable.
432 # into. This means that O(number of files) GCs are unavoidable.
442 # Depending on when in the process's lifetime the dirstate is parsed,
433 # Depending on when in the process's lifetime the dirstate is parsed,
443 # this can get very expensive. As a workaround, disable GC while
434 # this can get very expensive. As a workaround, disable GC while
444 # parsing the dirstate.
435 # parsing the dirstate.
445 #
436 #
446 # (we cannot decorate the function directly since it is in a C module)
437 # (we cannot decorate the function directly since it is in a C module)
447 parse_dirstate = util.nogc(parsers.parse_dirstate)
438 parse_dirstate = util.nogc(parsers.parse_dirstate)
448 p = parse_dirstate(self._map, self.copymap, st)
439 p = parse_dirstate(self._map, self.copymap, st)
449 if not self._dirtyparents:
440 if not self._dirtyparents:
450 self.setparents(*p)
441 self.setparents(*p)
451
442
452 # Avoid excess attribute lookups by fast pathing certain checks
443 # Avoid excess attribute lookups by fast pathing certain checks
453 self.__contains__ = self._map.__contains__
444 self.__contains__ = self._map.__contains__
454 self.__getitem__ = self._map.__getitem__
445 self.__getitem__ = self._map.__getitem__
455 self.get = self._map.get
446 self.get = self._map.get
456
447
457 def write(self, _tr, st, now):
448 def write(self, _tr, st, now):
458 st.write(
449 st.write(
459 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
450 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
460 )
451 )
461 st.close()
452 st.close()
462 self._dirtyparents = False
453 self._dirtyparents = False
463
454
464 @propertycache
455 @propertycache
465 def identity(self):
456 def identity(self):
466 self._map
457 self._map
467 return self.identity
458 return self.identity
468
459
469 @propertycache
460 @propertycache
470 def dirfoldmap(self):
461 def dirfoldmap(self):
471 f = {}
462 f = {}
472 normcase = util.normcase
463 normcase = util.normcase
473 for name in self._dirs:
464 for name in self._dirs:
474 f[normcase(name)] = name
465 f[normcase(name)] = name
475 return f
466 return f
476
467
477
468
478 if rustmod is not None:
469 if rustmod is not None:
479
470
480 class dirstatemap(object):
471 class dirstatemap(object):
481 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
472 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
482 self._use_dirstate_v2 = use_dirstate_v2
473 self._use_dirstate_v2 = use_dirstate_v2
483 self._nodeconstants = nodeconstants
474 self._nodeconstants = nodeconstants
484 self._ui = ui
475 self._ui = ui
485 self._opener = opener
476 self._opener = opener
486 self._root = root
477 self._root = root
487 self._filename = b'dirstate'
478 self._filename = b'dirstate'
488 self._nodelen = 20 # Also update Rust code when changing this!
479 self._nodelen = 20 # Also update Rust code when changing this!
489 self._parents = None
480 self._parents = None
490 self._dirtyparents = False
481 self._dirtyparents = False
491 self._docket = None
482 self._docket = None
492
483
493 # for consistent view between _pl() and _read() invocations
484 # for consistent view between _pl() and _read() invocations
494 self._pendingmode = None
485 self._pendingmode = None
495
486
496 def addfile(
487 def addfile(
497 self,
488 self,
498 f,
489 f,
499 mode=0,
490 mode=0,
500 size=None,
491 size=None,
501 mtime=None,
492 mtime=None,
502 added=False,
493 added=False,
503 merged=False,
494 merged=False,
504 from_p2=False,
495 from_p2=False,
505 possibly_dirty=False,
496 possibly_dirty=False,
506 ):
497 ):
507 if added:
498 if added:
508 assert not possibly_dirty
499 assert not possibly_dirty
509 assert not from_p2
500 assert not from_p2
510 item = DirstateItem.new_added()
501 item = DirstateItem.new_added()
511 elif merged:
502 elif merged:
512 assert not possibly_dirty
503 assert not possibly_dirty
513 assert not from_p2
504 assert not from_p2
514 item = DirstateItem.new_merged()
505 item = DirstateItem.new_merged()
515 elif from_p2:
506 elif from_p2:
516 assert not possibly_dirty
507 assert not possibly_dirty
517 item = DirstateItem.new_from_p2()
508 item = DirstateItem.new_from_p2()
518 elif possibly_dirty:
509 elif possibly_dirty:
519 item = DirstateItem.new_possibly_dirty()
510 item = DirstateItem.new_possibly_dirty()
520 else:
511 else:
521 assert size is not None
512 assert size is not None
522 assert mtime is not None
513 assert mtime is not None
523 size = size & rangemask
514 size = size & rangemask
524 mtime = mtime & rangemask
515 mtime = mtime & rangemask
525 item = DirstateItem.new_normal(mode, size, mtime)
516 item = DirstateItem.new_normal(mode, size, mtime)
526 self._rustmap.addfile(f, item)
517 self._rustmap.addfile(f, item)
527 if added:
518 if added:
528 self.copymap.pop(f, None)
519 self.copymap.pop(f, None)
529
520
530 def reset_state(
521 def reset_state(
531 self,
522 self,
532 filename,
523 filename,
533 wc_tracked=False,
524 wc_tracked=False,
534 p1_tracked=False,
525 p1_tracked=False,
535 p2_tracked=False,
526 p2_tracked=False,
536 merged=False,
527 merged=False,
537 clean_p1=False,
528 clean_p1=False,
538 clean_p2=False,
529 clean_p2=False,
539 possibly_dirty=False,
530 possibly_dirty=False,
540 parentfiledata=None,
531 parentfiledata=None,
541 ):
532 ):
542 """Set a entry to a given state, disregarding all previous state
533 """Set a entry to a given state, disregarding all previous state
543
534
544 This is to be used by the part of the dirstate API dedicated to
535 This is to be used by the part of the dirstate API dedicated to
545 adjusting the dirstate after a update/merge.
536 adjusting the dirstate after a update/merge.
546
537
547 note: calling this might result to no entry existing at all if the
538 note: calling this might result to no entry existing at all if the
548 dirstate map does not see any point at having one for this file
539 dirstate map does not see any point at having one for this file
549 anymore.
540 anymore.
550 """
541 """
551 if merged and (clean_p1 or clean_p2):
542 if merged and (clean_p1 or clean_p2):
552 msg = (
543 msg = (
553 b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
544 b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
554 )
545 )
555 raise error.ProgrammingError(msg)
546 raise error.ProgrammingError(msg)
556 # copy information are now outdated
547 # copy information are now outdated
557 # (maybe new information should be in directly passed to this function)
548 # (maybe new information should be in directly passed to this function)
558 self.copymap.pop(filename, None)
549 self.copymap.pop(filename, None)
559
550
560 if not (p1_tracked or p2_tracked or wc_tracked):
551 if not (p1_tracked or p2_tracked or wc_tracked):
561 self._rustmap.drop_item_and_copy_source(filename)
552 self._rustmap.drop_item_and_copy_source(filename)
562 elif merged:
553 elif merged:
563 # XXX might be merged and removed ?
554 # XXX might be merged and removed ?
564 entry = self.get(filename)
555 entry = self.get(filename)
565 if entry is not None and entry.tracked:
556 if entry is not None and entry.tracked:
566 # XXX mostly replicate dirstate.other parent. We should get
557 # XXX mostly replicate dirstate.other parent. We should get
567 # the higher layer to pass us more reliable data where `merged`
558 # the higher layer to pass us more reliable data where `merged`
568 # actually mean merged. Dropping the else clause will show
559 # actually mean merged. Dropping the else clause will show
569 # failure in `test-graft.t`
560 # failure in `test-graft.t`
570 self.addfile(filename, merged=True)
561 self.addfile(filename, merged=True)
571 else:
562 else:
572 self.addfile(filename, from_p2=True)
563 self.addfile(filename, from_p2=True)
573 elif not (p1_tracked or p2_tracked) and wc_tracked:
564 elif not (p1_tracked or p2_tracked) and wc_tracked:
574 self.addfile(
565 self.addfile(
575 filename, added=True, possibly_dirty=possibly_dirty
566 filename, added=True, possibly_dirty=possibly_dirty
576 )
567 )
577 elif (p1_tracked or p2_tracked) and not wc_tracked:
568 elif (p1_tracked or p2_tracked) and not wc_tracked:
578 # XXX might be merged and removed ?
569 # XXX might be merged and removed ?
579 self[filename] = DirstateItem.from_v1_data(b'r', 0, 0, 0)
570 self[filename] = DirstateItem.from_v1_data(b'r', 0, 0, 0)
580 elif clean_p2 and wc_tracked:
571 elif clean_p2 and wc_tracked:
581 if p1_tracked or self.get(filename) is not None:
572 if p1_tracked or self.get(filename) is not None:
582 # XXX the `self.get` call is catching some case in
573 # XXX the `self.get` call is catching some case in
583 # `test-merge-remove.t` where the file is tracked in p1, the
574 # `test-merge-remove.t` where the file is tracked in p1, the
584 # p1_tracked argument is False.
575 # p1_tracked argument is False.
585 #
576 #
586 # In addition, this seems to be a case where the file is marked
577 # In addition, this seems to be a case where the file is marked
587 # as merged without actually being the result of a merge
578 # as merged without actually being the result of a merge
588 # action. So thing are not ideal here.
579 # action. So thing are not ideal here.
589 self.addfile(filename, merged=True)
580 self.addfile(filename, merged=True)
590 else:
581 else:
591 self.addfile(filename, from_p2=True)
582 self.addfile(filename, from_p2=True)
592 elif not p1_tracked and p2_tracked and wc_tracked:
583 elif not p1_tracked and p2_tracked and wc_tracked:
593 self.addfile(
584 self.addfile(
594 filename, from_p2=True, possibly_dirty=possibly_dirty
585 filename, from_p2=True, possibly_dirty=possibly_dirty
595 )
586 )
596 elif possibly_dirty:
587 elif possibly_dirty:
597 self.addfile(filename, possibly_dirty=possibly_dirty)
588 self.addfile(filename, possibly_dirty=possibly_dirty)
598 elif wc_tracked:
589 elif wc_tracked:
599 # this is a "normal" file
590 # this is a "normal" file
600 if parentfiledata is None:
591 if parentfiledata is None:
601 msg = b'failed to pass parentfiledata for a normal file: %s'
592 msg = b'failed to pass parentfiledata for a normal file: %s'
602 msg %= filename
593 msg %= filename
603 raise error.ProgrammingError(msg)
594 raise error.ProgrammingError(msg)
604 mode, size, mtime = parentfiledata
595 mode, size, mtime = parentfiledata
605 self.addfile(filename, mode=mode, size=size, mtime=mtime)
596 self.addfile(filename, mode=mode, size=size, mtime=mtime)
606 else:
597 else:
607 assert False, 'unreachable'
598 assert False, 'unreachable'
608
599
609 def set_tracked(self, filename):
600 def set_tracked(self, filename):
610 new = False
601 new = False
611 entry = self.get(filename)
602 entry = self.get(filename)
612 if entry is None:
603 if entry is None:
613 self.addfile(filename, added=True)
604 self.addfile(filename, added=True)
614 new = True
605 new = True
615 elif not entry.tracked:
606 elif not entry.tracked:
616 entry.set_tracked()
607 entry.set_tracked()
617 self._rustmap.set_dirstate_item(filename, entry)
608 self._rustmap.set_dirstate_item(filename, entry)
618 new = True
609 new = True
619 else:
610 else:
620 # XXX This is probably overkill for more case, but we need this to
611 # XXX This is probably overkill for more case, but we need this to
621 # fully replace the `normallookup` call with `set_tracked` one.
612 # fully replace the `normallookup` call with `set_tracked` one.
622 # Consider smoothing this in the future.
613 # Consider smoothing this in the future.
623 self.set_possibly_dirty(filename)
614 self.set_possibly_dirty(filename)
624 return new
615 return new
625
616
626 def set_untracked(self, f):
617 def set_untracked(self, f):
627 """Mark a file as no longer tracked in the dirstate map"""
618 """Mark a file as no longer tracked in the dirstate map"""
628 # in merge is only trigger more logic, so it "fine" to pass it.
619 # in merge is only trigger more logic, so it "fine" to pass it.
629 #
620 #
630 # the inner rust dirstate map code need to be adjusted once the API
621 # the inner rust dirstate map code need to be adjusted once the API
631 # for dirstate/dirstatemap/DirstateItem is a bit more settled
622 # for dirstate/dirstatemap/DirstateItem is a bit more settled
632 entry = self.get(f)
623 entry = self.get(f)
633 if entry is None:
624 if entry is None:
634 return False
625 return False
635 else:
626 else:
636 if entry.added:
627 if entry.added:
637 self._rustmap.drop_item_and_copy_source(f)
628 self._rustmap.drop_item_and_copy_source(f)
638 else:
629 else:
639 self._rustmap.removefile(f, in_merge=True)
630 self._rustmap.removefile(f, in_merge=True)
640 return True
631 return True
641
632
642 def removefile(self, *args, **kwargs):
633 def removefile(self, *args, **kwargs):
643 return self._rustmap.removefile(*args, **kwargs)
634 return self._rustmap.removefile(*args, **kwargs)
644
635
645 def get(self, *args, **kwargs):
636 def get(self, *args, **kwargs):
646 return self._rustmap.get(*args, **kwargs)
637 return self._rustmap.get(*args, **kwargs)
647
638
648 @property
639 @property
649 def copymap(self):
640 def copymap(self):
650 return self._rustmap.copymap()
641 return self._rustmap.copymap()
651
642
652 def debug_iter(self, all):
643 def debug_iter(self, all):
653 """
644 """
654 Return an iterator of (filename, state, mode, size, mtime) tuples
645 Return an iterator of (filename, state, mode, size, mtime) tuples
655
646
656 `all`: also include with `state == b' '` dirstate tree nodes that
647 `all`: also include with `state == b' '` dirstate tree nodes that
657 don't have an associated `DirstateItem`.
648 don't have an associated `DirstateItem`.
658
649
659 """
650 """
660 return self._rustmap.debug_iter(all)
651 return self._rustmap.debug_iter(all)
661
652
662 def preload(self):
653 def preload(self):
663 self._rustmap
654 self._rustmap
664
655
665 def clear(self):
656 def clear(self):
666 self._rustmap.clear()
657 self._rustmap.clear()
667 self.setparents(
658 self.setparents(
668 self._nodeconstants.nullid, self._nodeconstants.nullid
659 self._nodeconstants.nullid, self._nodeconstants.nullid
669 )
660 )
670 util.clearcachedproperty(self, b"_dirs")
661 util.clearcachedproperty(self, b"_dirs")
671 util.clearcachedproperty(self, b"_alldirs")
662 util.clearcachedproperty(self, b"_alldirs")
672 util.clearcachedproperty(self, b"dirfoldmap")
663 util.clearcachedproperty(self, b"dirfoldmap")
673
664
674 def items(self):
665 def items(self):
675 return self._rustmap.items()
666 return self._rustmap.items()
676
667
677 def keys(self):
668 def keys(self):
678 return iter(self._rustmap)
669 return iter(self._rustmap)
679
670
680 def __contains__(self, key):
671 def __contains__(self, key):
681 return key in self._rustmap
672 return key in self._rustmap
682
673
683 def __getitem__(self, item):
674 def __getitem__(self, item):
684 return self._rustmap[item]
675 return self._rustmap[item]
685
676
686 def __len__(self):
677 def __len__(self):
687 return len(self._rustmap)
678 return len(self._rustmap)
688
679
689 def __iter__(self):
680 def __iter__(self):
690 return iter(self._rustmap)
681 return iter(self._rustmap)
691
682
692 # forward for python2,3 compat
683 # forward for python2,3 compat
693 iteritems = items
684 iteritems = items
694
685
695 def _opendirstatefile(self):
686 def _opendirstatefile(self):
696 fp, mode = txnutil.trypending(
687 fp, mode = txnutil.trypending(
697 self._root, self._opener, self._filename
688 self._root, self._opener, self._filename
698 )
689 )
699 if self._pendingmode is not None and self._pendingmode != mode:
690 if self._pendingmode is not None and self._pendingmode != mode:
700 fp.close()
691 fp.close()
701 raise error.Abort(
692 raise error.Abort(
702 _(b'working directory state may be changed parallelly')
693 _(b'working directory state may be changed parallelly')
703 )
694 )
704 self._pendingmode = mode
695 self._pendingmode = mode
705 return fp
696 return fp
706
697
707 def _readdirstatefile(self, size=-1):
698 def _readdirstatefile(self, size=-1):
708 try:
699 try:
709 with self._opendirstatefile() as fp:
700 with self._opendirstatefile() as fp:
710 return fp.read(size)
701 return fp.read(size)
711 except IOError as err:
702 except IOError as err:
712 if err.errno != errno.ENOENT:
703 if err.errno != errno.ENOENT:
713 raise
704 raise
714 # File doesn't exist, so the current state is empty
705 # File doesn't exist, so the current state is empty
715 return b''
706 return b''
716
707
717 def setparents(self, p1, p2, fold_p2=False):
708 def setparents(self, p1, p2, fold_p2=False):
718 self._parents = (p1, p2)
709 self._parents = (p1, p2)
719 self._dirtyparents = True
710 self._dirtyparents = True
720 copies = {}
711 copies = {}
721 if fold_p2:
712 if fold_p2:
722 # Collect into an intermediate list to avoid a `RuntimeError`
713 # Collect into an intermediate list to avoid a `RuntimeError`
723 # exception due to mutation during iteration.
714 # exception due to mutation during iteration.
724 # TODO: move this the whole loop to Rust where `iter_mut`
715 # TODO: move this the whole loop to Rust where `iter_mut`
725 # enables in-place mutation of elements of a collection while
716 # enables in-place mutation of elements of a collection while
726 # iterating it, without mutating the collection itself.
717 # iterating it, without mutating the collection itself.
727 candidatefiles = [
718 candidatefiles = [
728 (f, s)
719 (f, s)
729 for f, s in self._rustmap.items()
720 for f, s in self._rustmap.items()
730 if s.merged or s.from_p2
721 if s.merged or s.from_p2
731 ]
722 ]
732 for f, s in candidatefiles:
723 for f, s in candidatefiles:
733 # Discard "merged" markers when moving away from a merge state
724 # Discard "merged" markers when moving away from a merge state
734 if s.merged:
725 if s.merged:
735 source = self.copymap.get(f)
726 source = self.copymap.get(f)
736 if source:
727 if source:
737 copies[f] = source
728 copies[f] = source
738 self.reset_state(
729 self.reset_state(
739 f,
730 f,
740 wc_tracked=True,
731 wc_tracked=True,
741 p1_tracked=True,
732 p1_tracked=True,
742 possibly_dirty=True,
733 possibly_dirty=True,
743 )
734 )
744 # Also fix up otherparent markers
735 # Also fix up otherparent markers
745 elif s.from_p2:
736 elif s.from_p2:
746 source = self.copymap.get(f)
737 source = self.copymap.get(f)
747 if source:
738 if source:
748 copies[f] = source
739 copies[f] = source
749 self.reset_state(
740 self.reset_state(
750 f,
741 f,
751 p1_tracked=False,
742 p1_tracked=False,
752 wc_tracked=True,
743 wc_tracked=True,
753 )
744 )
754 return copies
745 return copies
755
746
756 def parents(self):
747 def parents(self):
757 if not self._parents:
748 if not self._parents:
758 if self._use_dirstate_v2:
749 if self._use_dirstate_v2:
759 self._parents = self.docket.parents
750 self._parents = self.docket.parents
760 else:
751 else:
761 read_len = self._nodelen * 2
752 read_len = self._nodelen * 2
762 st = self._readdirstatefile(read_len)
753 st = self._readdirstatefile(read_len)
763 l = len(st)
754 l = len(st)
764 if l == read_len:
755 if l == read_len:
765 self._parents = (
756 self._parents = (
766 st[: self._nodelen],
757 st[: self._nodelen],
767 st[self._nodelen : 2 * self._nodelen],
758 st[self._nodelen : 2 * self._nodelen],
768 )
759 )
769 elif l == 0:
760 elif l == 0:
770 self._parents = (
761 self._parents = (
771 self._nodeconstants.nullid,
762 self._nodeconstants.nullid,
772 self._nodeconstants.nullid,
763 self._nodeconstants.nullid,
773 )
764 )
774 else:
765 else:
775 raise error.Abort(
766 raise error.Abort(
776 _(b'working directory state appears damaged!')
767 _(b'working directory state appears damaged!')
777 )
768 )
778
769
779 return self._parents
770 return self._parents
780
771
781 @property
772 @property
782 def docket(self):
773 def docket(self):
783 if not self._docket:
774 if not self._docket:
784 if not self._use_dirstate_v2:
775 if not self._use_dirstate_v2:
785 raise error.ProgrammingError(
776 raise error.ProgrammingError(
786 b'dirstate only has a docket in v2 format'
777 b'dirstate only has a docket in v2 format'
787 )
778 )
788 self._docket = docketmod.DirstateDocket.parse(
779 self._docket = docketmod.DirstateDocket.parse(
789 self._readdirstatefile(), self._nodeconstants
780 self._readdirstatefile(), self._nodeconstants
790 )
781 )
791 return self._docket
782 return self._docket
792
783
793 @propertycache
784 @propertycache
794 def _rustmap(self):
785 def _rustmap(self):
795 """
786 """
796 Fills the Dirstatemap when called.
787 Fills the Dirstatemap when called.
797 """
788 """
798 # ignore HG_PENDING because identity is used only for writing
789 # ignore HG_PENDING because identity is used only for writing
799 self.identity = util.filestat.frompath(
790 self.identity = util.filestat.frompath(
800 self._opener.join(self._filename)
791 self._opener.join(self._filename)
801 )
792 )
802
793
803 if self._use_dirstate_v2:
794 if self._use_dirstate_v2:
804 if self.docket.uuid:
795 if self.docket.uuid:
805 # TODO: use mmap when possible
796 # TODO: use mmap when possible
806 data = self._opener.read(self.docket.data_filename())
797 data = self._opener.read(self.docket.data_filename())
807 else:
798 else:
808 data = b''
799 data = b''
809 self._rustmap = rustmod.DirstateMap.new_v2(
800 self._rustmap = rustmod.DirstateMap.new_v2(
810 data, self.docket.data_size, self.docket.tree_metadata
801 data, self.docket.data_size, self.docket.tree_metadata
811 )
802 )
812 parents = self.docket.parents
803 parents = self.docket.parents
813 else:
804 else:
814 self._rustmap, parents = rustmod.DirstateMap.new_v1(
805 self._rustmap, parents = rustmod.DirstateMap.new_v1(
815 self._readdirstatefile()
806 self._readdirstatefile()
816 )
807 )
817
808
818 if parents and not self._dirtyparents:
809 if parents and not self._dirtyparents:
819 self.setparents(*parents)
810 self.setparents(*parents)
820
811
821 self.__contains__ = self._rustmap.__contains__
812 self.__contains__ = self._rustmap.__contains__
822 self.__getitem__ = self._rustmap.__getitem__
813 self.__getitem__ = self._rustmap.__getitem__
823 self.get = self._rustmap.get
814 self.get = self._rustmap.get
824 return self._rustmap
815 return self._rustmap
825
816
826 def write(self, tr, st, now):
817 def write(self, tr, st, now):
827 if not self._use_dirstate_v2:
818 if not self._use_dirstate_v2:
828 p1, p2 = self.parents()
819 p1, p2 = self.parents()
829 packed = self._rustmap.write_v1(p1, p2, now)
820 packed = self._rustmap.write_v1(p1, p2, now)
830 st.write(packed)
821 st.write(packed)
831 st.close()
822 st.close()
832 self._dirtyparents = False
823 self._dirtyparents = False
833 return
824 return
834
825
835 # We can only append to an existing data file if there is one
826 # We can only append to an existing data file if there is one
836 can_append = self.docket.uuid is not None
827 can_append = self.docket.uuid is not None
837 packed, meta, append = self._rustmap.write_v2(now, can_append)
828 packed, meta, append = self._rustmap.write_v2(now, can_append)
838 if append:
829 if append:
839 docket = self.docket
830 docket = self.docket
840 data_filename = docket.data_filename()
831 data_filename = docket.data_filename()
841 if tr:
832 if tr:
842 tr.add(data_filename, docket.data_size)
833 tr.add(data_filename, docket.data_size)
843 with self._opener(data_filename, b'r+b') as fp:
834 with self._opener(data_filename, b'r+b') as fp:
844 fp.seek(docket.data_size)
835 fp.seek(docket.data_size)
845 assert fp.tell() == docket.data_size
836 assert fp.tell() == docket.data_size
846 written = fp.write(packed)
837 written = fp.write(packed)
847 if written is not None: # py2 may return None
838 if written is not None: # py2 may return None
848 assert written == len(packed), (written, len(packed))
839 assert written == len(packed), (written, len(packed))
849 docket.data_size += len(packed)
840 docket.data_size += len(packed)
850 docket.parents = self.parents()
841 docket.parents = self.parents()
851 docket.tree_metadata = meta
842 docket.tree_metadata = meta
852 st.write(docket.serialize())
843 st.write(docket.serialize())
853 st.close()
844 st.close()
854 else:
845 else:
855 old_docket = self.docket
846 old_docket = self.docket
856 new_docket = docketmod.DirstateDocket.with_new_uuid(
847 new_docket = docketmod.DirstateDocket.with_new_uuid(
857 self.parents(), len(packed), meta
848 self.parents(), len(packed), meta
858 )
849 )
859 data_filename = new_docket.data_filename()
850 data_filename = new_docket.data_filename()
860 if tr:
851 if tr:
861 tr.add(data_filename, 0)
852 tr.add(data_filename, 0)
862 self._opener.write(data_filename, packed)
853 self._opener.write(data_filename, packed)
863 # Write the new docket after the new data file has been
854 # Write the new docket after the new data file has been
864 # written. Because `st` was opened with `atomictemp=True`,
855 # written. Because `st` was opened with `atomictemp=True`,
865 # the actual `.hg/dirstate` file is only affected on close.
856 # the actual `.hg/dirstate` file is only affected on close.
866 st.write(new_docket.serialize())
857 st.write(new_docket.serialize())
867 st.close()
858 st.close()
868 # Remove the old data file after the new docket pointing to
859 # Remove the old data file after the new docket pointing to
869 # the new data file was written.
860 # the new data file was written.
870 if old_docket.uuid:
861 if old_docket.uuid:
871 data_filename = old_docket.data_filename()
862 data_filename = old_docket.data_filename()
872 unlink = lambda _tr=None: self._opener.unlink(data_filename)
863 unlink = lambda _tr=None: self._opener.unlink(data_filename)
873 if tr:
864 if tr:
874 category = b"dirstate-v2-clean-" + old_docket.uuid
865 category = b"dirstate-v2-clean-" + old_docket.uuid
875 tr.addpostclose(category, unlink)
866 tr.addpostclose(category, unlink)
876 else:
867 else:
877 unlink()
868 unlink()
878 self._docket = new_docket
869 self._docket = new_docket
879 # Reload from the newly-written file
870 # Reload from the newly-written file
880 util.clearcachedproperty(self, b"_rustmap")
871 util.clearcachedproperty(self, b"_rustmap")
881 self._dirtyparents = False
872 self._dirtyparents = False
882
873
883 @propertycache
874 @propertycache
884 def filefoldmap(self):
875 def filefoldmap(self):
885 """Returns a dictionary mapping normalized case paths to their
876 """Returns a dictionary mapping normalized case paths to their
886 non-normalized versions.
877 non-normalized versions.
887 """
878 """
888 return self._rustmap.filefoldmapasdict()
879 return self._rustmap.filefoldmapasdict()
889
880
890 def hastrackeddir(self, d):
881 def hastrackeddir(self, d):
891 return self._rustmap.hastrackeddir(d)
882 return self._rustmap.hastrackeddir(d)
892
883
893 def hasdir(self, d):
884 def hasdir(self, d):
894 return self._rustmap.hasdir(d)
885 return self._rustmap.hasdir(d)
895
886
896 @propertycache
887 @propertycache
897 def identity(self):
888 def identity(self):
898 self._rustmap
889 self._rustmap
899 return self.identity
890 return self.identity
900
891
901 @propertycache
892 @propertycache
902 def dirfoldmap(self):
893 def dirfoldmap(self):
903 f = {}
894 f = {}
904 normcase = util.normcase
895 normcase = util.normcase
905 for name in self._rustmap.tracked_dirs():
896 for name in self._rustmap.tracked_dirs():
906 f[normcase(name)] = name
897 f[normcase(name)] = name
907 return f
898 return f
908
899
909 def set_possibly_dirty(self, filename):
900 def set_possibly_dirty(self, filename):
910 """record that the current state of the file on disk is unknown"""
901 """record that the current state of the file on disk is unknown"""
911 entry = self[filename]
902 entry = self[filename]
912 entry.set_possibly_dirty()
903 entry.set_possibly_dirty()
913 self._rustmap.set_dirstate_item(filename, entry)
904 self._rustmap.set_dirstate_item(filename, entry)
914
905
915 def set_clean(self, filename, mode, size, mtime):
906 def set_clean(self, filename, mode, size, mtime):
916 """mark a file as back to a clean state"""
907 """mark a file as back to a clean state"""
917 entry = self[filename]
908 entry = self[filename]
918 mtime = mtime & rangemask
909 mtime = mtime & rangemask
919 size = size & rangemask
910 size = size & rangemask
920 entry.set_clean(mode, size, mtime)
911 entry.set_clean(mode, size, mtime)
921 self._rustmap.set_dirstate_item(filename, entry)
912 self._rustmap.set_dirstate_item(filename, entry)
922 self._rustmap.copymap().pop(filename, None)
913 self._rustmap.copymap().pop(filename, None)
923
914
924 def __setitem__(self, key, value):
915 def __setitem__(self, key, value):
925 assert isinstance(value, DirstateItem)
916 assert isinstance(value, DirstateItem)
926 self._rustmap.set_dirstate_item(key, value)
917 self._rustmap.set_dirstate_item(key, value)
@@ -1,866 +1,869 b''
1 from __future__ import absolute_import
1 from __future__ import absolute_import
2
2
3 import collections
3 import collections
4 import errno
4 import errno
5 import shutil
5 import shutil
6 import struct
6 import struct
7
7
8 from .i18n import _
8 from .i18n import _
9 from .node import (
9 from .node import (
10 bin,
10 bin,
11 hex,
11 hex,
12 nullrev,
12 nullrev,
13 )
13 )
14 from . import (
14 from . import (
15 error,
15 error,
16 filemerge,
16 filemerge,
17 pycompat,
17 pycompat,
18 util,
18 util,
19 )
19 )
20 from .utils import hashutil
20 from .utils import hashutil
21
21
22 _pack = struct.pack
22 _pack = struct.pack
23 _unpack = struct.unpack
23 _unpack = struct.unpack
24
24
25
25
26 def _droponode(data):
26 def _droponode(data):
27 # used for compatibility for v1
27 # used for compatibility for v1
28 bits = data.split(b'\0')
28 bits = data.split(b'\0')
29 bits = bits[:-2] + bits[-1:]
29 bits = bits[:-2] + bits[-1:]
30 return b'\0'.join(bits)
30 return b'\0'.join(bits)
31
31
32
32
33 def _filectxorabsent(hexnode, ctx, f):
33 def _filectxorabsent(hexnode, ctx, f):
34 if hexnode == ctx.repo().nodeconstants.nullhex:
34 if hexnode == ctx.repo().nodeconstants.nullhex:
35 return filemerge.absentfilectx(ctx, f)
35 return filemerge.absentfilectx(ctx, f)
36 else:
36 else:
37 return ctx[f]
37 return ctx[f]
38
38
39
39
40 # Merge state record types. See ``mergestate`` docs for more.
40 # Merge state record types. See ``mergestate`` docs for more.
41
41
42 ####
42 ####
43 # merge records which records metadata about a current merge
43 # merge records which records metadata about a current merge
44 # exists only once in a mergestate
44 # exists only once in a mergestate
45 #####
45 #####
46 RECORD_LOCAL = b'L'
46 RECORD_LOCAL = b'L'
47 RECORD_OTHER = b'O'
47 RECORD_OTHER = b'O'
48 # record merge labels
48 # record merge labels
49 RECORD_LABELS = b'l'
49 RECORD_LABELS = b'l'
50
50
51 #####
51 #####
52 # record extra information about files, with one entry containing info about one
52 # record extra information about files, with one entry containing info about one
53 # file. Hence, multiple of them can exists
53 # file. Hence, multiple of them can exists
54 #####
54 #####
55 RECORD_FILE_VALUES = b'f'
55 RECORD_FILE_VALUES = b'f'
56
56
57 #####
57 #####
58 # merge records which represents state of individual merges of files/folders
58 # merge records which represents state of individual merges of files/folders
59 # These are top level records for each entry containing merge related info.
59 # These are top level records for each entry containing merge related info.
60 # Each record of these has info about one file. Hence multiple of them can
60 # Each record of these has info about one file. Hence multiple of them can
61 # exists
61 # exists
62 #####
62 #####
63 RECORD_MERGED = b'F'
63 RECORD_MERGED = b'F'
64 RECORD_CHANGEDELETE_CONFLICT = b'C'
64 RECORD_CHANGEDELETE_CONFLICT = b'C'
65 # the path was dir on one side of merge and file on another
65 # the path was dir on one side of merge and file on another
66 RECORD_PATH_CONFLICT = b'P'
66 RECORD_PATH_CONFLICT = b'P'
67
67
68 #####
68 #####
69 # possible state which a merge entry can have. These are stored inside top-level
69 # possible state which a merge entry can have. These are stored inside top-level
70 # merge records mentioned just above.
70 # merge records mentioned just above.
71 #####
71 #####
72 MERGE_RECORD_UNRESOLVED = b'u'
72 MERGE_RECORD_UNRESOLVED = b'u'
73 MERGE_RECORD_RESOLVED = b'r'
73 MERGE_RECORD_RESOLVED = b'r'
74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
75 MERGE_RECORD_RESOLVED_PATH = b'pr'
75 MERGE_RECORD_RESOLVED_PATH = b'pr'
76 # represents that the file was automatically merged in favor
76 # represents that the file was automatically merged in favor
77 # of other version. This info is used on commit.
77 # of other version. This info is used on commit.
78 # This is now deprecated and commit related information is now
78 # This is now deprecated and commit related information is now
79 # stored in RECORD_FILE_VALUES
79 # stored in RECORD_FILE_VALUES
80 MERGE_RECORD_MERGED_OTHER = b'o'
80 MERGE_RECORD_MERGED_OTHER = b'o'
81
81
82 #####
82 #####
83 # top level record which stores other unknown records. Multiple of these can
83 # top level record which stores other unknown records. Multiple of these can
84 # exists
84 # exists
85 #####
85 #####
86 RECORD_OVERRIDE = b't'
86 RECORD_OVERRIDE = b't'
87
87
88 #####
88 #####
89 # legacy records which are no longer used but kept to prevent breaking BC
89 # legacy records which are no longer used but kept to prevent breaking BC
90 #####
90 #####
91 # This record was release in 5.4 and usage was removed in 5.5
91 # This record was release in 5.4 and usage was removed in 5.5
92 LEGACY_RECORD_RESOLVED_OTHER = b'R'
92 LEGACY_RECORD_RESOLVED_OTHER = b'R'
93 # This record was release in 3.7 and usage was removed in 5.6
93 # This record was release in 3.7 and usage was removed in 5.6
94 LEGACY_RECORD_DRIVER_RESOLVED = b'd'
94 LEGACY_RECORD_DRIVER_RESOLVED = b'd'
95 # This record was release in 3.7 and usage was removed in 5.6
95 # This record was release in 3.7 and usage was removed in 5.6
96 LEGACY_MERGE_DRIVER_STATE = b'm'
96 LEGACY_MERGE_DRIVER_STATE = b'm'
97 # This record was release in 3.7 and usage was removed in 5.6
97 # This record was release in 3.7 and usage was removed in 5.6
98 LEGACY_MERGE_DRIVER_MERGE = b'D'
98 LEGACY_MERGE_DRIVER_MERGE = b'D'
99
99
100
100
101 ACTION_FORGET = b'f'
101 ACTION_FORGET = b'f'
102 ACTION_REMOVE = b'r'
102 ACTION_REMOVE = b'r'
103 ACTION_ADD = b'a'
103 ACTION_ADD = b'a'
104 ACTION_GET = b'g'
104 ACTION_GET = b'g'
105 ACTION_PATH_CONFLICT = b'p'
105 ACTION_PATH_CONFLICT = b'p'
106 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
106 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
107 ACTION_ADD_MODIFIED = b'am'
107 ACTION_ADD_MODIFIED = b'am'
108 ACTION_CREATED = b'c'
108 ACTION_CREATED = b'c'
109 ACTION_DELETED_CHANGED = b'dc'
109 ACTION_DELETED_CHANGED = b'dc'
110 ACTION_CHANGED_DELETED = b'cd'
110 ACTION_CHANGED_DELETED = b'cd'
111 ACTION_MERGE = b'm'
111 ACTION_MERGE = b'm'
112 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
112 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
113 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
113 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
114 ACTION_KEEP = b'k'
114 ACTION_KEEP = b'k'
115 # the file was absent on local side before merge and we should
115 # the file was absent on local side before merge and we should
116 # keep it absent (absent means file not present, it can be a result
116 # keep it absent (absent means file not present, it can be a result
117 # of file deletion, rename etc.)
117 # of file deletion, rename etc.)
118 ACTION_KEEP_ABSENT = b'ka'
118 ACTION_KEEP_ABSENT = b'ka'
119 # the file is absent on the ancestor and remote side of the merge
119 # the file is absent on the ancestor and remote side of the merge
120 # hence this file is new and we should keep it
120 # hence this file is new and we should keep it
121 ACTION_KEEP_NEW = b'kn'
121 ACTION_KEEP_NEW = b'kn'
122 ACTION_EXEC = b'e'
122 ACTION_EXEC = b'e'
123 ACTION_CREATED_MERGE = b'cm'
123 ACTION_CREATED_MERGE = b'cm'
124
124
125 # actions which are no op
125 # actions which are no op
126 NO_OP_ACTIONS = (
126 NO_OP_ACTIONS = (
127 ACTION_KEEP,
127 ACTION_KEEP,
128 ACTION_KEEP_ABSENT,
128 ACTION_KEEP_ABSENT,
129 ACTION_KEEP_NEW,
129 ACTION_KEEP_NEW,
130 )
130 )
131
131
132
132
133 class _mergestate_base(object):
133 class _mergestate_base(object):
134 """track 3-way merge state of individual files
134 """track 3-way merge state of individual files
135
135
136 The merge state is stored on disk when needed. Two files are used: one with
136 The merge state is stored on disk when needed. Two files are used: one with
137 an old format (version 1), and one with a new format (version 2). Version 2
137 an old format (version 1), and one with a new format (version 2). Version 2
138 stores a superset of the data in version 1, including new kinds of records
138 stores a superset of the data in version 1, including new kinds of records
139 in the future. For more about the new format, see the documentation for
139 in the future. For more about the new format, see the documentation for
140 `_readrecordsv2`.
140 `_readrecordsv2`.
141
141
142 Each record can contain arbitrary content, and has an associated type. This
142 Each record can contain arbitrary content, and has an associated type. This
143 `type` should be a letter. If `type` is uppercase, the record is mandatory:
143 `type` should be a letter. If `type` is uppercase, the record is mandatory:
144 versions of Mercurial that don't support it should abort. If `type` is
144 versions of Mercurial that don't support it should abort. If `type` is
145 lowercase, the record can be safely ignored.
145 lowercase, the record can be safely ignored.
146
146
147 Currently known records:
147 Currently known records:
148
148
149 L: the node of the "local" part of the merge (hexified version)
149 L: the node of the "local" part of the merge (hexified version)
150 O: the node of the "other" part of the merge (hexified version)
150 O: the node of the "other" part of the merge (hexified version)
151 F: a file to be merged entry
151 F: a file to be merged entry
152 C: a change/delete or delete/change conflict
152 C: a change/delete or delete/change conflict
153 P: a path conflict (file vs directory)
153 P: a path conflict (file vs directory)
154 f: a (filename, dictionary) tuple of optional values for a given file
154 f: a (filename, dictionary) tuple of optional values for a given file
155 l: the labels for the parts of the merge.
155 l: the labels for the parts of the merge.
156
156
157 Merge record states (stored in self._state, indexed by filename):
157 Merge record states (stored in self._state, indexed by filename):
158 u: unresolved conflict
158 u: unresolved conflict
159 r: resolved conflict
159 r: resolved conflict
160 pu: unresolved path conflict (file conflicts with directory)
160 pu: unresolved path conflict (file conflicts with directory)
161 pr: resolved path conflict
161 pr: resolved path conflict
162 o: file was merged in favor of other parent of merge (DEPRECATED)
162 o: file was merged in favor of other parent of merge (DEPRECATED)
163
163
164 The resolve command transitions between 'u' and 'r' for conflicts and
164 The resolve command transitions between 'u' and 'r' for conflicts and
165 'pu' and 'pr' for path conflicts.
165 'pu' and 'pr' for path conflicts.
166 """
166 """
167
167
168 def __init__(self, repo):
168 def __init__(self, repo):
169 """Initialize the merge state.
169 """Initialize the merge state.
170
170
171 Do not use this directly! Instead call read() or clean()."""
171 Do not use this directly! Instead call read() or clean()."""
172 self._repo = repo
172 self._repo = repo
173 self._state = {}
173 self._state = {}
174 self._stateextras = collections.defaultdict(dict)
174 self._stateextras = collections.defaultdict(dict)
175 self._local = None
175 self._local = None
176 self._other = None
176 self._other = None
177 self._labels = None
177 self._labels = None
178 # contains a mapping of form:
178 # contains a mapping of form:
179 # {filename : (merge_return_value, action_to_be_performed}
179 # {filename : (merge_return_value, action_to_be_performed}
180 # these are results of re-running merge process
180 # these are results of re-running merge process
181 # this dict is used to perform actions on dirstate caused by re-running
181 # this dict is used to perform actions on dirstate caused by re-running
182 # the merge
182 # the merge
183 self._results = {}
183 self._results = {}
184 self._dirty = False
184 self._dirty = False
185
185
186 def reset(self):
186 def reset(self):
187 pass
187 pass
188
188
189 def start(self, node, other, labels=None):
189 def start(self, node, other, labels=None):
190 self._local = node
190 self._local = node
191 self._other = other
191 self._other = other
192 self._labels = labels
192 self._labels = labels
193
193
194 @util.propertycache
194 @util.propertycache
195 def local(self):
195 def local(self):
196 if self._local is None:
196 if self._local is None:
197 msg = b"local accessed but self._local isn't set"
197 msg = b"local accessed but self._local isn't set"
198 raise error.ProgrammingError(msg)
198 raise error.ProgrammingError(msg)
199 return self._local
199 return self._local
200
200
201 @util.propertycache
201 @util.propertycache
202 def localctx(self):
202 def localctx(self):
203 return self._repo[self.local]
203 return self._repo[self.local]
204
204
205 @util.propertycache
205 @util.propertycache
206 def other(self):
206 def other(self):
207 if self._other is None:
207 if self._other is None:
208 msg = b"other accessed but self._other isn't set"
208 msg = b"other accessed but self._other isn't set"
209 raise error.ProgrammingError(msg)
209 raise error.ProgrammingError(msg)
210 return self._other
210 return self._other
211
211
212 @util.propertycache
212 @util.propertycache
213 def otherctx(self):
213 def otherctx(self):
214 return self._repo[self.other]
214 return self._repo[self.other]
215
215
216 def active(self):
216 def active(self):
217 """Whether mergestate is active.
217 """Whether mergestate is active.
218
218
219 Returns True if there appears to be mergestate. This is a rough proxy
219 Returns True if there appears to be mergestate. This is a rough proxy
220 for "is a merge in progress."
220 for "is a merge in progress."
221 """
221 """
222 return bool(self._local) or bool(self._state)
222 return bool(self._local) or bool(self._state)
223
223
224 def commit(self):
224 def commit(self):
225 """Write current state on disk (if necessary)"""
225 """Write current state on disk (if necessary)"""
226
226
227 @staticmethod
227 @staticmethod
228 def getlocalkey(path):
228 def getlocalkey(path):
229 """hash the path of a local file context for storage in the .hg/merge
229 """hash the path of a local file context for storage in the .hg/merge
230 directory."""
230 directory."""
231
231
232 return hex(hashutil.sha1(path).digest())
232 return hex(hashutil.sha1(path).digest())
233
233
234 def _make_backup(self, fctx, localkey):
234 def _make_backup(self, fctx, localkey):
235 raise NotImplementedError()
235 raise NotImplementedError()
236
236
237 def _restore_backup(self, fctx, localkey, flags):
237 def _restore_backup(self, fctx, localkey, flags):
238 raise NotImplementedError()
238 raise NotImplementedError()
239
239
240 def add(self, fcl, fco, fca, fd):
240 def add(self, fcl, fco, fca, fd):
241 """add a new (potentially?) conflicting file the merge state
241 """add a new (potentially?) conflicting file the merge state
242 fcl: file context for local,
242 fcl: file context for local,
243 fco: file context for remote,
243 fco: file context for remote,
244 fca: file context for ancestors,
244 fca: file context for ancestors,
245 fd: file path of the resulting merge.
245 fd: file path of the resulting merge.
246
246
247 note: also write the local version to the `.hg/merge` directory.
247 note: also write the local version to the `.hg/merge` directory.
248 """
248 """
249 if fcl.isabsent():
249 if fcl.isabsent():
250 localkey = self._repo.nodeconstants.nullhex
250 localkey = self._repo.nodeconstants.nullhex
251 else:
251 else:
252 localkey = mergestate.getlocalkey(fcl.path())
252 localkey = mergestate.getlocalkey(fcl.path())
253 self._make_backup(fcl, localkey)
253 self._make_backup(fcl, localkey)
254 self._state[fd] = [
254 self._state[fd] = [
255 MERGE_RECORD_UNRESOLVED,
255 MERGE_RECORD_UNRESOLVED,
256 localkey,
256 localkey,
257 fcl.path(),
257 fcl.path(),
258 fca.path(),
258 fca.path(),
259 hex(fca.filenode()),
259 hex(fca.filenode()),
260 fco.path(),
260 fco.path(),
261 hex(fco.filenode()),
261 hex(fco.filenode()),
262 fcl.flags(),
262 fcl.flags(),
263 ]
263 ]
264 self._stateextras[fd][b'ancestorlinknode'] = hex(fca.node())
264 self._stateextras[fd][b'ancestorlinknode'] = hex(fca.node())
265 self._dirty = True
265 self._dirty = True
266
266
267 def addpathconflict(self, path, frename, forigin):
267 def addpathconflict(self, path, frename, forigin):
268 """add a new conflicting path to the merge state
268 """add a new conflicting path to the merge state
269 path: the path that conflicts
269 path: the path that conflicts
270 frename: the filename the conflicting file was renamed to
270 frename: the filename the conflicting file was renamed to
271 forigin: origin of the file ('l' or 'r' for local/remote)
271 forigin: origin of the file ('l' or 'r' for local/remote)
272 """
272 """
273 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
273 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
274 self._dirty = True
274 self._dirty = True
275
275
276 def addcommitinfo(self, path, data):
276 def addcommitinfo(self, path, data):
277 """stores information which is required at commit
277 """stores information which is required at commit
278 into _stateextras"""
278 into _stateextras"""
279 self._stateextras[path].update(data)
279 self._stateextras[path].update(data)
280 self._dirty = True
280 self._dirty = True
281
281
282 def __contains__(self, dfile):
282 def __contains__(self, dfile):
283 return dfile in self._state
283 return dfile in self._state
284
284
285 def __getitem__(self, dfile):
285 def __getitem__(self, dfile):
286 return self._state[dfile][0]
286 return self._state[dfile][0]
287
287
288 def __iter__(self):
288 def __iter__(self):
289 return iter(sorted(self._state))
289 return iter(sorted(self._state))
290
290
291 def files(self):
291 def files(self):
292 return self._state.keys()
292 return self._state.keys()
293
293
294 def mark(self, dfile, state):
294 def mark(self, dfile, state):
295 self._state[dfile][0] = state
295 self._state[dfile][0] = state
296 self._dirty = True
296 self._dirty = True
297
297
298 def unresolved(self):
298 def unresolved(self):
299 """Obtain the paths of unresolved files."""
299 """Obtain the paths of unresolved files."""
300
300
301 for f, entry in pycompat.iteritems(self._state):
301 for f, entry in pycompat.iteritems(self._state):
302 if entry[0] in (
302 if entry[0] in (
303 MERGE_RECORD_UNRESOLVED,
303 MERGE_RECORD_UNRESOLVED,
304 MERGE_RECORD_UNRESOLVED_PATH,
304 MERGE_RECORD_UNRESOLVED_PATH,
305 ):
305 ):
306 yield f
306 yield f
307
307
308 def allextras(self):
308 def allextras(self):
309 """return all extras information stored with the mergestate"""
309 """return all extras information stored with the mergestate"""
310 return self._stateextras
310 return self._stateextras
311
311
312 def extras(self, filename):
312 def extras(self, filename):
313 """return extras stored with the mergestate for the given filename"""
313 """return extras stored with the mergestate for the given filename"""
314 return self._stateextras[filename]
314 return self._stateextras[filename]
315
315
316 def _resolve(self, preresolve, dfile, wctx):
316 def _resolve(self, preresolve, dfile, wctx):
317 """rerun merge process for file path `dfile`.
317 """rerun merge process for file path `dfile`.
318 Returns whether the merge was completed and the return value of merge
318 Returns whether the merge was completed and the return value of merge
319 obtained from filemerge._filemerge().
319 obtained from filemerge._filemerge().
320 """
320 """
321 if self[dfile] in (
321 if self[dfile] in (
322 MERGE_RECORD_RESOLVED,
322 MERGE_RECORD_RESOLVED,
323 LEGACY_RECORD_DRIVER_RESOLVED,
323 LEGACY_RECORD_DRIVER_RESOLVED,
324 ):
324 ):
325 return True, 0
325 return True, 0
326 stateentry = self._state[dfile]
326 stateentry = self._state[dfile]
327 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
327 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
328 octx = self._repo[self._other]
328 octx = self._repo[self._other]
329 extras = self.extras(dfile)
329 extras = self.extras(dfile)
330 anccommitnode = extras.get(b'ancestorlinknode')
330 anccommitnode = extras.get(b'ancestorlinknode')
331 if anccommitnode:
331 if anccommitnode:
332 actx = self._repo[anccommitnode]
332 actx = self._repo[anccommitnode]
333 else:
333 else:
334 actx = None
334 actx = None
335 fcd = _filectxorabsent(localkey, wctx, dfile)
335 fcd = _filectxorabsent(localkey, wctx, dfile)
336 fco = _filectxorabsent(onode, octx, ofile)
336 fco = _filectxorabsent(onode, octx, ofile)
337 # TODO: move this to filectxorabsent
337 # TODO: move this to filectxorabsent
338 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
338 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
339 # "premerge" x flags
339 # "premerge" x flags
340 flo = fco.flags()
340 flo = fco.flags()
341 fla = fca.flags()
341 fla = fca.flags()
342 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
342 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
343 if fca.rev() == nullrev and flags != flo:
343 if fca.rev() == nullrev and flags != flo:
344 if preresolve:
344 if preresolve:
345 self._repo.ui.warn(
345 self._repo.ui.warn(
346 _(
346 _(
347 b'warning: cannot merge flags for %s '
347 b'warning: cannot merge flags for %s '
348 b'without common ancestor - keeping local flags\n'
348 b'without common ancestor - keeping local flags\n'
349 )
349 )
350 % afile
350 % afile
351 )
351 )
352 elif flags == fla:
352 elif flags == fla:
353 flags = flo
353 flags = flo
354 if preresolve:
354 if preresolve:
355 # restore local
355 # restore local
356 if localkey != self._repo.nodeconstants.nullhex:
356 if localkey != self._repo.nodeconstants.nullhex:
357 self._restore_backup(wctx[dfile], localkey, flags)
357 self._restore_backup(wctx[dfile], localkey, flags)
358 else:
358 else:
359 wctx[dfile].remove(ignoremissing=True)
359 wctx[dfile].remove(ignoremissing=True)
360 complete, merge_ret, deleted = filemerge.premerge(
360 complete, merge_ret, deleted = filemerge.premerge(
361 self._repo,
361 self._repo,
362 wctx,
362 wctx,
363 self._local,
363 self._local,
364 lfile,
364 lfile,
365 fcd,
365 fcd,
366 fco,
366 fco,
367 fca,
367 fca,
368 labels=self._labels,
368 labels=self._labels,
369 )
369 )
370 else:
370 else:
371 complete, merge_ret, deleted = filemerge.filemerge(
371 complete, merge_ret, deleted = filemerge.filemerge(
372 self._repo,
372 self._repo,
373 wctx,
373 wctx,
374 self._local,
374 self._local,
375 lfile,
375 lfile,
376 fcd,
376 fcd,
377 fco,
377 fco,
378 fca,
378 fca,
379 labels=self._labels,
379 labels=self._labels,
380 )
380 )
381 if merge_ret is None:
381 if merge_ret is None:
382 # If return value of merge is None, then there are no real conflict
382 # If return value of merge is None, then there are no real conflict
383 del self._state[dfile]
383 del self._state[dfile]
384 self._dirty = True
384 self._dirty = True
385 elif not merge_ret:
385 elif not merge_ret:
386 self.mark(dfile, MERGE_RECORD_RESOLVED)
386 self.mark(dfile, MERGE_RECORD_RESOLVED)
387
387
388 if complete:
388 if complete:
389 action = None
389 action = None
390 if deleted:
390 if deleted:
391 if fcd.isabsent():
391 if fcd.isabsent():
392 # dc: local picked. Need to drop if present, which may
392 # dc: local picked. Need to drop if present, which may
393 # happen on re-resolves.
393 # happen on re-resolves.
394 action = ACTION_FORGET
394 action = ACTION_FORGET
395 else:
395 else:
396 # cd: remote picked (or otherwise deleted)
396 # cd: remote picked (or otherwise deleted)
397 action = ACTION_REMOVE
397 action = ACTION_REMOVE
398 else:
398 else:
399 if fcd.isabsent(): # dc: remote picked
399 if fcd.isabsent(): # dc: remote picked
400 action = ACTION_GET
400 action = ACTION_GET
401 elif fco.isabsent(): # cd: local picked
401 elif fco.isabsent(): # cd: local picked
402 if dfile in self.localctx:
402 if dfile in self.localctx:
403 action = ACTION_ADD_MODIFIED
403 action = ACTION_ADD_MODIFIED
404 else:
404 else:
405 action = ACTION_ADD
405 action = ACTION_ADD
406 # else: regular merges (no action necessary)
406 # else: regular merges (no action necessary)
407 self._results[dfile] = merge_ret, action
407 self._results[dfile] = merge_ret, action
408
408
409 return complete, merge_ret
409 return complete, merge_ret
410
410
411 def preresolve(self, dfile, wctx):
411 def preresolve(self, dfile, wctx):
412 """run premerge process for dfile
412 """run premerge process for dfile
413
413
414 Returns whether the merge is complete, and the exit code."""
414 Returns whether the merge is complete, and the exit code."""
415 return self._resolve(True, dfile, wctx)
415 return self._resolve(True, dfile, wctx)
416
416
417 def resolve(self, dfile, wctx):
417 def resolve(self, dfile, wctx):
418 """run merge process (assuming premerge was run) for dfile
418 """run merge process (assuming premerge was run) for dfile
419
419
420 Returns the exit code of the merge."""
420 Returns the exit code of the merge."""
421 return self._resolve(False, dfile, wctx)[1]
421 return self._resolve(False, dfile, wctx)[1]
422
422
423 def counts(self):
423 def counts(self):
424 """return counts for updated, merged and removed files in this
424 """return counts for updated, merged and removed files in this
425 session"""
425 session"""
426 updated, merged, removed = 0, 0, 0
426 updated, merged, removed = 0, 0, 0
427 for r, action in pycompat.itervalues(self._results):
427 for r, action in pycompat.itervalues(self._results):
428 if r is None:
428 if r is None:
429 updated += 1
429 updated += 1
430 elif r == 0:
430 elif r == 0:
431 if action == ACTION_REMOVE:
431 if action == ACTION_REMOVE:
432 removed += 1
432 removed += 1
433 else:
433 else:
434 merged += 1
434 merged += 1
435 return updated, merged, removed
435 return updated, merged, removed
436
436
437 def unresolvedcount(self):
437 def unresolvedcount(self):
438 """get unresolved count for this merge (persistent)"""
438 """get unresolved count for this merge (persistent)"""
439 return len(list(self.unresolved()))
439 return len(list(self.unresolved()))
440
440
441 def actions(self):
441 def actions(self):
442 """return lists of actions to perform on the dirstate"""
442 """return lists of actions to perform on the dirstate"""
443 actions = {
443 actions = {
444 ACTION_REMOVE: [],
444 ACTION_REMOVE: [],
445 ACTION_FORGET: [],
445 ACTION_FORGET: [],
446 ACTION_ADD: [],
446 ACTION_ADD: [],
447 ACTION_ADD_MODIFIED: [],
447 ACTION_ADD_MODIFIED: [],
448 ACTION_GET: [],
448 ACTION_GET: [],
449 }
449 }
450 for f, (r, action) in pycompat.iteritems(self._results):
450 for f, (r, action) in pycompat.iteritems(self._results):
451 if action is not None:
451 if action is not None:
452 actions[action].append((f, None, b"merge result"))
452 actions[action].append((f, None, b"merge result"))
453 return actions
453 return actions
454
454
455
455
456 class mergestate(_mergestate_base):
456 class mergestate(_mergestate_base):
457
457
458 statepathv1 = b'merge/state'
458 statepathv1 = b'merge/state'
459 statepathv2 = b'merge/state2'
459 statepathv2 = b'merge/state2'
460
460
461 @staticmethod
461 @staticmethod
462 def clean(repo):
462 def clean(repo):
463 """Initialize a brand new merge state, removing any existing state on
463 """Initialize a brand new merge state, removing any existing state on
464 disk."""
464 disk."""
465 ms = mergestate(repo)
465 ms = mergestate(repo)
466 ms.reset()
466 ms.reset()
467 return ms
467 return ms
468
468
469 @staticmethod
469 @staticmethod
470 def read(repo):
470 def read(repo):
471 """Initialize the merge state, reading it from disk."""
471 """Initialize the merge state, reading it from disk."""
472 ms = mergestate(repo)
472 ms = mergestate(repo)
473 ms._read()
473 ms._read()
474 return ms
474 return ms
475
475
476 def _read(self):
476 def _read(self):
477 """Analyse each record content to restore a serialized state from disk
477 """Analyse each record content to restore a serialized state from disk
478
478
479 This function process "record" entry produced by the de-serialization
479 This function process "record" entry produced by the de-serialization
480 of on disk file.
480 of on disk file.
481 """
481 """
482 unsupported = set()
482 unsupported = set()
483 records = self._readrecords()
483 records = self._readrecords()
484 for rtype, record in records:
484 for rtype, record in records:
485 if rtype == RECORD_LOCAL:
485 if rtype == RECORD_LOCAL:
486 self._local = bin(record)
486 self._local = bin(record)
487 elif rtype == RECORD_OTHER:
487 elif rtype == RECORD_OTHER:
488 self._other = bin(record)
488 self._other = bin(record)
489 elif rtype == LEGACY_MERGE_DRIVER_STATE:
489 elif rtype == LEGACY_MERGE_DRIVER_STATE:
490 pass
490 pass
491 elif rtype in (
491 elif rtype in (
492 RECORD_MERGED,
492 RECORD_MERGED,
493 RECORD_CHANGEDELETE_CONFLICT,
493 RECORD_CHANGEDELETE_CONFLICT,
494 RECORD_PATH_CONFLICT,
494 RECORD_PATH_CONFLICT,
495 LEGACY_MERGE_DRIVER_MERGE,
495 LEGACY_MERGE_DRIVER_MERGE,
496 LEGACY_RECORD_RESOLVED_OTHER,
496 LEGACY_RECORD_RESOLVED_OTHER,
497 ):
497 ):
498 bits = record.split(b'\0')
498 bits = record.split(b'\0')
499 # merge entry type MERGE_RECORD_MERGED_OTHER is deprecated
499 # merge entry type MERGE_RECORD_MERGED_OTHER is deprecated
500 # and we now store related information in _stateextras, so
500 # and we now store related information in _stateextras, so
501 # lets write to _stateextras directly
501 # lets write to _stateextras directly
502 if bits[1] == MERGE_RECORD_MERGED_OTHER:
502 if bits[1] == MERGE_RECORD_MERGED_OTHER:
503 self._stateextras[bits[0]][b'filenode-source'] = b'other'
503 self._stateextras[bits[0]][b'filenode-source'] = b'other'
504 else:
504 else:
505 self._state[bits[0]] = bits[1:]
505 self._state[bits[0]] = bits[1:]
506 elif rtype == RECORD_FILE_VALUES:
506 elif rtype == RECORD_FILE_VALUES:
507 filename, rawextras = record.split(b'\0', 1)
507 filename, rawextras = record.split(b'\0', 1)
508 extraparts = rawextras.split(b'\0')
508 extraparts = rawextras.split(b'\0')
509 extras = {}
509 extras = {}
510 i = 0
510 i = 0
511 while i < len(extraparts):
511 while i < len(extraparts):
512 extras[extraparts[i]] = extraparts[i + 1]
512 extras[extraparts[i]] = extraparts[i + 1]
513 i += 2
513 i += 2
514
514
515 self._stateextras[filename] = extras
515 self._stateextras[filename] = extras
516 elif rtype == RECORD_LABELS:
516 elif rtype == RECORD_LABELS:
517 labels = record.split(b'\0', 2)
517 labels = record.split(b'\0', 2)
518 self._labels = [l for l in labels if len(l) > 0]
518 self._labels = [l for l in labels if len(l) > 0]
519 elif not rtype.islower():
519 elif not rtype.islower():
520 unsupported.add(rtype)
520 unsupported.add(rtype)
521
521
522 if unsupported:
522 if unsupported:
523 raise error.UnsupportedMergeRecords(unsupported)
523 raise error.UnsupportedMergeRecords(unsupported)
524
524
525 def _readrecords(self):
525 def _readrecords(self):
526 """Read merge state from disk and return a list of record (TYPE, data)
526 """Read merge state from disk and return a list of record (TYPE, data)
527
527
528 We read data from both v1 and v2 files and decide which one to use.
528 We read data from both v1 and v2 files and decide which one to use.
529
529
530 V1 has been used by version prior to 2.9.1 and contains less data than
530 V1 has been used by version prior to 2.9.1 and contains less data than
531 v2. We read both versions and check if no data in v2 contradicts
531 v2. We read both versions and check if no data in v2 contradicts
532 v1. If there is not contradiction we can safely assume that both v1
532 v1. If there is not contradiction we can safely assume that both v1
533 and v2 were written at the same time and use the extract data in v2. If
533 and v2 were written at the same time and use the extract data in v2. If
534 there is contradiction we ignore v2 content as we assume an old version
534 there is contradiction we ignore v2 content as we assume an old version
535 of Mercurial has overwritten the mergestate file and left an old v2
535 of Mercurial has overwritten the mergestate file and left an old v2
536 file around.
536 file around.
537
537
538 returns list of record [(TYPE, data), ...]"""
538 returns list of record [(TYPE, data), ...]"""
539 v1records = self._readrecordsv1()
539 v1records = self._readrecordsv1()
540 v2records = self._readrecordsv2()
540 v2records = self._readrecordsv2()
541 if self._v1v2match(v1records, v2records):
541 if self._v1v2match(v1records, v2records):
542 return v2records
542 return v2records
543 else:
543 else:
544 # v1 file is newer than v2 file, use it
544 # v1 file is newer than v2 file, use it
545 # we have to infer the "other" changeset of the merge
545 # we have to infer the "other" changeset of the merge
546 # we cannot do better than that with v1 of the format
546 # we cannot do better than that with v1 of the format
547 mctx = self._repo[None].parents()[-1]
547 mctx = self._repo[None].parents()[-1]
548 v1records.append((RECORD_OTHER, mctx.hex()))
548 v1records.append((RECORD_OTHER, mctx.hex()))
549 # add place holder "other" file node information
549 # add place holder "other" file node information
550 # nobody is using it yet so we do no need to fetch the data
550 # nobody is using it yet so we do no need to fetch the data
551 # if mctx was wrong `mctx[bits[-2]]` may fails.
551 # if mctx was wrong `mctx[bits[-2]]` may fails.
552 for idx, r in enumerate(v1records):
552 for idx, r in enumerate(v1records):
553 if r[0] == RECORD_MERGED:
553 if r[0] == RECORD_MERGED:
554 bits = r[1].split(b'\0')
554 bits = r[1].split(b'\0')
555 bits.insert(-2, b'')
555 bits.insert(-2, b'')
556 v1records[idx] = (r[0], b'\0'.join(bits))
556 v1records[idx] = (r[0], b'\0'.join(bits))
557 return v1records
557 return v1records
558
558
559 def _v1v2match(self, v1records, v2records):
559 def _v1v2match(self, v1records, v2records):
560 oldv2 = set() # old format version of v2 record
560 oldv2 = set() # old format version of v2 record
561 for rec in v2records:
561 for rec in v2records:
562 if rec[0] == RECORD_LOCAL:
562 if rec[0] == RECORD_LOCAL:
563 oldv2.add(rec)
563 oldv2.add(rec)
564 elif rec[0] == RECORD_MERGED:
564 elif rec[0] == RECORD_MERGED:
565 # drop the onode data (not contained in v1)
565 # drop the onode data (not contained in v1)
566 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
566 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
567 for rec in v1records:
567 for rec in v1records:
568 if rec not in oldv2:
568 if rec not in oldv2:
569 return False
569 return False
570 else:
570 else:
571 return True
571 return True
572
572
573 def _readrecordsv1(self):
573 def _readrecordsv1(self):
574 """read on disk merge state for version 1 file
574 """read on disk merge state for version 1 file
575
575
576 returns list of record [(TYPE, data), ...]
576 returns list of record [(TYPE, data), ...]
577
577
578 Note: the "F" data from this file are one entry short
578 Note: the "F" data from this file are one entry short
579 (no "other file node" entry)
579 (no "other file node" entry)
580 """
580 """
581 records = []
581 records = []
582 try:
582 try:
583 f = self._repo.vfs(self.statepathv1)
583 f = self._repo.vfs(self.statepathv1)
584 for i, l in enumerate(f):
584 for i, l in enumerate(f):
585 if i == 0:
585 if i == 0:
586 records.append((RECORD_LOCAL, l[:-1]))
586 records.append((RECORD_LOCAL, l[:-1]))
587 else:
587 else:
588 records.append((RECORD_MERGED, l[:-1]))
588 records.append((RECORD_MERGED, l[:-1]))
589 f.close()
589 f.close()
590 except IOError as err:
590 except IOError as err:
591 if err.errno != errno.ENOENT:
591 if err.errno != errno.ENOENT:
592 raise
592 raise
593 return records
593 return records
594
594
595 def _readrecordsv2(self):
595 def _readrecordsv2(self):
596 """read on disk merge state for version 2 file
596 """read on disk merge state for version 2 file
597
597
598 This format is a list of arbitrary records of the form:
598 This format is a list of arbitrary records of the form:
599
599
600 [type][length][content]
600 [type][length][content]
601
601
602 `type` is a single character, `length` is a 4 byte integer, and
602 `type` is a single character, `length` is a 4 byte integer, and
603 `content` is an arbitrary byte sequence of length `length`.
603 `content` is an arbitrary byte sequence of length `length`.
604
604
605 Mercurial versions prior to 3.7 have a bug where if there are
605 Mercurial versions prior to 3.7 have a bug where if there are
606 unsupported mandatory merge records, attempting to clear out the merge
606 unsupported mandatory merge records, attempting to clear out the merge
607 state with hg update --clean or similar aborts. The 't' record type
607 state with hg update --clean or similar aborts. The 't' record type
608 works around that by writing out what those versions treat as an
608 works around that by writing out what those versions treat as an
609 advisory record, but later versions interpret as special: the first
609 advisory record, but later versions interpret as special: the first
610 character is the 'real' record type and everything onwards is the data.
610 character is the 'real' record type and everything onwards is the data.
611
611
612 Returns list of records [(TYPE, data), ...]."""
612 Returns list of records [(TYPE, data), ...]."""
613 records = []
613 records = []
614 try:
614 try:
615 f = self._repo.vfs(self.statepathv2)
615 f = self._repo.vfs(self.statepathv2)
616 data = f.read()
616 data = f.read()
617 off = 0
617 off = 0
618 end = len(data)
618 end = len(data)
619 while off < end:
619 while off < end:
620 rtype = data[off : off + 1]
620 rtype = data[off : off + 1]
621 off += 1
621 off += 1
622 length = _unpack(b'>I', data[off : (off + 4)])[0]
622 length = _unpack(b'>I', data[off : (off + 4)])[0]
623 off += 4
623 off += 4
624 record = data[off : (off + length)]
624 record = data[off : (off + length)]
625 off += length
625 off += length
626 if rtype == RECORD_OVERRIDE:
626 if rtype == RECORD_OVERRIDE:
627 rtype, record = record[0:1], record[1:]
627 rtype, record = record[0:1], record[1:]
628 records.append((rtype, record))
628 records.append((rtype, record))
629 f.close()
629 f.close()
630 except IOError as err:
630 except IOError as err:
631 if err.errno != errno.ENOENT:
631 if err.errno != errno.ENOENT:
632 raise
632 raise
633 return records
633 return records
634
634
635 def commit(self):
635 def commit(self):
636 if self._dirty:
636 if self._dirty:
637 records = self._makerecords()
637 records = self._makerecords()
638 self._writerecords(records)
638 self._writerecords(records)
639 self._dirty = False
639 self._dirty = False
640
640
641 def _makerecords(self):
641 def _makerecords(self):
642 records = []
642 records = []
643 records.append((RECORD_LOCAL, hex(self._local)))
643 records.append((RECORD_LOCAL, hex(self._local)))
644 records.append((RECORD_OTHER, hex(self._other)))
644 records.append((RECORD_OTHER, hex(self._other)))
645 # Write out state items. In all cases, the value of the state map entry
645 # Write out state items. In all cases, the value of the state map entry
646 # is written as the contents of the record. The record type depends on
646 # is written as the contents of the record. The record type depends on
647 # the type of state that is stored, and capital-letter records are used
647 # the type of state that is stored, and capital-letter records are used
648 # to prevent older versions of Mercurial that do not support the feature
648 # to prevent older versions of Mercurial that do not support the feature
649 # from loading them.
649 # from loading them.
650 for filename, v in pycompat.iteritems(self._state):
650 for filename, v in pycompat.iteritems(self._state):
651 if v[0] in (
651 if v[0] in (
652 MERGE_RECORD_UNRESOLVED_PATH,
652 MERGE_RECORD_UNRESOLVED_PATH,
653 MERGE_RECORD_RESOLVED_PATH,
653 MERGE_RECORD_RESOLVED_PATH,
654 ):
654 ):
655 # Path conflicts. These are stored in 'P' records. The current
655 # Path conflicts. These are stored in 'P' records. The current
656 # resolution state ('pu' or 'pr') is stored within the record.
656 # resolution state ('pu' or 'pr') is stored within the record.
657 records.append(
657 records.append(
658 (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
658 (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
659 )
659 )
660 elif (
660 elif (
661 v[1] == self._repo.nodeconstants.nullhex
661 v[1] == self._repo.nodeconstants.nullhex
662 or v[6] == self._repo.nodeconstants.nullhex
662 or v[6] == self._repo.nodeconstants.nullhex
663 ):
663 ):
664 # Change/Delete or Delete/Change conflicts. These are stored in
664 # Change/Delete or Delete/Change conflicts. These are stored in
665 # 'C' records. v[1] is the local file, and is nullhex when the
665 # 'C' records. v[1] is the local file, and is nullhex when the
666 # file is deleted locally ('dc'). v[6] is the remote file, and
666 # file is deleted locally ('dc'). v[6] is the remote file, and
667 # is nullhex when the file is deleted remotely ('cd').
667 # is nullhex when the file is deleted remotely ('cd').
668 records.append(
668 records.append(
669 (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
669 (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
670 )
670 )
671 else:
671 else:
672 # Normal files. These are stored in 'F' records.
672 # Normal files. These are stored in 'F' records.
673 records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
673 records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
674 for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
674 for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
675 rawextras = b'\0'.join(
675 rawextras = b'\0'.join(
676 b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
676 b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
677 )
677 )
678 records.append(
678 records.append(
679 (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
679 (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
680 )
680 )
681 if self._labels is not None:
681 if self._labels is not None:
682 labels = b'\0'.join(self._labels)
682 labels = b'\0'.join(self._labels)
683 records.append((RECORD_LABELS, labels))
683 records.append((RECORD_LABELS, labels))
684 return records
684 return records
685
685
686 def _writerecords(self, records):
686 def _writerecords(self, records):
687 """Write current state on disk (both v1 and v2)"""
687 """Write current state on disk (both v1 and v2)"""
688 self._writerecordsv1(records)
688 self._writerecordsv1(records)
689 self._writerecordsv2(records)
689 self._writerecordsv2(records)
690
690
691 def _writerecordsv1(self, records):
691 def _writerecordsv1(self, records):
692 """Write current state on disk in a version 1 file"""
692 """Write current state on disk in a version 1 file"""
693 f = self._repo.vfs(self.statepathv1, b'wb')
693 f = self._repo.vfs(self.statepathv1, b'wb')
694 irecords = iter(records)
694 irecords = iter(records)
695 lrecords = next(irecords)
695 lrecords = next(irecords)
696 assert lrecords[0] == RECORD_LOCAL
696 assert lrecords[0] == RECORD_LOCAL
697 f.write(hex(self._local) + b'\n')
697 f.write(hex(self._local) + b'\n')
698 for rtype, data in irecords:
698 for rtype, data in irecords:
699 if rtype == RECORD_MERGED:
699 if rtype == RECORD_MERGED:
700 f.write(b'%s\n' % _droponode(data))
700 f.write(b'%s\n' % _droponode(data))
701 f.close()
701 f.close()
702
702
703 def _writerecordsv2(self, records):
703 def _writerecordsv2(self, records):
704 """Write current state on disk in a version 2 file
704 """Write current state on disk in a version 2 file
705
705
706 See the docstring for _readrecordsv2 for why we use 't'."""
706 See the docstring for _readrecordsv2 for why we use 't'."""
707 # these are the records that all version 2 clients can read
707 # these are the records that all version 2 clients can read
708 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
708 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
709 f = self._repo.vfs(self.statepathv2, b'wb')
709 f = self._repo.vfs(self.statepathv2, b'wb')
710 for key, data in records:
710 for key, data in records:
711 assert len(key) == 1
711 assert len(key) == 1
712 if key not in allowlist:
712 if key not in allowlist:
713 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
713 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
714 format = b'>sI%is' % len(data)
714 format = b'>sI%is' % len(data)
715 f.write(_pack(format, key, len(data), data))
715 f.write(_pack(format, key, len(data), data))
716 f.close()
716 f.close()
717
717
718 def _make_backup(self, fctx, localkey):
718 def _make_backup(self, fctx, localkey):
719 self._repo.vfs.write(b'merge/' + localkey, fctx.data())
719 self._repo.vfs.write(b'merge/' + localkey, fctx.data())
720
720
721 def _restore_backup(self, fctx, localkey, flags):
721 def _restore_backup(self, fctx, localkey, flags):
722 with self._repo.vfs(b'merge/' + localkey) as f:
722 with self._repo.vfs(b'merge/' + localkey) as f:
723 fctx.write(f.read(), flags)
723 fctx.write(f.read(), flags)
724
724
725 def reset(self):
725 def reset(self):
726 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
726 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
727
727
728
728
729 class memmergestate(_mergestate_base):
729 class memmergestate(_mergestate_base):
730 def __init__(self, repo):
730 def __init__(self, repo):
731 super(memmergestate, self).__init__(repo)
731 super(memmergestate, self).__init__(repo)
732 self._backups = {}
732 self._backups = {}
733
733
734 def _make_backup(self, fctx, localkey):
734 def _make_backup(self, fctx, localkey):
735 self._backups[localkey] = fctx.data()
735 self._backups[localkey] = fctx.data()
736
736
737 def _restore_backup(self, fctx, localkey, flags):
737 def _restore_backup(self, fctx, localkey, flags):
738 fctx.write(self._backups[localkey], flags)
738 fctx.write(self._backups[localkey], flags)
739
739
740
740
741 def recordupdates(repo, actions, branchmerge, getfiledata):
741 def recordupdates(repo, actions, branchmerge, getfiledata):
742 """record merge actions to the dirstate"""
742 """record merge actions to the dirstate"""
743 # remove (must come first)
743 # remove (must come first)
744 for f, args, msg in actions.get(ACTION_REMOVE, []):
744 for f, args, msg in actions.get(ACTION_REMOVE, []):
745 if branchmerge:
745 if branchmerge:
746 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=False)
746 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=False)
747 else:
747 else:
748 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
748 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
749
749
750 # forget (must come first)
750 # forget (must come first)
751 for f, args, msg in actions.get(ACTION_FORGET, []):
751 for f, args, msg in actions.get(ACTION_FORGET, []):
752 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
752 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
753
753
754 # resolve path conflicts
754 # resolve path conflicts
755 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
755 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
756 (f0, origf0) = args
756 (f0, origf0) = args
757 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
757 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
758 repo.dirstate.copy(origf0, f)
758 repo.dirstate.copy(origf0, f)
759 if f0 == origf0:
759 if f0 == origf0:
760 repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
760 repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
761 else:
761 else:
762 repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)
762 repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)
763
763
764 # re-add
764 # re-add
765 for f, args, msg in actions.get(ACTION_ADD, []):
765 for f, args, msg in actions.get(ACTION_ADD, []):
766 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
766 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
767
767
768 # re-add/mark as modified
768 # re-add/mark as modified
769 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
769 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
770 if branchmerge:
770 if branchmerge:
771 repo.dirstate.update_file(
771 repo.dirstate.update_file(
772 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
772 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
773 )
773 )
774 else:
774 else:
775 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
775 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
776
776
777 # exec change
777 # exec change
778 for f, args, msg in actions.get(ACTION_EXEC, []):
778 for f, args, msg in actions.get(ACTION_EXEC, []):
779 repo.dirstate.update_file(
779 repo.dirstate.update_file(
780 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
780 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
781 )
781 )
782
782
783 # keep
783 # keep
784 for f, args, msg in actions.get(ACTION_KEEP, []):
784 for f, args, msg in actions.get(ACTION_KEEP, []):
785 pass
785 pass
786
786
787 # keep deleted
787 # keep deleted
788 for f, args, msg in actions.get(ACTION_KEEP_ABSENT, []):
788 for f, args, msg in actions.get(ACTION_KEEP_ABSENT, []):
789 pass
789 pass
790
790
791 # keep new
791 # keep new
792 for f, args, msg in actions.get(ACTION_KEEP_NEW, []):
792 for f, args, msg in actions.get(ACTION_KEEP_NEW, []):
793 pass
793 pass
794
794
795 # get
795 # get
796 for f, args, msg in actions.get(ACTION_GET, []):
796 for f, args, msg in actions.get(ACTION_GET, []):
797 if branchmerge:
797 if branchmerge:
798 # tracked in p1 can be True also but update_file should not care
798 # tracked in p1 can be True also but update_file should not care
799 old_entry = repo.dirstate.get_entry(f)
800 p1_tracked = old_entry.any_tracked and not old_entry.added
799 repo.dirstate.update_file(
801 repo.dirstate.update_file(
800 f,
802 f,
801 p1_tracked=False,
803 p1_tracked=p1_tracked,
802 p2_tracked=True,
804 p2_tracked=True,
803 wc_tracked=True,
805 wc_tracked=True,
804 clean_p2=True,
806 clean_p2=not p1_tracked,
807 merged=p1_tracked,
805 )
808 )
806 else:
809 else:
807 parentfiledata = getfiledata[f] if getfiledata else None
810 parentfiledata = getfiledata[f] if getfiledata else None
808 repo.dirstate.update_file(
811 repo.dirstate.update_file(
809 f,
812 f,
810 p1_tracked=True,
813 p1_tracked=True,
811 wc_tracked=True,
814 wc_tracked=True,
812 parentfiledata=parentfiledata,
815 parentfiledata=parentfiledata,
813 )
816 )
814
817
815 # merge
818 # merge
816 for f, args, msg in actions.get(ACTION_MERGE, []):
819 for f, args, msg in actions.get(ACTION_MERGE, []):
817 f1, f2, fa, move, anc = args
820 f1, f2, fa, move, anc = args
818 if branchmerge:
821 if branchmerge:
819 # We've done a branch merge, mark this file as merged
822 # We've done a branch merge, mark this file as merged
820 # so that we properly record the merger later
823 # so that we properly record the merger later
821 repo.dirstate.update_file(
824 repo.dirstate.update_file(
822 f, p1_tracked=True, wc_tracked=True, merged=True
825 f, p1_tracked=True, wc_tracked=True, merged=True
823 )
826 )
824 if f1 != f2: # copy/rename
827 if f1 != f2: # copy/rename
825 if move:
828 if move:
826 repo.dirstate.update_file(
829 repo.dirstate.update_file(
827 f1, p1_tracked=True, wc_tracked=False
830 f1, p1_tracked=True, wc_tracked=False
828 )
831 )
829 if f1 != f:
832 if f1 != f:
830 repo.dirstate.copy(f1, f)
833 repo.dirstate.copy(f1, f)
831 else:
834 else:
832 repo.dirstate.copy(f2, f)
835 repo.dirstate.copy(f2, f)
833 else:
836 else:
834 # We've update-merged a locally modified file, so
837 # We've update-merged a locally modified file, so
835 # we set the dirstate to emulate a normal checkout
838 # we set the dirstate to emulate a normal checkout
836 # of that file some time in the past. Thus our
839 # of that file some time in the past. Thus our
837 # merge will appear as a normal local file
840 # merge will appear as a normal local file
838 # modification.
841 # modification.
839 if f2 == f: # file not locally copied/moved
842 if f2 == f: # file not locally copied/moved
840 repo.dirstate.update_file(
843 repo.dirstate.update_file(
841 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
844 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
842 )
845 )
843 if move:
846 if move:
844 repo.dirstate.update_file(
847 repo.dirstate.update_file(
845 f1, p1_tracked=False, wc_tracked=False
848 f1, p1_tracked=False, wc_tracked=False
846 )
849 )
847
850
848 # directory rename, move local
851 # directory rename, move local
849 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
852 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
850 f0, flag = args
853 f0, flag = args
851 if branchmerge:
854 if branchmerge:
852 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
855 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
853 repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
856 repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
854 repo.dirstate.copy(f0, f)
857 repo.dirstate.copy(f0, f)
855 else:
858 else:
856 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
859 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
857 repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)
860 repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)
858
861
859 # directory rename, get
862 # directory rename, get
860 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
863 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
861 f0, flag = args
864 f0, flag = args
862 if branchmerge:
865 if branchmerge:
863 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
866 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
864 repo.dirstate.copy(f0, f)
867 repo.dirstate.copy(f0, f)
865 else:
868 else:
866 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
869 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
General Comments 0
You need to be logged in to leave comments. Login now