obsutil: move 'foreground' to the new modules...

marmoute | r33147:7017567e | default
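The pattern of the change in both files below: call sites that previously
reached 'foreground' through the obsolete module now import and call obsutil
instead. A minimal sketch of the rewrite (shown with absolute imports for
illustration; the hunks themselves use the in-tree relative form
"from . import obsutil"):

    # before this changeset
    from mercurial import obsolete
    fg = obsolete.foreground(repo, [old.node()])

    # after this changeset
    from mercurial import obsutil
    fg = obsutil.foreground(repo, [old.node()])
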
@@ -1,807 +1,807
# Mercurial bookmark support code
#
# Copyright 2008 David Soria Parra <dsp@php.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno

from .i18n import _
from .node import (
    bin,
    hex,
    short,
)
from . import (
    encoding,
    error,
    lock as lockmod,
-    obsolete,
+    obsutil,
    pycompat,
    scmutil,
    txnutil,
    util,
)

# label constants
# until 3.5, bookmarks.current was the advertised name, not
# bookmarks.active, so we must use both to avoid breaking old
# custom styles
activebookmarklabel = 'bookmarks.active bookmarks.current'

def _getbkfile(repo):
    """Hook so that extensions that mess with the store can hook bm storage.

    For core, this just handles whether we should see pending
    bookmarks or the committed ones. Other extensions (like share)
    may need to tweak this behavior further.
    """
    fp, pending = txnutil.trypending(repo.root, repo.vfs, 'bookmarks')
    return fp

class bmstore(dict):
    """Storage for bookmarks.

    This object should do all bookmark-related reads and writes, so
    that it's fairly simple to replace the storage underlying
    bookmarks without having to clone the logic surrounding
    bookmarks. This type also should manage the active bookmark, if
    any.

    This particular bmstore implementation stores bookmarks as
    {hash}\s{name}\n (the same format as localtags) in
    .hg/bookmarks. The mapping is stored as {name: nodeid}.
    """

    def __init__(self, repo):
        dict.__init__(self)
        self._repo = repo
        self._clean = True
        self._aclean = True
        nm = repo.changelog.nodemap
        tonode = bin # force local lookup
        setitem = dict.__setitem__
        try:
            with _getbkfile(repo) as bkfile:
                for line in bkfile:
                    line = line.strip()
                    if not line:
                        continue
                    try:
                        sha, refspec = line.split(' ', 1)
                        node = tonode(sha)
                        if node in nm:
                            refspec = encoding.tolocal(refspec)
                            setitem(self, refspec, node)
                    except (TypeError, ValueError):
                        # TypeError:
                        # - bin(...)
                        # ValueError:
                        # - node in nm, for non-20-bytes entry
                        # - split(...), for string without ' '
                        repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
                                     % line)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        self._active = _readactive(repo, self)

    @property
    def active(self):
        return self._active

    @active.setter
    def active(self, mark):
        if mark is not None and mark not in self:
            raise AssertionError('bookmark %s does not exist!' % mark)

        self._active = mark
        self._aclean = False

    def __setitem__(self, *args, **kwargs):
        self._clean = False
        return dict.__setitem__(self, *args, **kwargs)

    def __delitem__(self, key):
        self._clean = False
        return dict.__delitem__(self, key)

    def recordchange(self, tr):
        """record that bookmarks have been changed in a transaction

        The transaction is then responsible for updating the file content."""
        tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
                            location='plain')
        tr.hookargs['bookmark_moved'] = '1'

    def _writerepo(self, repo):
        """Factored out for extensibility"""
        rbm = repo._bookmarks
        if rbm.active not in self:
            rbm.active = None
            rbm._writeactive()

        with repo.wlock():
            file_ = repo.vfs('bookmarks', 'w', atomictemp=True,
                             checkambig=True)
            try:
                self._write(file_)
            except: # re-raises
                file_.discard()
                raise
            finally:
                file_.close()

    def _writeactive(self):
        if self._aclean:
            return
        with self._repo.wlock():
            if self._active is not None:
                f = self._repo.vfs('bookmarks.current', 'w', atomictemp=True,
                                   checkambig=True)
                try:
                    f.write(encoding.fromlocal(self._active))
                finally:
                    f.close()
            else:
                self._repo.vfs.tryunlink('bookmarks.current')
        self._aclean = True

    def _write(self, fp):
        for name, node in self.iteritems():
            fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
        self._clean = True
        self._repo.invalidatevolatilesets()

    def expandname(self, bname):
        if bname == '.':
            if self.active:
                return self.active
            else:
                raise error.Abort(_("no active bookmark"))
        return bname

    def checkconflict(self, mark, force=False, target=None):
        """check repo for a potential clash of mark with an existing bookmark,
        branch, or hash

        If target is supplied, then check that we are moving the bookmark
        forward.

        If force is supplied, then forcibly move the bookmark to a new commit
        regardless of whether it is a move forward.
        """
        cur = self._repo.changectx('.').node()
        if mark in self and not force:
            if target:
                if self[mark] == target and target == cur:
                    # re-activating a bookmark
                    return
                rev = self._repo[target].rev()
                anc = self._repo.changelog.ancestors([rev])
                bmctx = self._repo[self[mark]]
                divs = [self._repo[b].node() for b in self
                        if b.split('@', 1)[0] == mark.split('@', 1)[0]]

                # allow resolving a single divergent bookmark even if moving
                # the bookmark across branches when a revision is specified
                # that contains a divergent bookmark
                if bmctx.rev() not in anc and target in divs:
                    deletedivergent(self._repo, [target], mark)
                    return

                deletefrom = [b for b in divs
                              if self._repo[b].rev() in anc or b == target]
                deletedivergent(self._repo, deletefrom, mark)
                if validdest(self._repo, bmctx, self._repo[target]):
                    self._repo.ui.status(
                        _("moving bookmark '%s' forward from %s\n") %
                        (mark, short(bmctx.node())))
                    return
            raise error.Abort(_("bookmark '%s' already exists "
                                "(use -f to force)") % mark)
        if ((mark in self._repo.branchmap() or
             mark == self._repo.dirstate.branch()) and not force):
            raise error.Abort(
                _("a bookmark cannot have the name of an existing branch"))
        if len(mark) > 3 and not force:
            try:
                shadowhash = (mark in self._repo)
            except error.LookupError: # ambiguous identifier
                shadowhash = False
            if shadowhash:
                self._repo.ui.warn(
                    _("bookmark %s matches a changeset hash\n"
                      "(did you leave a -r out of an 'hg bookmark' "
                      "command?)\n")
                    % mark)

def _readactive(repo, marks):
    """
    Get the active bookmark. We can have an active bookmark that updates
    itself as we commit. This function returns the name of that bookmark.
    It is stored in .hg/bookmarks.current
    """
    mark = None
    try:
        file = repo.vfs('bookmarks.current')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return None
    try:
        # No readline() in osutil.posixfile, reading everything is
        # cheap.
        # Note that it's possible for readlines() here to raise
        # IOError, since we might be reading the active mark over
        # static-http which only tries to load the file when we try
        # to read from it.
        mark = encoding.tolocal((file.readlines() or [''])[0])
        if mark == '' or mark not in marks:
            mark = None
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return None
    finally:
        file.close()
    return mark

def activate(repo, mark):
    """
    Set the given bookmark to be 'active', meaning that this bookmark will
    follow new commits that are made.
    The name is recorded in .hg/bookmarks.current
    """
    repo._bookmarks.active = mark
    repo._bookmarks._writeactive()

def deactivate(repo):
    """
    Unset the active bookmark in this repository.
    """
    repo._bookmarks.active = None
    repo._bookmarks._writeactive()

def isactivewdirparent(repo):
    """
    Tell whether the 'active' bookmark (the one that follows new commits)
    points to one of the parents of the current working directory (wdir).

    While this is normally the case, it can on occasion be false; for example,
    immediately after a pull, the active bookmark can be moved to point
    to a place different than the wdir. This is solved by running `hg update`.
    """
    mark = repo._activebookmark
    marks = repo._bookmarks
    parents = [p.node() for p in repo[None].parents()]
    return (mark in marks and marks[mark] in parents)

def deletedivergent(repo, deletefrom, bm):
    '''Delete divergent versions of bm on nodes in deletefrom.

    Return True if at least one bookmark was deleted, False otherwise.'''
    deleted = False
    marks = repo._bookmarks
    divergent = [b for b in marks if b.split('@', 1)[0] == bm.split('@', 1)[0]]
    for mark in divergent:
        if mark == '@' or '@' not in mark:
            # can't be divergent by definition
            continue
        if mark and marks[mark] in deletefrom:
            if mark != bm:
                del marks[mark]
                deleted = True
    return deleted

def headsforactive(repo):
    """Given a repo with an active bookmark, return divergent bookmark nodes.

    Args:
      repo: A repository with an active bookmark.

    Returns:
      A list of binary node ids that is the full list of other
      revisions with bookmarks divergent from the active bookmark. If
      there were no divergent bookmarks, then this list will contain
      only one entry.
    """
    if not repo._activebookmark:
        raise ValueError(
            'headsforactive() only makes sense with an active bookmark')
    name = repo._activebookmark.split('@', 1)[0]
    heads = []
    for mark, n in repo._bookmarks.iteritems():
        if mark.split('@', 1)[0] == name:
            heads.append(n)
    return heads

def calculateupdate(ui, repo, checkout):
    '''Return a tuple (targetrev, movemarkfrom) indicating the rev to
    check out and where to move the active bookmark from, if needed.'''
    movemarkfrom = None
    if checkout is None:
        activemark = repo._activebookmark
        if isactivewdirparent(repo):
            movemarkfrom = repo['.'].node()
        elif activemark:
            ui.status(_("updating to active bookmark %s\n") % activemark)
            checkout = activemark
    return (checkout, movemarkfrom)

def update(repo, parents, node):
    deletefrom = parents
    marks = repo._bookmarks
    update = False
    active = marks.active
    if not active:
        return False

    if marks[active] in parents:
        new = repo[node]
        divs = [repo[b] for b in marks
                if b.split('@', 1)[0] == active.split('@', 1)[0]]
        anc = repo.changelog.ancestors([new.rev()])
        deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
        if validdest(repo, repo[marks[active]], new):
            marks[active] = new.node()
            update = True

    if deletedivergent(repo, deletefrom, active):
        update = True

    if update:
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('bookmark')
            marks.recordchange(tr)
            tr.close()
        finally:
            lockmod.release(tr, lock)
    return update

def listbinbookmarks(repo):
    # We may try to list bookmarks on a repo type that does not
    # support it (e.g., statichttprepository).
    marks = getattr(repo, '_bookmarks', {})

    hasnode = repo.changelog.hasnode
    for k, v in marks.iteritems():
        # don't expose local divergent bookmarks
        if hasnode(v) and ('@' not in k or k.endswith('@')):
            yield k, v

def listbookmarks(repo):
    d = {}
    for book, node in listbinbookmarks(repo):
        d[book] = hex(node)
    return d

def pushbookmark(repo, key, old, new):
    w = l = tr = None
    try:
        w = repo.wlock()
        l = repo.lock()
        tr = repo.transaction('bookmarks')
        marks = repo._bookmarks
        existing = hex(marks.get(key, ''))
        if existing != old and existing != new:
            return False
        if new == '':
            del marks[key]
        else:
            if new not in repo:
                return False
            marks[key] = repo[new].node()
        marks.recordchange(tr)
        tr.close()
        return True
    finally:
        lockmod.release(tr, l, w)

def comparebookmarks(repo, srcmarks, dstmarks, targets=None):
    '''Compare bookmarks between srcmarks and dstmarks

    This returns a tuple "(addsrc, adddst, advsrc, advdst, diverge,
    differ, invalid, same)"; each element is a list of bookmarks as
    described below:

    :addsrc: added on src side (removed on dst side, perhaps)
    :adddst: added on dst side (removed on src side, perhaps)
    :advsrc: advanced on src side
    :advdst: advanced on dst side
    :diverge: diverge
    :differ: changed, but changeset referred to on src is unknown on dst
    :invalid: unknown on both sides
    :same: same on both sides

    Each element of the lists in the result tuple is a tuple "(bookmark
    name, changeset ID on source side, changeset ID on destination
    side)". Each changeset ID is a 40-digit hexadecimal string or
    None.

    Changeset IDs of tuples in the "addsrc", "adddst", "differ" or
    "invalid" lists may be unknown to repo.

    If "targets" is specified, only bookmarks listed in it are
    examined.
    '''

    if targets:
        bset = set(targets)
    else:
        srcmarkset = set(srcmarks)
        dstmarkset = set(dstmarks)
        bset = srcmarkset | dstmarkset

    results = ([], [], [], [], [], [], [], [])
    addsrc = results[0].append
    adddst = results[1].append
    advsrc = results[2].append
    advdst = results[3].append
    diverge = results[4].append
    differ = results[5].append
    invalid = results[6].append
    same = results[7].append

    for b in sorted(bset):
        if b not in srcmarks:
            if b in dstmarks:
                adddst((b, None, dstmarks[b]))
            else:
                invalid((b, None, None))
        elif b not in dstmarks:
            addsrc((b, srcmarks[b], None))
        else:
            scid = srcmarks[b]
            dcid = dstmarks[b]
            if scid == dcid:
                same((b, scid, dcid))
            elif scid in repo and dcid in repo:
                sctx = repo[scid]
                dctx = repo[dcid]
                if sctx.rev() < dctx.rev():
                    if validdest(repo, sctx, dctx):
                        advdst((b, scid, dcid))
                    else:
                        diverge((b, scid, dcid))
                else:
                    if validdest(repo, dctx, sctx):
                        advsrc((b, scid, dcid))
                    else:
                        diverge((b, scid, dcid))
            else:
                # it is too expensive to examine in detail, in this case
                differ((b, scid, dcid))

    return results

def _diverge(ui, b, path, localmarks, remotenode):
    '''Return an appropriate diverged bookmark name for the specified ``path``

    This returns None if it fails to assign any divergent bookmark name.

    This reuses an already existing one with an "@number" suffix, if it
    refers to ``remotenode``.
    '''
    if b == '@':
        b = ''
    # try to use an @pathalias suffix
    # if an @pathalias already exists, we overwrite (update) it
    if path.startswith("file:"):
        path = util.url(path).path
    for p, u in ui.configitems("paths"):
        if u.startswith("file:"):
            u = util.url(u).path
        if path == u:
            return '%s@%s' % (b, p)

    # otherwise, newly assign a unique "@number" suffix
    for x in range(1, 100):
        n = '%s@%d' % (b, x)
        if n not in localmarks or localmarks[n] == remotenode:
            return n

    return None

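# Illustration (hypothetical names, not part of the upstream file): with a
# '[paths]' entry such as 'work = http://example.com/repo', a bookmark 'foo'
# that diverges while pulling from that path is stored as 'foo@work'; when no
# path alias matches, _diverge() falls back to the first free numbered
# suffix, e.g. 'foo@1', reusing a suffix that already points at remotenode.
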
def unhexlifybookmarks(marks):
    binremotemarks = {}
    for name, node in marks.items():
        binremotemarks[name] = bin(node)
    return binremotemarks

def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
    ui.debug("checking for updated bookmarks\n")
    localmarks = repo._bookmarks
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same
     ) = comparebookmarks(repo, remotemarks, localmarks)

    status = ui.status
    warn = ui.warn
    if ui.configbool('ui', 'quietbookmarkmove', False):
        status = warn = ui.debug

    explicit = set(explicit)
    changed = []
    for b, scid, dcid in addsrc:
        if scid in repo: # add remote bookmarks for changes we already have
            changed.append((b, scid, status,
                            _("adding remote bookmark %s\n") % (b)))
        elif b in explicit:
            explicit.remove(b)
            ui.warn(_("remote bookmark %s points to locally missing %s\n")
                    % (b, hex(scid)[:12]))

    for b, scid, dcid in advsrc:
        changed.append((b, scid, status,
                        _("updating bookmark %s\n") % (b)))
    # remove normal movement from explicit set
    explicit.difference_update(d[0] for d in changed)

    for b, scid, dcid in diverge:
        if b in explicit:
            explicit.discard(b)
            changed.append((b, scid, status,
                            _("importing bookmark %s\n") % (b)))
        else:
            db = _diverge(ui, b, path, localmarks, scid)
            if db:
                changed.append((db, scid, warn,
                                _("divergent bookmark %s stored as %s\n") %
                                (b, db)))
            else:
                warn(_("warning: failed to assign numbered name "
                       "to divergent bookmark %s\n") % (b))
    for b, scid, dcid in adddst + advdst:
        if b in explicit:
            explicit.discard(b)
            changed.append((b, scid, status,
                            _("importing bookmark %s\n") % (b)))
    for b, scid, dcid in differ:
        if b in explicit:
            explicit.remove(b)
            ui.warn(_("remote bookmark %s points to locally missing %s\n")
                    % (b, hex(scid)[:12]))

    if changed:
        tr = trfunc()
        for b, node, writer, msg in sorted(changed):
            localmarks[b] = node
            writer(msg)
        localmarks.recordchange(tr)

def incoming(ui, repo, other):
    '''Show bookmarks incoming from other to repo
    '''
    ui.status(_("searching for changed bookmarks\n"))

    remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
    r = comparebookmarks(repo, remotemarks, repo._bookmarks)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r

    incomings = []
    if ui.debugflag:
        getid = lambda id: id
    else:
        getid = lambda id: id[:12]
    if ui.verbose:
        def add(b, id, st):
            incomings.append(" %-25s %s %s\n" % (b, getid(id), st))
    else:
        def add(b, id, st):
            incomings.append(" %-25s %s\n" % (b, getid(id)))
    for b, scid, dcid in addsrc:
        # i18n: "added" refers to a bookmark
        add(b, hex(scid), _('added'))
    for b, scid, dcid in advsrc:
        # i18n: "advanced" refers to a bookmark
        add(b, hex(scid), _('advanced'))
    for b, scid, dcid in diverge:
        # i18n: "diverged" refers to a bookmark
        add(b, hex(scid), _('diverged'))
    for b, scid, dcid in differ:
        # i18n: "changed" refers to a bookmark
        add(b, hex(scid), _('changed'))

    if not incomings:
        ui.status(_("no changed bookmarks found\n"))
        return 1

    for s in sorted(incomings):
        ui.write(s)

    return 0

def outgoing(ui, repo, other):
    '''Show bookmarks outgoing from repo to other
    '''
    ui.status(_("searching for changed bookmarks\n"))

    remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
    r = comparebookmarks(repo, repo._bookmarks, remotemarks)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r

    outgoings = []
    if ui.debugflag:
        getid = lambda id: id
    else:
        getid = lambda id: id[:12]
    if ui.verbose:
        def add(b, id, st):
            outgoings.append(" %-25s %s %s\n" % (b, getid(id), st))
    else:
        def add(b, id, st):
            outgoings.append(" %-25s %s\n" % (b, getid(id)))
    for b, scid, dcid in addsrc:
        # i18n: "added" refers to a bookmark
        add(b, hex(scid), _('added'))
    for b, scid, dcid in adddst:
        # i18n: "deleted" refers to a bookmark
        add(b, ' ' * 40, _('deleted'))
    for b, scid, dcid in advsrc:
        # i18n: "advanced" refers to a bookmark
        add(b, hex(scid), _('advanced'))
    for b, scid, dcid in diverge:
        # i18n: "diverged" refers to a bookmark
        add(b, hex(scid), _('diverged'))
    for b, scid, dcid in differ:
        # i18n: "changed" refers to a bookmark
        add(b, hex(scid), _('changed'))

    if not outgoings:
        ui.status(_("no changed bookmarks found\n"))
        return 1

    for s in sorted(outgoings):
        ui.write(s)

    return 0

def summary(repo, other):
    '''Compare bookmarks between repo and other for "hg summary" output

    This returns a "(# of incoming, # of outgoing)" tuple.
    '''
    remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
    r = comparebookmarks(repo, remotemarks, repo._bookmarks)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
    return (len(addsrc), len(adddst))

def validdest(repo, old, new):
    """Is the new bookmark destination a valid update from the old one"""
    repo = repo.unfiltered()
    if old == new:
        # Old == new -> nothing to update.
        return False
    elif not old:
        # old is nullrev, anything is valid.
        # (new != nullrev has been excluded by the previous check)
        return True
    elif repo.obsstore:
-        return new.node() in obsolete.foreground(repo, [old.node()])
+        return new.node() in obsutil.foreground(repo, [old.node()])
    else:
        # still an independent clause as it is lazier (and therefore faster)
        return old.descendant(new)

def checkformat(repo, mark):
    """return a valid version of a potential bookmark name

    Raises an abort error if the bookmark name is not valid.
    """
    mark = mark.strip()
    if not mark:
        raise error.Abort(_("bookmark names cannot consist entirely of "
                            "whitespace"))
    scmutil.checknewlabel(repo, mark, 'bookmark')
    return mark

def delete(repo, tr, names):
    """remove a mark from the bookmark store

    Raises an abort error if mark does not exist.
    """
    marks = repo._bookmarks
    for mark in names:
        if mark not in marks:
            raise error.Abort(_("bookmark '%s' does not exist") % mark)
        if mark == repo._activebookmark:
            deactivate(repo)
        del marks[mark]
    marks.recordchange(tr)

def rename(repo, tr, old, new, force=False, inactive=False):
    """rename a bookmark from old to new

    If force is specified, then the new name can overwrite an existing
    bookmark.

    If inactive is specified, then do not activate the new bookmark.

    Raises an abort error if old is not in the bookmark store.
    """
    marks = repo._bookmarks
    mark = checkformat(repo, new)
    if old not in marks:
        raise error.Abort(_("bookmark '%s' does not exist") % old)
    marks.checkconflict(mark, force)
    marks[mark] = marks[old]
    if repo._activebookmark == old and not inactive:
        activate(repo, mark)
    del marks[old]
    marks.recordchange(tr)

def addbookmarks(repo, tr, names, rev=None, force=False, inactive=False):
    """add a list of bookmarks

    If force is specified, then the new name can overwrite an existing
    bookmark.

    If inactive is specified, then do not activate any bookmark. Otherwise, the
    first bookmark is activated.

    Raises an abort error if old is not in the bookmark store.
    """
    marks = repo._bookmarks
    cur = repo.changectx('.').node()
    newact = None
    for mark in names:
        mark = checkformat(repo, mark)
        if newact is None:
            newact = mark
        if inactive and mark == repo._activebookmark:
            deactivate(repo)
            return
        tgt = cur
        if rev:
            tgt = scmutil.revsingle(repo, rev).node()
        marks.checkconflict(mark, force, tgt)
        marks[mark] = tgt
    if not inactive and cur == marks[newact] and not rev:
        activate(repo, newact)
    elif cur != tgt and newact == repo._activebookmark:
        deactivate(repo)
    marks.recordchange(tr)

def _printbookmarks(ui, repo, bmarks, **opts):
    """private method to print bookmarks

    Provides a way for extensions to control how bookmarks are printed (e.g.
    prepend or append names)
    """
    opts = pycompat.byteskwargs(opts)
    fm = ui.formatter('bookmarks', opts)
    hexfn = fm.hexfunc
    if len(bmarks) == 0 and fm.isplain():
        ui.status(_("no bookmarks set\n"))
    for bmark, (n, prefix, label) in sorted(bmarks.iteritems()):
        fm.startitem()
        if not ui.quiet:
            fm.plain(' %s ' % prefix, label=label)
        fm.write('bookmark', '%s', bmark, label=label)
        pad = " " * (25 - encoding.colwidth(bmark))
        fm.condwrite(not ui.quiet, 'rev node', pad + ' %d:%s',
                     repo.changelog.rev(n), hexfn(n), label=label)
        fm.data(active=(activebookmarklabel in label))
        fm.plain('\n')
    fm.end()

def printbookmarks(ui, repo, **opts):
    """print bookmarks to a formatter

    Provides a way for extensions to control how bookmarks are printed.
    """
    marks = repo._bookmarks
    bmarks = {}
    for bmark, n in sorted(marks.iteritems()):
        active = repo._activebookmark
        if bmark == active:
            prefix, label = '*', activebookmarklabel
        else:
            prefix, label = ' ', ''

        bmarks[bmark] = (n, prefix, label)
    _printbookmarks(ui, repo, bmarks, **opts)
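
For orientation before the next file: the 8-tuple produced by
comparebookmarks() is always unpacked positionally, as summary() and
incoming() above do. A minimal sketch (assuming 'repo' is a local repository
object and 'other' a connected peer):

    # mirrors the pattern used by summary()/incoming() in this file
    remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
    r = comparebookmarks(repo, remotemarks, repo._bookmarks)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
    # e.g. bookmarks that exist only on the remote side:
    onlyremote = [b for b, scid, dcid in addsrc]
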
@@ -1,1749 +1,1749
# merge.py - directory-level update/merge handling for Mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import shutil
import struct

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullhex,
    nullid,
    nullrev,
)
from . import (
    copies,
    error,
    filemerge,
    match as matchmod,
-    obsolete,
+    obsutil,
    pycompat,
    scmutil,
    subrepo,
    util,
    worker,
)

_pack = struct.pack
_unpack = struct.unpack

def _droponode(data):
    # used for compatibility for v1
    bits = data.split('\0')
    bits = bits[:-2] + bits[-1:]
    return '\0'.join(bits)

class mergestate(object):
    '''track 3-way merge state of individual files

    The merge state is stored on disk when needed. Two files are used: one with
    an old format (version 1), and one with a new format (version 2). Version 2
    stores a superset of the data in version 1, including new kinds of records
    in the future. For more about the new format, see the documentation for
    `_readrecordsv2`.

    Each record can contain arbitrary content, and has an associated type. This
    `type` should be a letter. If `type` is uppercase, the record is mandatory:
    versions of Mercurial that don't support it should abort. If `type` is
    lowercase, the record can be safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    C: a change/delete or delete/change conflict
    D: a file that the external merge driver will merge internally
       (experimental)
    m: the external merge driver defined for this merge plus its run state
       (experimental)
    f: a (filename, dictionary) tuple of optional values for a given file
    X: unsupported mandatory record type (used in tests)
    x: unsupported advisory record type (used in tests)
    l: the labels for the parts of the merge.

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more

    '''
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    @staticmethod
    def clean(repo, node=None, other=None, labels=None):
        """Initialize a brand new merge state, removing any existing state on
        disk."""
        ms = mergestate(repo)
        ms.reset(node, other, labels)
        return ms

    @staticmethod
    def read(repo):
        """Initialize the merge state, reading it from disk."""
        ms = mergestate(repo)
        ms._read()
        return ms

    def __init__(self, repo):
        """Initialize the merge state.

        Do not use this directly! Instead call read() or clean()."""
        self._repo = repo
        self._dirty = False
        self._labels = None

    def reset(self, node=None, other=None, labels=None):
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        self._labels = labels
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = 's'
        else:
            self._mdstate = 'u'
        shutil.rmtree(self._repo.vfs.join('merge'), True)
        self._results = {}
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function processes "record" entries produced by the
        de-serialization of the on-disk file.
        """
137 self._state = {}
137 self._state = {}
138 self._stateextras = {}
138 self._stateextras = {}
139 self._local = None
139 self._local = None
140 self._other = None
140 self._other = None
141 for var in ('localctx', 'otherctx'):
141 for var in ('localctx', 'otherctx'):
142 if var in vars(self):
142 if var in vars(self):
143 delattr(self, var)
143 delattr(self, var)
144 self._readmergedriver = None
144 self._readmergedriver = None
145 self._mdstate = 's'
145 self._mdstate = 's'
146 unsupported = set()
146 unsupported = set()
147 records = self._readrecords()
147 records = self._readrecords()
148 for rtype, record in records:
148 for rtype, record in records:
149 if rtype == 'L':
149 if rtype == 'L':
150 self._local = bin(record)
150 self._local = bin(record)
151 elif rtype == 'O':
151 elif rtype == 'O':
152 self._other = bin(record)
152 self._other = bin(record)
153 elif rtype == 'm':
153 elif rtype == 'm':
154 bits = record.split('\0', 1)
154 bits = record.split('\0', 1)
155 mdstate = bits[1]
155 mdstate = bits[1]
156 if len(mdstate) != 1 or mdstate not in 'ums':
156 if len(mdstate) != 1 or mdstate not in 'ums':
157 # the merge driver should be idempotent, so just rerun it
157 # the merge driver should be idempotent, so just rerun it
158 mdstate = 'u'
158 mdstate = 'u'
159
159
160 self._readmergedriver = bits[0]
160 self._readmergedriver = bits[0]
161 self._mdstate = mdstate
161 self._mdstate = mdstate
162 elif rtype in 'FDC':
162 elif rtype in 'FDC':
163 bits = record.split('\0')
163 bits = record.split('\0')
164 self._state[bits[0]] = bits[1:]
164 self._state[bits[0]] = bits[1:]
165 elif rtype == 'f':
165 elif rtype == 'f':
166 filename, rawextras = record.split('\0', 1)
166 filename, rawextras = record.split('\0', 1)
167 extraparts = rawextras.split('\0')
167 extraparts = rawextras.split('\0')
168 extras = {}
168 extras = {}
169 i = 0
169 i = 0
170 while i < len(extraparts):
170 while i < len(extraparts):
171 extras[extraparts[i]] = extraparts[i + 1]
171 extras[extraparts[i]] = extraparts[i + 1]
172 i += 2
172 i += 2
173
173
174 self._stateextras[filename] = extras
174 self._stateextras[filename] = extras
175 elif rtype == 'l':
175 elif rtype == 'l':
176 labels = record.split('\0', 2)
176 labels = record.split('\0', 2)
177 self._labels = [l for l in labels if len(l) > 0]
177 self._labels = [l for l in labels if len(l) > 0]
178 elif not rtype.islower():
178 elif not rtype.islower():
179 unsupported.add(rtype)
179 unsupported.add(rtype)
180 self._results = {}
180 self._results = {}
181 self._dirty = False
181 self._dirty = False
182
182
183 if unsupported:
183 if unsupported:
184 raise error.UnsupportedMergeRecords(unsupported)
184 raise error.UnsupportedMergeRecords(unsupported)
185
185
186 def _readrecords(self):
186 def _readrecords(self):
187 """Read merge state from disk and return a list of record (TYPE, data)
187 """Read merge state from disk and return a list of record (TYPE, data)
188
188
189 We read data from both v1 and v2 files and decide which one to use.
189 We read data from both v1 and v2 files and decide which one to use.
190
190
191 V1 has been used by version prior to 2.9.1 and contains less data than
191 V1 has been used by version prior to 2.9.1 and contains less data than
192 v2. We read both versions and check if no data in v2 contradicts
192 v2. We read both versions and check if no data in v2 contradicts
193 v1. If there is not contradiction we can safely assume that both v1
193 v1. If there is not contradiction we can safely assume that both v1
194 and v2 were written at the same time and use the extract data in v2. If
194 and v2 were written at the same time and use the extract data in v2. If
195 there is contradiction we ignore v2 content as we assume an old version
195 there is contradiction we ignore v2 content as we assume an old version
196 of Mercurial has overwritten the mergestate file and left an old v2
196 of Mercurial has overwritten the mergestate file and left an old v2
197 file around.
197 file around.
198
198
199 returns list of record [(TYPE, data), ...]"""
199 returns list of record [(TYPE, data), ...]"""
200 v1records = self._readrecordsv1()
200 v1records = self._readrecordsv1()
201 v2records = self._readrecordsv2()
201 v2records = self._readrecordsv2()
202 if self._v1v2match(v1records, v2records):
202 if self._v1v2match(v1records, v2records):
203 return v2records
203 return v2records
204 else:
204 else:
205 # v1 file is newer than v2 file, use it
205 # v1 file is newer than v2 file, use it
206 # we have to infer the "other" changeset of the merge
206 # we have to infer the "other" changeset of the merge
207 # we cannot do better than that with v1 of the format
207 # we cannot do better than that with v1 of the format
208 mctx = self._repo[None].parents()[-1]
208 mctx = self._repo[None].parents()[-1]
209 v1records.append(('O', mctx.hex()))
209 v1records.append(('O', mctx.hex()))
210 # add place holder "other" file node information
210 # add place holder "other" file node information
211 # nobody is using it yet so we do no need to fetch the data
211 # nobody is using it yet so we do no need to fetch the data
212 # if mctx was wrong `mctx[bits[-2]]` may fails.
212 # if mctx was wrong `mctx[bits[-2]]` may fails.
213 for idx, r in enumerate(v1records):
213 for idx, r in enumerate(v1records):
214 if r[0] == 'F':
214 if r[0] == 'F':
215 bits = r[1].split('\0')
215 bits = r[1].split('\0')
216 bits.insert(-2, '')
216 bits.insert(-2, '')
217 v1records[idx] = (r[0], '\0'.join(bits))
217 v1records[idx] = (r[0], '\0'.join(bits))
218 return v1records
218 return v1records
219
219
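    # Illustrative note (not from the original file): as _readrecordsv1's
    # docstring below says, a v1 'F' record is one field short of the v2
    # layout (it has no "other file node" field). The insert(-2, '') padding
    # above simply adds an empty placeholder field near the end of each 'F'
    # record so that it splits into the same number of '\0'-separated fields
    # as a v2 record.
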
    def _v1v2match(self, v1records, v2records):
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == 'L':
                oldv2.add(rec)
            elif rec[0] == 'F':
                # drop the onode data (not contained in v1)
                oldv2.add(('F', _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns a list of records [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                if i == 0:
                    records.append(('L', l[:-1]))
                else:
                    records.append(('F', l[:-1]))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                rtype = data[off]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                if rtype == 't':
                    rtype, record = record[0], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

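    # Illustrative example (assumed values, not from a real run): with the
    # format above, a labels record 'l' carrying 'local\0other' (11 bytes)
    # would be written wrapped in a 't' record for older clients as
    #
    #   't' + _pack('>I', 12) + 'l' + 'local\0other'
    #
    # i.e. a one-byte type, a 4-byte big-endian length covering the payload,
    # and a payload whose first byte is the 'real' record type. The loop
    # above unwraps this back into ('l', 'local\0other').
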
    @util.propertycache
    def mergedriver(self):
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver

    @util.propertycache
    def localctx(self):
        if self._local is None:
            msg = "localctx accessed but self._local isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._local]

    @util.propertycache
    def otherctx(self):
        if self._other is None:
            msg = "otherctx accessed but self._other isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return bool(self._local) or bool(self._state) or \
               self._repo.vfs.exists(self.statepathv1) or \
               self._repo.vfs.exists(self.statepathv2)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = self._makerecords()
            self._writerecords(records)
            self._dirty = False

    def _makerecords(self):
        records = []
        records.append(('L', hex(self._local)))
        records.append(('O', hex(self._other)))
        if self.mergedriver:
            records.append(('m', '\0'.join([
                self.mergedriver, self._mdstate])))
        for d, v in self._state.iteritems():
            if v[0] == 'd':
                records.append(('D', '\0'.join([d] + v)))
            # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
            # older versions of Mercurial
            elif v[1] == nullhex or v[6] == nullhex:
                records.append(('C', '\0'.join([d] + v)))
            else:
                records.append(('F', '\0'.join([d] + v)))
        for filename, extras in sorted(self._stateextras.iteritems()):
            rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
                                  extras.iteritems())
            records.append(('f', '%s\0%s' % (filename, rawextras)))
        if self._labels is not None:
            labels = '\0'.join(self._labels)
            records.append(('l', labels))
        return records

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, 'w')
        irecords = iter(records)
        lrecords = next(irecords)
        assert lrecords[0] == 'L'
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            if rtype == 'F':
                f.write('%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file

        See the docstring for _readrecordsv2 for why we use 't'."""
        # these are the records that all version 2 clients can read
        whitelist = 'LOF'
        f = self._repo.vfs(self.statepathv2, 'w')
        for key, data in records:
            assert len(key) == 1
            if key not in whitelist:
                key, data = 't', '%s%s' % (key, data)
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file to the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            hash = nullhex
        else:
            hash = hex(hashlib.sha1(fcl.path()).digest())
            self._repo.vfs.write('merge/' + hash, fcl.data())
        self._state[fd] = ['u', hash, fcl.path(),
                           fca.path(), hex(fca.filenode()),
                           fco.path(), hex(fco.filenode()),
                           fcl.flags()]
        self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
        self._dirty = True

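    # For reference (field order taken from add() above and the unpacking in
    # _resolve() below): each _state entry is
    #
    #   [state, local hash, local path, ancestor path, ancestor node,
    #    other path, other node, local flags]
    #
    # where state is one of the one-letter merge record states documented in
    # the class docstring.
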
    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        return self._state.keys()

    def mark(self, dfile, state):
        self._state[dfile][0] = state
        self._dirty = True

    def mdstate(self):
        return self._mdstate

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'u':
                yield f

    def driverresolved(self):
        """Obtain the paths of driver-resolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'd':
                yield f

    def extras(self, filename):
        return self._stateextras.setdefault(filename, {})

    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`"""
        if self[dfile] in 'rd':
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get('ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = self._filectxorabsent(hash, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s '
                          'without common ancestor - keeping local flags\n')
                        % afile)
            elif flags == fla:
                flags = flo
        if preresolve:
            # restore local
            if hash != nullhex:
                f = self._repo.vfs('merge/' + hash)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(self._repo, self._local,
                                                      lfile, fcd, fco, fca,
                                                      labels=self._labels)
        else:
            complete, r, deleted = filemerge.filemerge(self._repo, self._local,
                                                       lfile, fcd, fco, fca,
                                                       labels=self._labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            self.mark(dfile, 'r')

        if complete:
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = 'f'
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = 'r'
            else:
                if fcd.isabsent(): # dc: remote picked
                    action = 'g'
                elif fco.isabsent(): # cd: local picked
                    if dfile in self.localctx:
                        action = 'am'
                    else:
                        action = 'a'
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r

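    # For reference (summarized from the branches above and the queue*
    # methods below): the per-file dirstate actions recorded in _results are
    # 'r' (remove), 'f' (forget/drop), 'a' (add), 'am' (add as modified) and
    # 'g' (get), matching the keys built in actions().
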
    def _filectxorabsent(self, hexnode, ctx, f):
        if hexnode == nullhex:
            return filemerge.absentfilectx(ctx, f)
        else:
            return ctx[f]

    def preresolve(self, dfile, wctx):
        """run premerge process for dfile

        Returns whether the merge is complete, and the exit code."""
        return self._resolve(True, dfile, wctx)

    def resolve(self, dfile, wctx):
        """run merge process (assuming premerge was run) for dfile

        Returns the exit code of the merge."""
        return self._resolve(False, dfile, wctx)[1]

    def counts(self):
        """return counts for updated, merged and removed files in this
        session"""
        updated, merged, removed = 0, 0, 0
        for r, action in self._results.itervalues():
            if r is None:
                updated += 1
            elif r == 0:
                if action == 'r':
                    removed += 1
                else:
                    merged += 1
        return updated, merged, removed

    def unresolvedcount(self):
        """get unresolved count for this merge (persistent)"""
        return len([True for f, entry in self._state.iteritems()
                    if entry[0] == 'u'])

    def actions(self):
        """return lists of actions to perform on the dirstate"""
        actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
        for f, (r, action) in self._results.iteritems():
            if action is not None:
                actions[action].append((f, None, "merge result"))
        return actions

    def recordactions(self):
        """record remove/add/get actions in the dirstate"""
        branchmerge = self._repo.dirstate.p2() != nullid
        recordupdates(self._repo, self.actions(), branchmerge)

    def queueremove(self, f):
        """queues a file to be removed from the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'r'

    def queueadd(self, f):
        """queues a file to be added to the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'a'

    def queueget(self, f):
        """queues a file to be marked modified in the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'g'

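# The mergestate API above is normally driven by commands such as
# 'hg resolve'; a minimal read-only sketch (hypothetical helper, not part of
# this module) of how a caller might inspect it:
def _demounresolved(repo):
    """Illustrative sketch: list unresolved files via the API above."""
    ms = mergestate.read(repo)
    if not ms.active():
        return []
    # 'u' entries are exactly what unresolved() yields
    return list(ms.unresolved())
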
def _getcheckunknownconfig(repo, section, name):
    config = repo.ui.config(section, name, default='abort')
    valid = ['abort', 'ignore', 'warn']
    if config not in valid:
        validstr = ', '.join(["'" + v + "'" for v in valid])
        raise error.ConfigError(_("%s.%s not valid "
                                  "('%s' is none of %s)")
                                % (section, name, config, validstr))
    return config

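# Illustrative configuration example (assumed hgrc snippet, not from the
# original file): the two knobs validated above are read from the 'merge'
# section by _checkunknownfiles() below, e.g.
#
#   [merge]
#   checkunknown = warn
#   checkignored = ignore
#
# Any value outside 'abort'/'ignore'/'warn' raises the ConfigError shown.
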
def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    if f2 is None:
        f2 = f
    return (repo.wvfs.audit.check(f)
        and repo.wvfs.isfileorlink(f)
        and repo.dirstate.normalize(f) not in repo.dirstate
        and mctx[f2].cmp(wctx[f]))

def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    conflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
    if not force:
        def collectconflicts(conflicts, config):
            if config == 'abort':
                abortconflicts.update(conflicts)
            elif config == 'warn':
                warnconflicts.update(conflicts)

        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    conflicts.add(f)
            elif m == 'dg':
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    conflicts.add(f)

        ignoredconflicts = set([c for c in conflicts
                                if repo.dirstate._ignore(c)])
        unknownconflicts = conflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in actions.iteritems():
            if m == 'cm':
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = ('g', (fl2, False), "remote created")
                elif mergeforce or config == 'abort':
                    actions[f] = ('m', (f, f, None, False, anc),
                                  "remote differs from untracked local")
                elif config == 'abort':
                    abortconflicts.add(f)
                else:
                    if config == 'warn':
                        warnconflicts.add(f)
                    actions[f] = ('g', (fl2, True), "remote created")

    for f in sorted(abortconflicts):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(_("untracked files in working directory "
                            "differ from files in requested revision"))

    for f in sorted(warnconflicts):
        repo.ui.warn(_("%s: replacing untracked file\n") % f)

    for f, (m, args, msg) in actions.iteritems():
        backup = f in conflicts
        if m == 'c':
            flags, = args
            actions[f] = ('g', (flags, backup), msg)

def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """

    actions = {}
    m = 'f'
    if branchmerge:
        m = 'r'
    for f in wctx.deleted():
        if f not in mctx:
            actions[f] = m, None, "forget deleted"

    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                actions[f] = 'f', None, "forget removed"

    return actions

def _checkcollision(repo, wmf, actions):
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in sorted(pmmf):
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f

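# A minimal sketch of the collision detection above (hypothetical helper,
# not part of this module). util.normcase folds case (e.g. lower-casing on
# POSIX), so 'README' and 'readme' map to the same key:
def _democollision(paths=('README', 'readme')):
    """Return the first case-folding collision in paths, or None."""
    foldmap = {}
    for f in sorted(paths):
        fold = util.normcase(f)
        if fold in foldmap:
            return (f, foldmap[fold])  # colliding pair
        foldmap[fold] = f
    return None
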
def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True

def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True

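# driverpreprocess/driverconclude are extension points: a hypothetical
# extension shipping a real merge driver could wrap them, e.g.
#
#   from mercurial import extensions, merge
#
#   def preprocess(orig, repo, ms, wctx, labels=None):
#       # mark driver-resolved files (ms.queueremove()/queueadd()/queueget()),
#       # then fall back to the original
#       return orig(repo, ms, wctx, labels=labels)
#
#   def extsetup(ui):
#       extensions.wrapfunction(merge, 'driverpreprocess', preprocess)
#
# (sketch only; the in-tree implementations above simply return True)
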
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
                  acceptremote, followcopies, forcefulldiff=False):
    """
    Merge wctx and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    matcher = matcher to filter file lists
    acceptremote = accept the incoming changes without prompting
    """
    if matcher is not None and matcher.always():
        matcher = None

    copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete, dirmove = ret

    boolbm = pycompat.bytestr(bool(branchmerge))
    boolf = pycompat.bytestr(bool(force))
    boolm = pycompat.bytestr(bool(matcher))
    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (boolbm, boolf, boolm))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        if any(wctx.sub(s).dirty() for s in wctx.substate):
            m1['.hgsubstate'] = modifiednodeid

    # Don't use m2-vs-ma optimization if:
    # - ma is the same as m1 or m2, which we're just going to diff again later
    # - The caller specifically asks for a full diff, which is useful during bid
    #   merge.
    if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
        # Identify which files are relevant to the merge, so we can limit the
        # total m1-vs-m2 diff to just those files. This has significant
        # performance benefits in large repositories.
        relevantfiles = set(ma.diff(m2).keys())

        # For copied and moved files, we need to add the source file too.
        for copykey, copyvalue in copy.iteritems():
            if copyvalue in relevantfiles:
                relevantfiles.add(copykey)
        for movedirkey in movewithdir:
            relevantfiles.add(movedirkey)
        filesmatcher = scmutil.matchfiles(repo, relevantfiles)
        matcher = matchmod.intersectmatchers(matcher, filesmatcher)

    diff = m1.diff(m2, match=matcher)

    if matcher is None:
        matcher = matchmod.always('', '')

    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k', (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2, False), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1, False), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        actions[f] = ('cd', (f, None, f, False, pa.node()),
                                      "prompt changed/deleted")
                elif n1 == addednodeid:
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |  create
                #   y         n           *      |  create
                #   y         y           n      |  create
                #   y         y           y      |  merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                df = None
                for d in dirmove:
                    if f.startswith(d):
                        # new file added in a directory that was moved
                        df = dirmove[d] + f[len(d):]
                        break
                if df is not None and df in m1:
                    actions[df] = ('m', (df, f, f, False, pa.node()),
                                   "local directory rename - respect move from " + f)
                elif acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    actions[f] = ('dc', (None, f, f, False, pa.node()),
                                  "prompt deleted/changed")

    return actions, diverge, renamedelete

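# Illustrative example (assumed file name, not from a real run): for a file
# created on both sides with no common ancestor version, manifestmerge()
# above would record
#
#   actions['foo'] = ('m', ('foo', 'foo', None, False, pa.node()),
#                     "both created")
#
# i.e. action type 'm' with args (f1, f2, fa, move, ancestor), the tuple
# layout consumed by _checkcollision() and the resolve machinery.
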
965 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
965 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
966 """Resolves false conflicts where the nodeid changed but the content
966 """Resolves false conflicts where the nodeid changed but the content
967 remained the same."""
967 remained the same."""
968
968
969 for f, (m, args, msg) in actions.items():
969 for f, (m, args, msg) in actions.items():
970 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
970 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
971 # local did change but ended up with same content
971 # local did change but ended up with same content
972 actions[f] = 'r', None, "prompt same"
972 actions[f] = 'r', None, "prompt same"
973 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
973 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
974 # remote did change but ended up with same content
974 # remote did change but ended up with same content
975 del actions[f] # don't get = keep local deleted
975 del actions[f] # don't get = keep local deleted
976
976
977 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
977 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
978 acceptremote, followcopies, matcher=None,
978 acceptremote, followcopies, matcher=None,
979 mergeforce=False):
979 mergeforce=False):
980 "Calculate the actions needed to merge mctx into wctx using ancestors"
980 "Calculate the actions needed to merge mctx into wctx using ancestors"
981 if len(ancestors) == 1: # default
981 if len(ancestors) == 1: # default
982 actions, diverge, renamedelete = manifestmerge(
982 actions, diverge, renamedelete = manifestmerge(
983 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
983 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
984 acceptremote, followcopies)
984 acceptremote, followcopies)
985 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
985 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
986
986
987 else: # only when merge.preferancestor=* - the default
987 else: # only when merge.preferancestor=* - the default
988 repo.ui.note(
988 repo.ui.note(
989 _("note: merging %s and %s using bids from ancestors %s\n") %
989 _("note: merging %s and %s using bids from ancestors %s\n") %
990 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
990 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
991
991
992 # Call for bids
992 # Call for bids
993 fbids = {} # mapping filename to bids (action method to list af actions)
993 fbids = {} # mapping filename to bids (action method to list af actions)
994 diverge, renamedelete = None, None
994 diverge, renamedelete = None, None
995 for ancestor in ancestors:
995 for ancestor in ancestors:
996 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
996 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
997 actions, diverge1, renamedelete1 = manifestmerge(
997 actions, diverge1, renamedelete1 = manifestmerge(
998 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
998 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
999 acceptremote, followcopies, forcefulldiff=True)
999 acceptremote, followcopies, forcefulldiff=True)
1000 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1000 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1001
1001
1002 # Track the shortest set of warnings on the theory that bid
1002 # Track the shortest set of warnings on the theory that bid
1003 # merge will correctly incorporate more information
1003 # merge will correctly incorporate more information
1004 if diverge is None or len(diverge1) < len(diverge):
1004 if diverge is None or len(diverge1) < len(diverge):
1005 diverge = diverge1
1005 diverge = diverge1
1006 if renamedelete is None or len(renamedelete1) < len(renamedelete):
1006 if renamedelete is None or len(renamedelete1) < len(renamedelete):
1007 renamedelete = renamedelete1
1007 renamedelete = renamedelete1
1008
1008
1009 for f, a in sorted(actions.iteritems()):
1009 for f, a in sorted(actions.iteritems()):
1010 m, args, msg = a
1010 m, args, msg = a
1011 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1011 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1012 if f in fbids:
1012 if f in fbids:
1013 d = fbids[f]
1013 d = fbids[f]
1014 if m in d:
1014 if m in d:
1015 d[m].append(a)
1015 d[m].append(a)
1016 else:
1016 else:
1017 d[m] = [a]
1017 d[m] = [a]
1018 else:
1018 else:
1019 fbids[f] = {m: [a]}
1019 fbids[f] = {m: [a]}
1020
1020
1021 # Pick the best bid for each file
1021 # Pick the best bid for each file
1022 repo.ui.note(_('\nauction for merging merge bids\n'))
1022 repo.ui.note(_('\nauction for merging merge bids\n'))
1023 actions = {}
1023 actions = {}
1024 dms = [] # filenames that have dm actions
1024 dms = [] # filenames that have dm actions
1025 for f, bids in sorted(fbids.items()):
1025 for f, bids in sorted(fbids.items()):
1026 # bids is a mapping from action method to list of actions
1026 # bids is a mapping from action method to list of actions
1027 # Consensus?
1027 # Consensus?
1028 if len(bids) == 1: # all bids are the same kind of method
1028 if len(bids) == 1: # all bids are the same kind of method
1029 m, l = bids.items()[0]
1029 m, l = bids.items()[0]
1030 if all(a == l[0] for a in l[1:]): # len(l) may be > 1
1030 if all(a == l[0] for a in l[1:]): # len(l) may be > 1
1031 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1031 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1032 actions[f] = l[0]
1032 actions[f] = l[0]
1033 if m == 'dm':
1033 if m == 'dm':
1034 dms.append(f)
1034 dms.append(f)
1035 continue
1035 continue
1036 # If keep is an option, just do it.
1036 # If keep is an option, just do it.
1037 if 'k' in bids:
1037 if 'k' in bids:
1038 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1038 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1039 actions[f] = bids['k'][0]
1039 actions[f] = bids['k'][0]
1040 continue
1040 continue
1041 # If there are gets and they all agree [how could they not?], do it.
1041 # If there are gets and they all agree [how could they not?], do it.
1042 if 'g' in bids:
1042 if 'g' in bids:
1043 ga0 = bids['g'][0]
1043 ga0 = bids['g'][0]
1044 if all(a == ga0 for a in bids['g'][1:]):
1044 if all(a == ga0 for a in bids['g'][1:]):
1045 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1045 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1046 actions[f] = ga0
1046 actions[f] = ga0
1047 continue
1047 continue
1048 # TODO: Consider other simple actions such as mode changes
1048 # TODO: Consider other simple actions such as mode changes
1049 # Handle inefficient democrazy.
1049 # Handle inefficient democrazy.
1050 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1050 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1051 for m, l in sorted(bids.items()):
1051 for m, l in sorted(bids.items()):
1052 for _f, args, msg in l:
1052 for _f, args, msg in l:
1053 repo.ui.note(' %s -> %s\n' % (msg, m))
1053 repo.ui.note(' %s -> %s\n' % (msg, m))
1054 # Pick random action. TODO: Instead, prompt user when resolving
1054 # Pick random action. TODO: Instead, prompt user when resolving
1055 m, l = bids.items()[0]
1055 m, l = bids.items()[0]
1056 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1056 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1057 (f, m))
1057 (f, m))
1058 actions[f] = l[0]
1058 actions[f] = l[0]
1059 if m == 'dm':
1059 if m == 'dm':
1060 dms.append(f)
1060 dms.append(f)
1061 continue
1061 continue
1062 # Work around 'dm' that can cause multiple actions for the same file
1062 # Work around 'dm' that can cause multiple actions for the same file
1063 for f in dms:
1063 for f in dms:
1064 dm, (f0, flags), msg = actions[f]
1064 dm, (f0, flags), msg = actions[f]
1065 assert dm == 'dm', dm
1065 assert dm == 'dm', dm
1066 if f0 in actions and actions[f0][0] == 'r':
1066 if f0 in actions and actions[f0][0] == 'r':
1067 # We have one bid for removing a file and another for moving it.
1067 # We have one bid for removing a file and another for moving it.
1068 # These two could be merged as first move and then delete ...
1068 # These two could be merged as first move and then delete ...
1069 # but instead drop moving and just delete.
1069 # but instead drop moving and just delete.
1070 del actions[f]
1070 del actions[f]
1071 repo.ui.note(_('end of auction\n\n'))
1071 repo.ui.note(_('end of auction\n\n'))
1072
1072
1073 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1073 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1074
1074
1075 if wctx.rev() is None:
1075 if wctx.rev() is None:
1076 fractions = _forgetremoved(wctx, mctx, branchmerge)
1076 fractions = _forgetremoved(wctx, mctx, branchmerge)
1077 actions.update(fractions)
1077 actions.update(fractions)
1078
1078
1079 return actions, diverge, renamedelete
1079 return actions, diverge, renamedelete
1080
1080
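# A hedged sketch of the bid auction above on toy data, not the real action
# machinery: identical bids win by consensus, a 'k' (keep) bid is always
# safe, agreeing 'g' (get) bids win, and anything else picks an arbitrary
# bid with a warning. Python 3 idioms replace the bids.items()[0] indexing
# used in the Python 2 code above.
def _auction_sketch(fbids, warn=print):
    actions = {}
    for f, bids in sorted(fbids.items()):
        if len(bids) == 1:                     # single method for this file
            m, l = next(iter(bids.items()))
            if all(a == l[0] for a in l[1:]):  # ...and fully identical bids
                actions[f] = l[0]
                continue
        if 'k' in bids:                        # keeping is always safe
            actions[f] = bids['k'][0]
            continue
        g = bids.get('g')
        if g and all(a == g[0] for a in g[1:]):
            actions[f] = g[0]
            continue
        m, l = next(iter(bids.items()))        # ambiguous: pick one, warn
        warn('%s: ambiguous merge - picked %s action' % (f, m))
        actions[f] = l[0]
    return actions

fbids = {'f.txt': {'g': [('g', ('x',), 'remote')],
                   'k': [('k', None, 'keep local')]}}
assert _auction_sketch(fbids)['f.txt'][0] == 'k'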
1081 def batchremove(repo, wctx, actions):
1081 def batchremove(repo, wctx, actions):
1082 """apply removes to the working directory
1082 """apply removes to the working directory
1083
1083
1084 yields tuples for progress updates
1084 yields tuples for progress updates
1085 """
1085 """
1086 verbose = repo.ui.verbose
1086 verbose = repo.ui.verbose
1087 try:
1087 try:
1088 cwd = pycompat.getcwd()
1088 cwd = pycompat.getcwd()
1089 except OSError as err:
1089 except OSError as err:
1090 if err.errno != errno.ENOENT:
1090 if err.errno != errno.ENOENT:
1091 raise
1091 raise
1092 cwd = None
1092 cwd = None
1093 i = 0
1093 i = 0
1094 for f, args, msg in actions:
1094 for f, args, msg in actions:
1095 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1095 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1096 if verbose:
1096 if verbose:
1097 repo.ui.note(_("removing %s\n") % f)
1097 repo.ui.note(_("removing %s\n") % f)
1098 wctx[f].audit()
1098 wctx[f].audit()
1099 try:
1099 try:
1100 wctx[f].remove(ignoremissing=True)
1100 wctx[f].remove(ignoremissing=True)
1101 except OSError as inst:
1101 except OSError as inst:
1102 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1102 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1103 (f, inst.strerror))
1103 (f, inst.strerror))
1104 if i == 100:
1104 if i == 100:
1105 yield i, f
1105 yield i, f
1106 i = 0
1106 i = 0
1107 i += 1
1107 i += 1
1108 if i > 0:
1108 if i > 0:
1109 yield i, f
1109 yield i, f
1110 if cwd:
1110 if cwd:
1111 # cwd was present before we started to remove files
1111 # cwd was present before we started to remove files
1112 # let's check if it is present after we removed them
1112 # let's check if it is present after we removed them
1113 try:
1113 try:
1114 pycompat.getcwd()
1114 pycompat.getcwd()
1115 except OSError as err:
1115 except OSError as err:
1116 if err.errno != errno.ENOENT:
1116 if err.errno != errno.ENOENT:
1117 raise
1117 raise
1118 # Print a warning if cwd was deleted
1118 # Print a warning if cwd was deleted
1119 repo.ui.warn(_("current directory was removed\n"
1119 repo.ui.warn(_("current directory was removed\n"
1120 "(consider changing to repo root: %s)\n") %
1120 "(consider changing to repo root: %s)\n") %
1121 repo.root)
1121 repo.root)
1122
1122
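# The chunked progress pattern shared by batchremove and batchget, isolated
# as a sketch: yielding roughly every 100 items keeps the worker protocol
# from being flooded with per-file updates while still reporting every item
# exactly once.
def _batched_progress_sketch(items, handle, batchsize=100):
    i = 0
    item = None
    for item in items:
        handle(item)
        if i == batchsize:          # flush a full chunk
            yield i, item
            i = 0
        i += 1
    if i > 0:                       # flush the final partial chunk
        yield i, item

done = sum(n for n, _ in _batched_progress_sketch(range(250), lambda x: None))
assert done == 250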
1123 def batchget(repo, mctx, wctx, actions):
1123 def batchget(repo, mctx, wctx, actions):
1124 """apply gets to the working directory
1124 """apply gets to the working directory
1125
1125
1126 mctx is the context to get from
1126 mctx is the context to get from
1127
1127
1128 yields tuples for progress updates
1128 yields tuples for progress updates
1129 """
1129 """
1130 verbose = repo.ui.verbose
1130 verbose = repo.ui.verbose
1131 fctx = mctx.filectx
1131 fctx = mctx.filectx
1132 ui = repo.ui
1132 ui = repo.ui
1133 i = 0
1133 i = 0
1134 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1134 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1135 for f, (flags, backup), msg in actions:
1135 for f, (flags, backup), msg in actions:
1136 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1136 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1137 if verbose:
1137 if verbose:
1138 repo.ui.note(_("getting %s\n") % f)
1138 repo.ui.note(_("getting %s\n") % f)
1139
1139
1140 if backup:
1140 if backup:
1141 absf = repo.wjoin(f)
1141 absf = repo.wjoin(f)
1142 orig = scmutil.origpath(ui, repo, absf)
1142 orig = scmutil.origpath(ui, repo, absf)
1143 try:
1143 try:
1144 if repo.wvfs.isfileorlink(f):
1144 if repo.wvfs.isfileorlink(f):
1145 util.rename(absf, orig)
1145 util.rename(absf, orig)
1146 except OSError as e:
1146 except OSError as e:
1147 if e.errno != errno.ENOENT:
1147 if e.errno != errno.ENOENT:
1148 raise
1148 raise
1149
1149
1150 if repo.wvfs.isdir(f) and not repo.wvfs.islink(f):
1150 if repo.wvfs.isdir(f) and not repo.wvfs.islink(f):
1151 repo.wvfs.removedirs(f)
1151 repo.wvfs.removedirs(f)
1152 wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
1152 wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
1153 if i == 100:
1153 if i == 100:
1154 yield i, f
1154 yield i, f
1155 i = 0
1155 i = 0
1156 i += 1
1156 i += 1
1157 if i > 0:
1157 if i > 0:
1158 yield i, f
1158 yield i, f
1159
1159
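# A sketch of the backup step batchget performs before overwriting a file.
# scmutil.origpath resolves the real backup location; a plain '.orig' suffix
# stands in here, and a file that vanished in the meantime is not an error.
import errno
import os

def _backup_then_write_sketch(path, data):
    try:
        if os.path.isfile(path) or os.path.islink(path):
            os.rename(path, path + '.orig')   # keep the old content around
    except OSError as err:
        if err.errno != errno.ENOENT:         # a racy delete is fine
            raise
    with open(path, 'wb') as fp:
        fp.write(data)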
1160 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1160 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1161 """apply the merge action list to the working directory
1161 """apply the merge action list to the working directory
1162
1162
1163 wctx is the working copy context
1163 wctx is the working copy context
1164 mctx is the context to be merged into the working copy
1164 mctx is the context to be merged into the working copy
1165
1165
1166 Return a tuple of counts (updated, merged, removed, unresolved) that
1166 Return a tuple of counts (updated, merged, removed, unresolved) that
1167 describes how many files were affected by the update.
1167 describes how many files were affected by the update.
1168 """
1168 """
1169
1169
1170 updated, merged, removed = 0, 0, 0
1170 updated, merged, removed = 0, 0, 0
1171 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1171 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1172 moves = []
1172 moves = []
1173 for m, l in actions.items():
1173 for m, l in actions.items():
1174 l.sort()
1174 l.sort()
1175
1175
1176 # 'cd' and 'dc' actions are treated like other merge conflicts
1176 # 'cd' and 'dc' actions are treated like other merge conflicts
1177 mergeactions = sorted(actions['cd'])
1177 mergeactions = sorted(actions['cd'])
1178 mergeactions.extend(sorted(actions['dc']))
1178 mergeactions.extend(sorted(actions['dc']))
1179 mergeactions.extend(actions['m'])
1179 mergeactions.extend(actions['m'])
1180 for f, args, msg in mergeactions:
1180 for f, args, msg in mergeactions:
1181 f1, f2, fa, move, anc = args
1181 f1, f2, fa, move, anc = args
1182 if f == '.hgsubstate': # merged internally
1182 if f == '.hgsubstate': # merged internally
1183 continue
1183 continue
1184 if f1 is None:
1184 if f1 is None:
1185 fcl = filemerge.absentfilectx(wctx, fa)
1185 fcl = filemerge.absentfilectx(wctx, fa)
1186 else:
1186 else:
1187 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1187 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1188 fcl = wctx[f1]
1188 fcl = wctx[f1]
1189 if f2 is None:
1189 if f2 is None:
1190 fco = filemerge.absentfilectx(mctx, fa)
1190 fco = filemerge.absentfilectx(mctx, fa)
1191 else:
1191 else:
1192 fco = mctx[f2]
1192 fco = mctx[f2]
1193 actx = repo[anc]
1193 actx = repo[anc]
1194 if fa in actx:
1194 if fa in actx:
1195 fca = actx[fa]
1195 fca = actx[fa]
1196 else:
1196 else:
1197 # TODO: move to absentfilectx
1197 # TODO: move to absentfilectx
1198 fca = repo.filectx(f1, fileid=nullrev)
1198 fca = repo.filectx(f1, fileid=nullrev)
1199 ms.add(fcl, fco, fca, f)
1199 ms.add(fcl, fco, fca, f)
1200 if f1 != f and move:
1200 if f1 != f and move:
1201 moves.append(f1)
1201 moves.append(f1)
1202
1202
1203 _updating = _('updating')
1203 _updating = _('updating')
1204 _files = _('files')
1204 _files = _('files')
1205 progress = repo.ui.progress
1205 progress = repo.ui.progress
1206
1206
1207 # remove renamed files after safely stored
1207 # remove renamed files after safely stored
1208 for f in moves:
1208 for f in moves:
1209 if os.path.lexists(repo.wjoin(f)):
1209 if os.path.lexists(repo.wjoin(f)):
1210 repo.ui.debug("removing %s\n" % f)
1210 repo.ui.debug("removing %s\n" % f)
1211 wctx[f].audit()
1211 wctx[f].audit()
1212 wctx[f].remove()
1212 wctx[f].remove()
1213
1213
1214 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1214 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1215
1215
1216 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1216 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1217 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1217 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1218
1218
1219 # remove in parallel (must come first)
1219 # remove in parallel (must come first)
1220 z = 0
1220 z = 0
1221 prog = worker.worker(repo.ui, 0.001, batchremove, (repo, wctx),
1221 prog = worker.worker(repo.ui, 0.001, batchremove, (repo, wctx),
1222 actions['r'])
1222 actions['r'])
1223 for i, item in prog:
1223 for i, item in prog:
1224 z += i
1224 z += i
1225 progress(_updating, z, item=item, total=numupdates, unit=_files)
1225 progress(_updating, z, item=item, total=numupdates, unit=_files)
1226 removed = len(actions['r'])
1226 removed = len(actions['r'])
1227
1227
1228 # get in parallel
1228 # get in parallel
1229 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx, wctx),
1229 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx, wctx),
1230 actions['g'])
1230 actions['g'])
1231 for i, item in prog:
1231 for i, item in prog:
1232 z += i
1232 z += i
1233 progress(_updating, z, item=item, total=numupdates, unit=_files)
1233 progress(_updating, z, item=item, total=numupdates, unit=_files)
1234 updated = len(actions['g'])
1234 updated = len(actions['g'])
1235
1235
1236 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1236 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1237 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1237 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1238
1238
1239 # forget (manifest only, just log it) (must come first)
1239 # forget (manifest only, just log it) (must come first)
1240 for f, args, msg in actions['f']:
1240 for f, args, msg in actions['f']:
1241 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1241 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1242 z += 1
1242 z += 1
1243 progress(_updating, z, item=f, total=numupdates, unit=_files)
1243 progress(_updating, z, item=f, total=numupdates, unit=_files)
1244
1244
1245 # re-add (manifest only, just log it)
1245 # re-add (manifest only, just log it)
1246 for f, args, msg in actions['a']:
1246 for f, args, msg in actions['a']:
1247 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1247 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1248 z += 1
1248 z += 1
1249 progress(_updating, z, item=f, total=numupdates, unit=_files)
1249 progress(_updating, z, item=f, total=numupdates, unit=_files)
1250
1250
1251 # re-add/mark as modified (manifest only, just log it)
1251 # re-add/mark as modified (manifest only, just log it)
1252 for f, args, msg in actions['am']:
1252 for f, args, msg in actions['am']:
1253 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1253 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1254 z += 1
1254 z += 1
1255 progress(_updating, z, item=f, total=numupdates, unit=_files)
1255 progress(_updating, z, item=f, total=numupdates, unit=_files)
1256
1256
1257 # keep (noop, just log it)
1257 # keep (noop, just log it)
1258 for f, args, msg in actions['k']:
1258 for f, args, msg in actions['k']:
1259 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1259 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1260 # no progress
1260 # no progress
1261
1261
1262 # directory rename, move local
1262 # directory rename, move local
1263 for f, args, msg in actions['dm']:
1263 for f, args, msg in actions['dm']:
1264 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1264 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1265 z += 1
1265 z += 1
1266 progress(_updating, z, item=f, total=numupdates, unit=_files)
1266 progress(_updating, z, item=f, total=numupdates, unit=_files)
1267 f0, flags = args
1267 f0, flags = args
1268 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1268 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1269 wctx[f].audit()
1269 wctx[f].audit()
1270 wctx[f].write(wctx.filectx(f0).data(), flags)
1270 wctx[f].write(wctx.filectx(f0).data(), flags)
1271 wctx[f0].remove()
1271 wctx[f0].remove()
1272 updated += 1
1272 updated += 1
1273
1273
1274 # local directory rename, get
1274 # local directory rename, get
1275 for f, args, msg in actions['dg']:
1275 for f, args, msg in actions['dg']:
1276 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1276 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1277 z += 1
1277 z += 1
1278 progress(_updating, z, item=f, total=numupdates, unit=_files)
1278 progress(_updating, z, item=f, total=numupdates, unit=_files)
1279 f0, flags = args
1279 f0, flags = args
1280 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1280 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1281 wctx[f].write(mctx.filectx(f0).data(), flags)
1281 wctx[f].write(mctx.filectx(f0).data(), flags)
1282 updated += 1
1282 updated += 1
1283
1283
1284 # exec
1284 # exec
1285 for f, args, msg in actions['e']:
1285 for f, args, msg in actions['e']:
1286 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1286 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1287 z += 1
1287 z += 1
1288 progress(_updating, z, item=f, total=numupdates, unit=_files)
1288 progress(_updating, z, item=f, total=numupdates, unit=_files)
1289 flags, = args
1289 flags, = args
1290 wctx[f].audit()
1290 wctx[f].audit()
1291 wctx[f].setflags('l' in flags, 'x' in flags)
1291 wctx[f].setflags('l' in flags, 'x' in flags)
1292 updated += 1
1292 updated += 1
1293
1293
1294 # the ordering is important here -- ms.mergedriver will raise if the merge
1294 # the ordering is important here -- ms.mergedriver will raise if the merge
1295 # driver has changed, and we want to be able to bypass it when overwrite is
1295 # driver has changed, and we want to be able to bypass it when overwrite is
1296 # True
1296 # True
1297 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1297 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1298
1298
1299 if usemergedriver:
1299 if usemergedriver:
1300 ms.commit()
1300 ms.commit()
1301 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1301 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1302 # the driver might leave some files unresolved
1302 # the driver might leave some files unresolved
1303 unresolvedf = set(ms.unresolved())
1303 unresolvedf = set(ms.unresolved())
1304 if not proceed:
1304 if not proceed:
1305 # XXX setting unresolved to at least 1 is a hack to make sure we
1305 # XXX setting unresolved to at least 1 is a hack to make sure we
1306 # error out
1306 # error out
1307 return updated, merged, removed, max(len(unresolvedf), 1)
1307 return updated, merged, removed, max(len(unresolvedf), 1)
1308 newactions = []
1308 newactions = []
1309 for f, args, msg in mergeactions:
1309 for f, args, msg in mergeactions:
1310 if f in unresolvedf:
1310 if f in unresolvedf:
1311 newactions.append((f, args, msg))
1311 newactions.append((f, args, msg))
1312 mergeactions = newactions
1312 mergeactions = newactions
1313
1313
1314 # premerge
1314 # premerge
1315 tocomplete = []
1315 tocomplete = []
1316 for f, args, msg in mergeactions:
1316 for f, args, msg in mergeactions:
1317 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1317 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1318 z += 1
1318 z += 1
1319 progress(_updating, z, item=f, total=numupdates, unit=_files)
1319 progress(_updating, z, item=f, total=numupdates, unit=_files)
1320 if f == '.hgsubstate': # subrepo states need updating
1320 if f == '.hgsubstate': # subrepo states need updating
1321 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1321 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1322 overwrite, labels)
1322 overwrite, labels)
1323 continue
1323 continue
1324 wctx[f].audit()
1324 wctx[f].audit()
1325 complete, r = ms.preresolve(f, wctx)
1325 complete, r = ms.preresolve(f, wctx)
1326 if not complete:
1326 if not complete:
1327 numupdates += 1
1327 numupdates += 1
1328 tocomplete.append((f, args, msg))
1328 tocomplete.append((f, args, msg))
1329
1329
1330 # merge
1330 # merge
1331 for f, args, msg in tocomplete:
1331 for f, args, msg in tocomplete:
1332 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1332 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1333 z += 1
1333 z += 1
1334 progress(_updating, z, item=f, total=numupdates, unit=_files)
1334 progress(_updating, z, item=f, total=numupdates, unit=_files)
1335 ms.resolve(f, wctx)
1335 ms.resolve(f, wctx)
1336
1336
1337 ms.commit()
1337 ms.commit()
1338
1338
1339 unresolved = ms.unresolvedcount()
1339 unresolved = ms.unresolvedcount()
1340
1340
1341 if usemergedriver and not unresolved and ms.mdstate() != 's':
1341 if usemergedriver and not unresolved and ms.mdstate() != 's':
1342 if not driverconclude(repo, ms, wctx, labels=labels):
1342 if not driverconclude(repo, ms, wctx, labels=labels):
1343 # XXX setting unresolved to at least 1 is a hack to make sure we
1343 # XXX setting unresolved to at least 1 is a hack to make sure we
1344 # error out
1344 # error out
1345 unresolved = max(unresolved, 1)
1345 unresolved = max(unresolved, 1)
1346
1346
1347 ms.commit()
1347 ms.commit()
1348
1348
1349 msupdated, msmerged, msremoved = ms.counts()
1349 msupdated, msmerged, msremoved = ms.counts()
1350 updated += msupdated
1350 updated += msupdated
1351 merged += msmerged
1351 merged += msmerged
1352 removed += msremoved
1352 removed += msremoved
1353
1353
1354 extraactions = ms.actions()
1354 extraactions = ms.actions()
1355 if extraactions:
1355 if extraactions:
1356 mfiles = set(a[0] for a in actions['m'])
1356 mfiles = set(a[0] for a in actions['m'])
1357 for k, acts in extraactions.iteritems():
1357 for k, acts in extraactions.iteritems():
1358 actions[k].extend(acts)
1358 actions[k].extend(acts)
1359 # Remove these files from actions['m'] as well. This is important
1359 # Remove these files from actions['m'] as well. This is important
1360 # because in recordupdates, files in actions['m'] are processed
1360 # because in recordupdates, files in actions['m'] are processed
1361 # after files in other actions, and the merge driver might add
1361 # after files in other actions, and the merge driver might add
1362 # files to those actions via extraactions above. This can lead to a
1362 # files to those actions via extraactions above. This can lead to a
1363 # file being recorded twice, with poor results. This is especially
1363 # file being recorded twice, with poor results. This is especially
1364 # problematic for actions['r'] (currently only possible with the
1364 # problematic for actions['r'] (currently only possible with the
1365 # merge driver in the initial merge process; interrupted merges
1365 # merge driver in the initial merge process; interrupted merges
1366 # don't go through this flow).
1366 # don't go through this flow).
1367 #
1367 #
1368 # The real fix here is to have indexes by both file and action so
1368 # The real fix here is to have indexes by both file and action so
1369 # that when the action for a file is changed it is automatically
1369 # that when the action for a file is changed it is automatically
1370 # reflected in the other action lists. But that involves a more
1370 # reflected in the other action lists. But that involves a more
1371 # complex data structure, so this will do for now.
1371 # complex data structure, so this will do for now.
1372 #
1372 #
1373 # We don't need to do the same operation for 'dc' and 'cd' because
1373 # We don't need to do the same operation for 'dc' and 'cd' because
1374 # those lists aren't consulted again.
1374 # those lists aren't consulted again.
1375 mfiles.difference_update(a[0] for a in acts)
1375 mfiles.difference_update(a[0] for a in acts)
1376
1376
1377 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1377 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1378
1378
1379 progress(_updating, None, total=numupdates, unit=_files)
1379 progress(_updating, None, total=numupdates, unit=_files)
1380
1380
1381 return updated, merged, removed, unresolved
1381 return updated, merged, removed, unresolved
1382
1382
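# The action table consumed above, in miniature: a dict keyed by one-letter
# method ('r' remove, 'g' get, 'm' merge, 'k' keep, ...) mapping to lists of
# (file, args, msg) tuples. 'k' entries are no-ops, so they are excluded
# from the progress total, exactly as in applyupdates. Toy data only.
actions_example = {
    'r': [('old.txt', None, 'other deleted')],
    'g': [('new.txt', ('', False), 'remote created')],
    'k': [('same.txt', None, 'keep')],
    'm': [],
}
numupdates_example = sum(len(l) for m, l in actions_example.items()
                         if m != 'k')
assert numupdates_example == 2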
1383 def recordupdates(repo, actions, branchmerge):
1383 def recordupdates(repo, actions, branchmerge):
1384 "record merge actions to the dirstate"
1384 "record merge actions to the dirstate"
1385 # remove (must come first)
1385 # remove (must come first)
1386 for f, args, msg in actions.get('r', []):
1386 for f, args, msg in actions.get('r', []):
1387 if branchmerge:
1387 if branchmerge:
1388 repo.dirstate.remove(f)
1388 repo.dirstate.remove(f)
1389 else:
1389 else:
1390 repo.dirstate.drop(f)
1390 repo.dirstate.drop(f)
1391
1391
1392 # forget (must come first)
1392 # forget (must come first)
1393 for f, args, msg in actions.get('f', []):
1393 for f, args, msg in actions.get('f', []):
1394 repo.dirstate.drop(f)
1394 repo.dirstate.drop(f)
1395
1395
1396 # re-add
1396 # re-add
1397 for f, args, msg in actions.get('a', []):
1397 for f, args, msg in actions.get('a', []):
1398 repo.dirstate.add(f)
1398 repo.dirstate.add(f)
1399
1399
1400 # re-add/mark as modified
1400 # re-add/mark as modified
1401 for f, args, msg in actions.get('am', []):
1401 for f, args, msg in actions.get('am', []):
1402 if branchmerge:
1402 if branchmerge:
1403 repo.dirstate.normallookup(f)
1403 repo.dirstate.normallookup(f)
1404 else:
1404 else:
1405 repo.dirstate.add(f)
1405 repo.dirstate.add(f)
1406
1406
1407 # exec change
1407 # exec change
1408 for f, args, msg in actions.get('e', []):
1408 for f, args, msg in actions.get('e', []):
1409 repo.dirstate.normallookup(f)
1409 repo.dirstate.normallookup(f)
1410
1410
1411 # keep
1411 # keep
1412 for f, args, msg in actions.get('k', []):
1412 for f, args, msg in actions.get('k', []):
1413 pass
1413 pass
1414
1414
1415 # get
1415 # get
1416 for f, args, msg in actions.get('g', []):
1416 for f, args, msg in actions.get('g', []):
1417 if branchmerge:
1417 if branchmerge:
1418 repo.dirstate.otherparent(f)
1418 repo.dirstate.otherparent(f)
1419 else:
1419 else:
1420 repo.dirstate.normal(f)
1420 repo.dirstate.normal(f)
1421
1421
1422 # merge
1422 # merge
1423 for f, args, msg in actions.get('m', []):
1423 for f, args, msg in actions.get('m', []):
1424 f1, f2, fa, move, anc = args
1424 f1, f2, fa, move, anc = args
1425 if branchmerge:
1425 if branchmerge:
1426 # We've done a branch merge, mark this file as merged
1426 # We've done a branch merge, mark this file as merged
1427 # so that we properly record the merger later
1427 # so that we properly record the merger later
1428 repo.dirstate.merge(f)
1428 repo.dirstate.merge(f)
1429 if f1 != f2: # copy/rename
1429 if f1 != f2: # copy/rename
1430 if move:
1430 if move:
1431 repo.dirstate.remove(f1)
1431 repo.dirstate.remove(f1)
1432 if f1 != f:
1432 if f1 != f:
1433 repo.dirstate.copy(f1, f)
1433 repo.dirstate.copy(f1, f)
1434 else:
1434 else:
1435 repo.dirstate.copy(f2, f)
1435 repo.dirstate.copy(f2, f)
1436 else:
1436 else:
1437 # We've update-merged a locally modified file, so
1437 # We've update-merged a locally modified file, so
1438 # we set the dirstate to emulate a normal checkout
1438 # we set the dirstate to emulate a normal checkout
1439 # of that file some time in the past. Thus our
1439 # of that file some time in the past. Thus our
1440 # merge will appear as a normal local file
1440 # merge will appear as a normal local file
1441 # modification.
1441 # modification.
1442 if f2 == f: # file not locally copied/moved
1442 if f2 == f: # file not locally copied/moved
1443 repo.dirstate.normallookup(f)
1443 repo.dirstate.normallookup(f)
1444 if move:
1444 if move:
1445 repo.dirstate.drop(f1)
1445 repo.dirstate.drop(f1)
1446
1446
1447 # directory rename, move local
1447 # directory rename, move local
1448 for f, args, msg in actions.get('dm', []):
1448 for f, args, msg in actions.get('dm', []):
1449 f0, flag = args
1449 f0, flag = args
1450 if branchmerge:
1450 if branchmerge:
1451 repo.dirstate.add(f)
1451 repo.dirstate.add(f)
1452 repo.dirstate.remove(f0)
1452 repo.dirstate.remove(f0)
1453 repo.dirstate.copy(f0, f)
1453 repo.dirstate.copy(f0, f)
1454 else:
1454 else:
1455 repo.dirstate.normal(f)
1455 repo.dirstate.normal(f)
1456 repo.dirstate.drop(f0)
1456 repo.dirstate.drop(f0)
1457
1457
1458 # directory rename, get
1458 # directory rename, get
1459 for f, args, msg in actions.get('dg', []):
1459 for f, args, msg in actions.get('dg', []):
1460 f0, flag = args
1460 f0, flag = args
1461 if branchmerge:
1461 if branchmerge:
1462 repo.dirstate.add(f)
1462 repo.dirstate.add(f)
1463 repo.dirstate.copy(f0, f)
1463 repo.dirstate.copy(f0, f)
1464 else:
1464 else:
1465 repo.dirstate.normal(f)
1465 repo.dirstate.normal(f)
1466
1466
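# A sketch of recordupdates as a dispatch table: each action bucket maps to
# a dirstate call, and the same bucket maps differently depending on
# branchmerge. A call recorder stands in for the stateful dirstate API.
def _record_sketch(actions, branchmerge, dirstate):
    for f, args, msg in actions.get('r', []):
        (dirstate.remove if branchmerge else dirstate.drop)(f)
    for f, args, msg in actions.get('g', []):
        (dirstate.otherparent if branchmerge else dirstate.normal)(f)

class _Recorder(object):
    def __init__(self):
        self.calls = []
    def __getattr__(self, name):
        return lambda f: self.calls.append((name, f))

ds = _Recorder()
_record_sketch({'r': [('a', None, '')], 'g': [('b', None, '')]}, True, ds)
assert ds.calls == [('remove', 'a'), ('otherparent', 'b')]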
1467 def update(repo, node, branchmerge, force, ancestor=None,
1467 def update(repo, node, branchmerge, force, ancestor=None,
1468 mergeancestor=False, labels=None, matcher=None, mergeforce=False,
1468 mergeancestor=False, labels=None, matcher=None, mergeforce=False,
1469 updatecheck=None):
1469 updatecheck=None):
1470 """
1470 """
1471 Perform a merge between the working directory and the given node
1471 Perform a merge between the working directory and the given node
1472
1472
1473 node = the node to update to
1473 node = the node to update to
1474 branchmerge = whether to merge between branches
1474 branchmerge = whether to merge between branches
1475 force = whether to force branch merging or file overwriting
1475 force = whether to force branch merging or file overwriting
1476 matcher = a matcher to filter file lists (dirstate not updated)
1476 matcher = a matcher to filter file lists (dirstate not updated)
1477 mergeancestor = whether it is merging with an ancestor. If true,
1477 mergeancestor = whether it is merging with an ancestor. If true,
1478 we should accept the incoming changes for any prompts that occur.
1478 we should accept the incoming changes for any prompts that occur.
1479 If false, merging with an ancestor (fast-forward) is only allowed
1479 If false, merging with an ancestor (fast-forward) is only allowed
1480 between different named branches. This flag is used by the rebase extension
1480 between different named branches. This flag is used by the rebase extension
1481 as a temporary fix and should be avoided in general.
1481 as a temporary fix and should be avoided in general.
1482 labels = labels to use for base, local and other
1482 labels = labels to use for base, local and other
1483 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1483 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1484 this is True, then 'force' should be True as well.
1484 this is True, then 'force' should be True as well.
1485
1485
1486 The table below shows all the behaviors of the update command
1486 The table below shows all the behaviors of the update command
1487 given the -c and -C or no options, whether the working directory
1487 given the -c and -C or no options, whether the working directory
1488 is dirty, whether a revision is specified, and the relationship of
1488 is dirty, whether a revision is specified, and the relationship of
1489 the parent rev to the target rev (linear or not). Match from top first. The
1489 the parent rev to the target rev (linear or not). Match from top first. The
1490 -n option doesn't exist on the command line, but represents the
1490 -n option doesn't exist on the command line, but represents the
1491 experimental.updatecheck=noconflict option.
1491 experimental.updatecheck=noconflict option.
1492
1492
1493 This logic is tested by test-update-branches.t.
1493 This logic is tested by test-update-branches.t.
1494
1494
1495 -c -C -n -m dirty rev linear | result
1495 -c -C -n -m dirty rev linear | result
1496 y y * * * * * | (1)
1496 y y * * * * * | (1)
1497 y * y * * * * | (1)
1497 y * y * * * * | (1)
1498 y * * y * * * | (1)
1498 y * * y * * * | (1)
1499 * y y * * * * | (1)
1499 * y y * * * * | (1)
1500 * y * y * * * | (1)
1500 * y * y * * * | (1)
1501 * * y y * * * | (1)
1501 * * y y * * * | (1)
1502 * * * * * n n | x
1502 * * * * * n n | x
1503 * * * * n * * | ok
1503 * * * * n * * | ok
1504 n n n n y * y | merge
1504 n n n n y * y | merge
1505 n n n n y y n | (2)
1505 n n n n y y n | (2)
1506 n n n y y * * | merge
1506 n n n y y * * | merge
1507 n n y n y * * | merge if no conflict
1507 n n y n y * * | merge if no conflict
1508 n y n n y * * | discard
1508 n y n n y * * | discard
1509 y n n n y * * | (3)
1509 y n n n y * * | (3)
1510
1510
1511 x = can't happen
1511 x = can't happen
1512 * = don't-care
1512 * = don't-care
1513 1 = incompatible options (checked in commands.py)
1513 1 = incompatible options (checked in commands.py)
1514 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1514 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1515 3 = abort: uncommitted changes (checked in commands.py)
1515 3 = abort: uncommitted changes (checked in commands.py)
1516
1516
1517 Return the same tuple as applyupdates().
1517 Return the same tuple as applyupdates().
1518 """
1518 """
1519
1519
1520 # This function used to find the default destination if node was None, but
1520 # This function used to find the default destination if node was None, but
1521 # that's now in destutil.py.
1521 # that's now in destutil.py.
1522 assert node is not None
1522 assert node is not None
1523 if not branchmerge and not force:
1523 if not branchmerge and not force:
1524 # TODO: remove the default once all callers that pass branchmerge=False
1524 # TODO: remove the default once all callers that pass branchmerge=False
1525 # and force=False pass a value for updatecheck. We may want to allow
1525 # and force=False pass a value for updatecheck. We may want to allow
1526 # updatecheck='abort' to better support some of these callers.
1526 # updatecheck='abort' to better support some of these callers.
1527 if updatecheck is None:
1527 if updatecheck is None:
1528 updatecheck = 'linear'
1528 updatecheck = 'linear'
1529 assert updatecheck in ('none', 'linear', 'noconflict')
1529 assert updatecheck in ('none', 'linear', 'noconflict')
1530 # If we're doing a partial update, we need to skip updating
1530 # If we're doing a partial update, we need to skip updating
1531 # the dirstate, so make a note of any partial-ness to the
1531 # the dirstate, so make a note of any partial-ness to the
1532 # update here.
1532 # update here.
1533 if matcher is None or matcher.always():
1533 if matcher is None or matcher.always():
1534 partial = False
1534 partial = False
1535 else:
1535 else:
1536 partial = True
1536 partial = True
1537 with repo.wlock():
1537 with repo.wlock():
1538 wc = repo[None]
1538 wc = repo[None]
1539 pl = wc.parents()
1539 pl = wc.parents()
1540 p1 = pl[0]
1540 p1 = pl[0]
1541 pas = [None]
1541 pas = [None]
1542 if ancestor is not None:
1542 if ancestor is not None:
1543 pas = [repo[ancestor]]
1543 pas = [repo[ancestor]]
1544
1544
1545 overwrite = force and not branchmerge
1545 overwrite = force and not branchmerge
1546
1546
1547 p2 = repo[node]
1547 p2 = repo[node]
1548 if pas[0] is None:
1548 if pas[0] is None:
1549 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1549 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1550 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1550 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1551 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1551 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1552 else:
1552 else:
1553 pas = [p1.ancestor(p2, warn=branchmerge)]
1553 pas = [p1.ancestor(p2, warn=branchmerge)]
1554
1554
1555 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1555 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1556
1556
1557 ### check phase
1557 ### check phase
1558 if not overwrite:
1558 if not overwrite:
1559 if len(pl) > 1:
1559 if len(pl) > 1:
1560 raise error.Abort(_("outstanding uncommitted merge"))
1560 raise error.Abort(_("outstanding uncommitted merge"))
1561 ms = mergestate.read(repo)
1561 ms = mergestate.read(repo)
1562 if list(ms.unresolved()):
1562 if list(ms.unresolved()):
1563 raise error.Abort(_("outstanding merge conflicts"))
1563 raise error.Abort(_("outstanding merge conflicts"))
1564 if branchmerge:
1564 if branchmerge:
1565 if pas == [p2]:
1565 if pas == [p2]:
1566 raise error.Abort(_("merging with a working directory ancestor"
1566 raise error.Abort(_("merging with a working directory ancestor"
1567 " has no effect"))
1567 " has no effect"))
1568 elif pas == [p1]:
1568 elif pas == [p1]:
1569 if not mergeancestor and wc.branch() == p2.branch():
1569 if not mergeancestor and wc.branch() == p2.branch():
1570 raise error.Abort(_("nothing to merge"),
1570 raise error.Abort(_("nothing to merge"),
1571 hint=_("use 'hg update' "
1571 hint=_("use 'hg update' "
1572 "or check 'hg heads'"))
1572 "or check 'hg heads'"))
1573 if not force and (wc.files() or wc.deleted()):
1573 if not force and (wc.files() or wc.deleted()):
1574 raise error.Abort(_("uncommitted changes"),
1574 raise error.Abort(_("uncommitted changes"),
1575 hint=_("use 'hg status' to list changes"))
1575 hint=_("use 'hg status' to list changes"))
1576 for s in sorted(wc.substate):
1576 for s in sorted(wc.substate):
1577 wc.sub(s).bailifchanged()
1577 wc.sub(s).bailifchanged()
1578
1578
1579 elif not overwrite:
1579 elif not overwrite:
1580 if p1 == p2: # no-op update
1580 if p1 == p2: # no-op update
1581 # call the hooks and exit early
1581 # call the hooks and exit early
1582 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1582 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1583 repo.hook('update', parent1=xp2, parent2='', error=0)
1583 repo.hook('update', parent1=xp2, parent2='', error=0)
1584 return 0, 0, 0, 0
1584 return 0, 0, 0, 0
1585
1585
1586 if (updatecheck == 'linear' and
1586 if (updatecheck == 'linear' and
1587 pas not in ([p1], [p2])): # nonlinear
1587 pas not in ([p1], [p2])): # nonlinear
1588 dirty = wc.dirty(missing=True)
1588 dirty = wc.dirty(missing=True)
1589 if dirty:
1589 if dirty:
1590 # Branching is a bit strange to ensure we do the minimal
1590 # Branching is a bit strange to ensure we do the minimal
1591 # amount of call to obsolete.foreground.
1591 # amount of call to obsutil.foreground.
1592 foreground = obsolete.foreground(repo, [p1.node()])
1592 foreground = obsutil.foreground(repo, [p1.node()])
1593 # note: the <node> variable contains a random identifier
1593 # note: the <node> variable contains a random identifier
1594 if repo[node].node() in foreground:
1594 if repo[node].node() in foreground:
1595 pass # allow updating to successors
1595 pass # allow updating to successors
1596 else:
1596 else:
1597 msg = _("uncommitted changes")
1597 msg = _("uncommitted changes")
1598 hint = _("commit or update --clean to discard changes")
1598 hint = _("commit or update --clean to discard changes")
1599 raise error.UpdateAbort(msg, hint=hint)
1599 raise error.UpdateAbort(msg, hint=hint)
1600 else:
1600 else:
1601 # Allow jumping branches if clean and specific rev given
1601 # Allow jumping branches if clean and specific rev given
1602 pass
1602 pass
1603
1603
1604 if overwrite:
1604 if overwrite:
1605 pas = [wc]
1605 pas = [wc]
1606 elif not branchmerge:
1606 elif not branchmerge:
1607 pas = [p1]
1607 pas = [p1]
1608
1608
1609 # deprecated config: merge.followcopies
1609 # deprecated config: merge.followcopies
1610 followcopies = repo.ui.configbool('merge', 'followcopies', True)
1610 followcopies = repo.ui.configbool('merge', 'followcopies', True)
1611 if overwrite:
1611 if overwrite:
1612 followcopies = False
1612 followcopies = False
1613 elif not pas[0]:
1613 elif not pas[0]:
1614 followcopies = False
1614 followcopies = False
1615 if not branchmerge and not wc.dirty(missing=True):
1615 if not branchmerge and not wc.dirty(missing=True):
1616 followcopies = False
1616 followcopies = False
1617
1617
1618 ### calculate phase
1618 ### calculate phase
1619 actionbyfile, diverge, renamedelete = calculateupdates(
1619 actionbyfile, diverge, renamedelete = calculateupdates(
1620 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1620 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1621 followcopies, matcher=matcher, mergeforce=mergeforce)
1621 followcopies, matcher=matcher, mergeforce=mergeforce)
1622
1622
1623 if updatecheck == 'noconflict':
1623 if updatecheck == 'noconflict':
1624 for f, (m, args, msg) in actionbyfile.iteritems():
1624 for f, (m, args, msg) in actionbyfile.iteritems():
1625 if m not in ('g', 'k', 'e', 'r'):
1625 if m not in ('g', 'k', 'e', 'r'):
1626 msg = _("conflicting changes")
1626 msg = _("conflicting changes")
1627 hint = _("commit or update --clean to discard changes")
1627 hint = _("commit or update --clean to discard changes")
1628 raise error.Abort(msg, hint=hint)
1628 raise error.Abort(msg, hint=hint)
1629
1629
1630 # Prompt and create actions. Most of this is in the resolve phase
1630 # Prompt and create actions. Most of this is in the resolve phase
1631 # already, but we can't handle .hgsubstate in filemerge or
1631 # already, but we can't handle .hgsubstate in filemerge or
1632 # subrepo.submerge yet so we have to keep prompting for it.
1632 # subrepo.submerge yet so we have to keep prompting for it.
1633 if '.hgsubstate' in actionbyfile:
1633 if '.hgsubstate' in actionbyfile:
1634 f = '.hgsubstate'
1634 f = '.hgsubstate'
1635 m, args, msg = actionbyfile[f]
1635 m, args, msg = actionbyfile[f]
1636 prompts = filemerge.partextras(labels)
1636 prompts = filemerge.partextras(labels)
1637 prompts['f'] = f
1637 prompts['f'] = f
1638 if m == 'cd':
1638 if m == 'cd':
1639 if repo.ui.promptchoice(
1639 if repo.ui.promptchoice(
1640 _("local%(l)s changed %(f)s which other%(o)s deleted\n"
1640 _("local%(l)s changed %(f)s which other%(o)s deleted\n"
1641 "use (c)hanged version or (d)elete?"
1641 "use (c)hanged version or (d)elete?"
1642 "$$ &Changed $$ &Delete") % prompts, 0):
1642 "$$ &Changed $$ &Delete") % prompts, 0):
1643 actionbyfile[f] = ('r', None, "prompt delete")
1643 actionbyfile[f] = ('r', None, "prompt delete")
1644 elif f in p1:
1644 elif f in p1:
1645 actionbyfile[f] = ('am', None, "prompt keep")
1645 actionbyfile[f] = ('am', None, "prompt keep")
1646 else:
1646 else:
1647 actionbyfile[f] = ('a', None, "prompt keep")
1647 actionbyfile[f] = ('a', None, "prompt keep")
1648 elif m == 'dc':
1648 elif m == 'dc':
1649 f1, f2, fa, move, anc = args
1649 f1, f2, fa, move, anc = args
1650 flags = p2[f2].flags()
1650 flags = p2[f2].flags()
1651 if repo.ui.promptchoice(
1651 if repo.ui.promptchoice(
1652 _("other%(o)s changed %(f)s which local%(l)s deleted\n"
1652 _("other%(o)s changed %(f)s which local%(l)s deleted\n"
1653 "use (c)hanged version or leave (d)eleted?"
1653 "use (c)hanged version or leave (d)eleted?"
1654 "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
1654 "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
1655 actionbyfile[f] = ('g', (flags, False), "prompt recreating")
1655 actionbyfile[f] = ('g', (flags, False), "prompt recreating")
1656 else:
1656 else:
1657 del actionbyfile[f]
1657 del actionbyfile[f]
1658
1658
1659 # Convert to dictionary-of-lists format
1659 # Convert to dictionary-of-lists format
1660 actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
1660 actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
1661 for f, (m, args, msg) in actionbyfile.iteritems():
1661 for f, (m, args, msg) in actionbyfile.iteritems():
1662 if m not in actions:
1662 if m not in actions:
1663 actions[m] = []
1663 actions[m] = []
1664 actions[m].append((f, args, msg))
1664 actions[m].append((f, args, msg))
1665
1665
1666 if not util.fscasesensitive(repo.path):
1666 if not util.fscasesensitive(repo.path):
1667 # check collision between files only in p2 for clean update
1667 # check collision between files only in p2 for clean update
1668 if (not branchmerge and
1668 if (not branchmerge and
1669 (force or not wc.dirty(missing=True, branch=False))):
1669 (force or not wc.dirty(missing=True, branch=False))):
1670 _checkcollision(repo, p2.manifest(), None)
1670 _checkcollision(repo, p2.manifest(), None)
1671 else:
1671 else:
1672 _checkcollision(repo, wc.manifest(), actions)
1672 _checkcollision(repo, wc.manifest(), actions)
1673
1673
1674 # divergent renames
1674 # divergent renames
1675 for f, fl in sorted(diverge.iteritems()):
1675 for f, fl in sorted(diverge.iteritems()):
1676 repo.ui.warn(_("note: possible conflict - %s was renamed "
1676 repo.ui.warn(_("note: possible conflict - %s was renamed "
1677 "multiple times to:\n") % f)
1677 "multiple times to:\n") % f)
1678 for nf in fl:
1678 for nf in fl:
1679 repo.ui.warn(" %s\n" % nf)
1679 repo.ui.warn(" %s\n" % nf)
1680
1680
1681 # rename and delete
1681 # rename and delete
1682 for f, fl in sorted(renamedelete.iteritems()):
1682 for f, fl in sorted(renamedelete.iteritems()):
1683 repo.ui.warn(_("note: possible conflict - %s was deleted "
1683 repo.ui.warn(_("note: possible conflict - %s was deleted "
1684 "and renamed to:\n") % f)
1684 "and renamed to:\n") % f)
1685 for nf in fl:
1685 for nf in fl:
1686 repo.ui.warn(" %s\n" % nf)
1686 repo.ui.warn(" %s\n" % nf)
1687
1687
1688 ### apply phase
1688 ### apply phase
1689 if not branchmerge: # just jump to the new rev
1689 if not branchmerge: # just jump to the new rev
1690 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1690 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1691 if not partial:
1691 if not partial:
1692 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1692 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1693 # note that we're in the middle of an update
1693 # note that we're in the middle of an update
1694 repo.vfs.write('updatestate', p2.hex())
1694 repo.vfs.write('updatestate', p2.hex())
1695
1695
1696 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1696 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1697
1697
1698 if not partial:
1698 if not partial:
1699 with repo.dirstate.parentchange():
1699 with repo.dirstate.parentchange():
1700 repo.setparents(fp1, fp2)
1700 repo.setparents(fp1, fp2)
1701 recordupdates(repo, actions, branchmerge)
1701 recordupdates(repo, actions, branchmerge)
1702 # update completed, clear state
1702 # update completed, clear state
1703 util.unlink(repo.vfs.join('updatestate'))
1703 util.unlink(repo.vfs.join('updatestate'))
1704
1704
1705 if not branchmerge:
1705 if not branchmerge:
1706 repo.dirstate.setbranch(p2.branch())
1706 repo.dirstate.setbranch(p2.branch())
1707
1707
1708 if not partial:
1708 if not partial:
1709 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1709 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1710 return stats
1710 return stats
1711
1711
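# The dirty-update gate above, isolated as a sketch: a nonlinear update with
# local changes is only allowed when the target lies in the "foreground" of
# the working parent. Here the foreground is computed by chasing successor
# links only -- a simplification of obsutil.foreground, which also walks
# descendants.
def _allow_dirty_update_sketch(target, parent, successors):
    foreground = {parent}
    changed = True
    while changed:
        extra = {s for n in foreground for s in successors.get(n, ())}
        changed = not extra <= foreground
        foreground |= extra
    return target in foreground

# B obsoletes the working parent A, so updating to B is allowed; C is not a
# successor, so a dirty nonlinear update to it aborts
assert _allow_dirty_update_sketch('B', 'A', {'A': ['B']})
assert not _allow_dirty_update_sketch('C', 'A', {'A': ['B']})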
1712 def graft(repo, ctx, pctx, labels, keepparent=False):
1712 def graft(repo, ctx, pctx, labels, keepparent=False):
1713 """Do a graft-like merge.
1713 """Do a graft-like merge.
1714
1714
1715 This is a merge where the merge ancestor is chosen such that one
1715 This is a merge where the merge ancestor is chosen such that one
1716 or more changesets are grafted onto the current changeset. In
1716 or more changesets are grafted onto the current changeset. In
1717 addition to the merge, this fixes up the dirstate to include only
1717 addition to the merge, this fixes up the dirstate to include only
1718 a single parent (if keepparent is False) and tries to duplicate any
1718 a single parent (if keepparent is False) and tries to duplicate any
1719 renames/copies appropriately.
1719 renames/copies appropriately.
1720
1720
1721 ctx - changeset to rebase
1721 ctx - changeset to rebase
1722 pctx - merge base, usually ctx.p1()
1722 pctx - merge base, usually ctx.p1()
1723 labels - merge labels eg ['local', 'graft']
1723 labels - merge labels eg ['local', 'graft']
1724 keepparent - keep second parent if any
1724 keepparent - keep second parent if any
1725
1725
1726 """
1726 """
1727 # If we're grafting a descendant onto an ancestor, be sure to pass
1727 # If we're grafting a descendant onto an ancestor, be sure to pass
1728 # mergeancestor=True to update. This does two things: 1) allows the merge if
1728 # mergeancestor=True to update. This does two things: 1) allows the merge if
1729 # the destination is the same as the parent of the ctx (so we can use graft
1729 # the destination is the same as the parent of the ctx (so we can use graft
1730 # to copy commits), and 2) informs update that the incoming changes are
1730 # to copy commits), and 2) informs update that the incoming changes are
1731 # newer than the destination so it doesn't prompt about "remote changed foo
1731 # newer than the destination so it doesn't prompt about "remote changed foo
1732 # which local deleted".
1732 # which local deleted".
1733 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1733 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1734
1734
1735 stats = update(repo, ctx.node(), True, True, pctx.node(),
1735 stats = update(repo, ctx.node(), True, True, pctx.node(),
1736 mergeancestor=mergeancestor, labels=labels)
1736 mergeancestor=mergeancestor, labels=labels)
1737
1737
1738 pother = nullid
1738 pother = nullid
1739 parents = ctx.parents()
1739 parents = ctx.parents()
1740 if keepparent and len(parents) == 2 and pctx in parents:
1740 if keepparent and len(parents) == 2 and pctx in parents:
1741 parents.remove(pctx)
1741 parents.remove(pctx)
1742 pother = parents[0].node()
1742 pother = parents[0].node()
1743
1743
1744 with repo.dirstate.parentchange():
1744 with repo.dirstate.parentchange():
1745 repo.setparents(repo['.'].node(), pother)
1745 repo.setparents(repo['.'].node(), pother)
1746 repo.dirstate.write(repo.currenttransaction())
1746 repo.dirstate.write(repo.currenttransaction())
1747 # fix up dirstate for copies and renames
1747 # fix up dirstate for copies and renames
1748 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1748 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1749 return stats
1749 return stats
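# A sketch of the ancestry probe behind graft's mergeancestor flag, on a toy
# DAG: graft passes mergeancestor=True exactly when the working parent is an
# ancestor of the grafted changeset. This reimplements the idea, not the
# real changelog.isancestor.
def _isancestor_sketch(a, b, parents):
    # True if a is reachable from b (inclusive) through the parents mapping
    seen, stack = set(), [b]
    while stack:
        n = stack.pop()
        if n == a:
            return True
        if n not in seen:
            seen.add(n)
            stack.extend(parents.get(n, ()))
    return False

dag = {'C': ['B'], 'B': ['A']}
assert _isancestor_sketch('A', 'C', dag)       # grafting descendant C onto A
assert not _isancestor_sketch('C', 'A', dag)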
@@ -1,1115 +1,1094
1 # obsolete.py - obsolete markers handling
1 # obsolete.py - obsolete markers handling
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Obsolete marker handling
9 """Obsolete marker handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewrite operations, and to help
17 transformations performed by history rewrite operations, and to help
18 build new tools to reconcile conflicting rewrite actions. To
18 build new tools to reconcile conflicting rewrite actions. To
19 facilitate conflict resolution, markers include various annotations
19 facilitate conflict resolution, markers include various annotations
20 besides old and new changeset identifiers, such as creation date or
20 besides old and new changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called a "precursor" and possible
23 The old obsoleted changeset is called a "precursor" and possible
24 replacements are called "successors". Markers that use changeset X as
24 replacements are called "successors". Markers that use changeset X as
25 a precursor are called "successor markers of X" because they hold
25 a precursor are called "successor markers of X" because they hold
26 information about the successors of X. Markers that use changeset Y as
26 information about the successors of X. Markers that use changeset Y as
27 a successor are called "precursor markers of Y" because they hold
27 a successor are called "precursor markers of Y" because they hold
28 information about the precursors of Y.
28 information about the precursors of Y.
29
29
30 Examples:
30 Examples:
31
31
32 - When changeset A is replaced by changeset A', one marker is stored:
32 - When changeset A is replaced by changeset A', one marker is stored:
33
33
34 (A, (A',))
34 (A, (A',))
35
35
36 - When changesets A and B are folded into a new changeset C, two markers are
36 - When changesets A and B are folded into a new changeset C, two markers are
37 stored:
37 stored:
38
38
39 (A, (C,)) and (B, (C,))
39 (A, (C,)) and (B, (C,))
40
40
41 - When changeset A is simply "pruned" from the graph, a marker is created:
41 - When changeset A is simply "pruned" from the graph, a marker is created:
42
42
43 (A, ())
43 (A, ())
44
44
45 - When changeset A is split into B and C, a single marker is used:
45 - When changeset A is split into B and C, a single marker is used:
46
46
47 (A, (B, C))
47 (A, (B, C))
48
48
49 We use a single marker to distinguish the "split" case from the "divergence"
49 We use a single marker to distinguish the "split" case from the "divergence"
50 case. If two independent operations rewrite the same changeset A into A' and
50 case. If two independent operations rewrite the same changeset A into A' and
51 A'', we have an error case: divergent rewriting. We can detect it because
51 A'', we have an error case: divergent rewriting. We can detect it because
52 two markers will be created independently:
52 two markers will be created independently:
53
53
54 (A, (B,)) and (A, (C,))
54 (A, (B,)) and (A, (C,))
55
55
56 Format
56 Format
57 ------
57 ------
58
58
59 Markers are stored in an append-only file stored in
59 Markers are stored in an append-only file stored in
60 '.hg/store/obsstore'.
60 '.hg/store/obsstore'.
61
61
62 The file starts with a version header:
62 The file starts with a version header:
63
63
64 - 1 unsigned byte: version number, starting at zero.
64 - 1 unsigned byte: version number, starting at zero.
65
65
66 The header is followed by the markers. The marker format depends on the
66 The header is followed by the markers. The marker format depends on the
67 version. See the comment associated with each format for details.
67 version. See the comment associated with each format for details.
68
68
69 """
69 """
70 from __future__ import absolute_import
70 from __future__ import absolute_import
71
71
72 import errno
72 import errno
73 import struct
73 import struct
74
74
75 from .i18n import _
75 from .i18n import _
76 from . import (
76 from . import (
77 error,
77 error,
78 node,
78 node,
79 obsutil,
79 obsutil,
80 phases,
80 phases,
81 policy,
81 policy,
82 util,
82 util,
83 )
83 )
84
84
85 parsers = policy.importmod(r'parsers')
85 parsers = policy.importmod(r'parsers')
86
86
87 _pack = struct.pack
87 _pack = struct.pack
88 _unpack = struct.unpack
88 _unpack = struct.unpack
89 _calcsize = struct.calcsize
89 _calcsize = struct.calcsize
90 propertycache = util.propertycache
90 propertycache = util.propertycache
91
91
92 # the obsolete feature is not mature enough to be enabled by default.
92 # the obsolete feature is not mature enough to be enabled by default.
93 # you have to rely on a third party extension to enable this.
93 # you have to rely on a third party extension to enable this.
94 _enabled = False
94 _enabled = False
95
95
96 # Options for obsolescence
96 # Options for obsolescence
97 createmarkersopt = 'createmarkers'
97 createmarkersopt = 'createmarkers'
98 allowunstableopt = 'allowunstable'
98 allowunstableopt = 'allowunstable'
99 exchangeopt = 'exchange'
99 exchangeopt = 'exchange'
100
100
101 def isenabled(repo, option):
101 def isenabled(repo, option):
102 """Returns True if the given repository has the given obsolete option
102 """Returns True if the given repository has the given obsolete option
103 enabled.
103 enabled.
104 """
104 """
105 result = set(repo.ui.configlist('experimental', 'evolution'))
105 result = set(repo.ui.configlist('experimental', 'evolution'))
106 if 'all' in result:
106 if 'all' in result:
107 return True
107 return True
108
108
109 # For migration purposes, temporarily return true if the config hasn't been
109 # For migration purposes, temporarily return true if the config hasn't been
110 # set but _enabled is true.
110 # set but _enabled is true.
111 if len(result) == 0 and _enabled:
111 if len(result) == 0 and _enabled:
112 return True
112 return True
113
113
114 # createmarkers must be enabled if other options are enabled
114 # createmarkers must be enabled if other options are enabled
115 if ((allowunstableopt in result or exchangeopt in result) and
115 if ((allowunstableopt in result or exchangeopt in result) and
116 not createmarkersopt in result):
116 not createmarkersopt in result):
117 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
117 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
118 "if other obsolete options are enabled"))
118 "if other obsolete options are enabled"))
119
119
120 return option in result
120 return option in result
121
121
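For example, with a configuration such as the following (an illustrative hgrc snippet), `isenabled(repo, createmarkersopt)` and `isenabled(repo, exchangeopt)` both return True, while `isenabled(repo, allowunstableopt)` returns False:

    [experimental]
    evolution = createmarkers, exchange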
122 ### obsolescence marker flag
122 ### obsolescence marker flag
123
123
124 ## bumpedfix flag
124 ## bumpedfix flag
125 #
125 #
126 # When a changeset A' succeeds a changeset A which became public, we call A'
126 # When a changeset A' succeeds a changeset A which became public, we call A'
127 # "bumped" because it's a successor of a public changeset
127 # "bumped" because it's a successor of a public changeset
128 #
128 #
129 # o A' (bumped)
129 # o A' (bumped)
130 # |`:
130 # |`:
131 # | o A
131 # | o A
132 # |/
132 # |/
133 # o Z
133 # o Z
134 #
134 #
135 # The way to solve this situation is to create a new changeset Ad as a child
135 # The way to solve this situation is to create a new changeset Ad as a child
136 # of A. This changeset has the same content as A'. So the diff from A to A'
136 # of A. This changeset has the same content as A'. So the diff from A to A'
137 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
137 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
138 #
138 #
139 # o Ad
139 # o Ad
140 # |`:
140 # |`:
141 # | x A'
141 # | x A'
142 # |'|
142 # |'|
143 # o | A
143 # o | A
144 # |/
144 # |/
145 # o Z
145 # o Z
146 #
146 #
147 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
147 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
148 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
148 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
149 # This flag means that the successors express the changes between the public and
149 # This flag means that the successors express the changes between the public and
150 # bumped version and fix the situation, breaking the transitivity of
150 # bumped version and fix the situation, breaking the transitivity of
151 # "bumped" here.
151 # "bumped" here.
152 bumpedfix = 1
152 bumpedfix = 1
153 usingsha256 = 2
153 usingsha256 = 2
154
154
155 ## Parsing and writing of version "0"
155 ## Parsing and writing of version "0"
156 #
156 #
157 # The header is followed by the markers. Each marker is made of:
157 # The header is followed by the markers. Each marker is made of:
158 #
158 #
159 # - 1 uint8 : number of new changesets "N", can be zero.
159 # - 1 uint8 : number of new changesets "N", can be zero.
160 #
160 #
161 # - 1 uint32: metadata size "M" in bytes.
161 # - 1 uint32: metadata size "M" in bytes.
162 #
162 #
163 # - 1 byte: a bit field. It is reserved for flags used in common
163 # - 1 byte: a bit field. It is reserved for flags used in common
164 # obsolete marker operations, to avoid repeated decoding of metadata
164 # obsolete marker operations, to avoid repeated decoding of metadata
165 # entries.
165 # entries.
166 #
166 #
167 # - 20 bytes: obsoleted changeset identifier.
167 # - 20 bytes: obsoleted changeset identifier.
168 #
168 #
169 # - N*20 bytes: new changesets identifiers.
169 # - N*20 bytes: new changesets identifiers.
170 #
170 #
171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
172 # string contains a key and a value, separated by a colon ':', without
172 # string contains a key and a value, separated by a colon ':', without
173 # additional encoding. Keys cannot contain '\0' or ':' and values
173 # additional encoding. Keys cannot contain '\0' or ':' and values
174 # cannot contain '\0'.
174 # cannot contain '\0'.
175 _fm0version = 0
175 _fm0version = 0
176 _fm0fixed = '>BIB20s'
176 _fm0fixed = '>BIB20s'
177 _fm0node = '20s'
177 _fm0node = '20s'
178 _fm0fsize = _calcsize(_fm0fixed)
178 _fm0fsize = _calcsize(_fm0fixed)
179 _fm0fnodesize = _calcsize(_fm0node)
179 _fm0fnodesize = _calcsize(_fm0node)
180
180
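Before diving into the parser, the fixed part described above can be round-tripped directly with the struct module (standalone sketch with made-up values):

    import struct

    prec = b'\x11' * 20  # fake 20-byte precursor node
    raw = struct.pack('>BIB20s', 2, 7, 0, prec)  # numsuc=2, mdsize=7, flags=0
    assert len(raw) == struct.calcsize('>BIB20s') == 26
    numsuc, mdsize, flags, pre = struct.unpack('>BIB20s', raw)
    assert (numsuc, mdsize, flags, pre) == (2, 7, 0, prec)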
181 def _fm0readmarkers(data, off):
181 def _fm0readmarkers(data, off):
182 # Loop on markers
182 # Loop on markers
183 l = len(data)
183 l = len(data)
184 while off + _fm0fsize <= l:
184 while off + _fm0fsize <= l:
185 # read fixed part
185 # read fixed part
186 cur = data[off:off + _fm0fsize]
186 cur = data[off:off + _fm0fsize]
187 off += _fm0fsize
187 off += _fm0fsize
188 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
188 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
189 # read replacement
189 # read replacement
190 sucs = ()
190 sucs = ()
191 if numsuc:
191 if numsuc:
192 s = (_fm0fnodesize * numsuc)
192 s = (_fm0fnodesize * numsuc)
193 cur = data[off:off + s]
193 cur = data[off:off + s]
194 sucs = _unpack(_fm0node * numsuc, cur)
194 sucs = _unpack(_fm0node * numsuc, cur)
195 off += s
195 off += s
196 # read metadata
196 # read metadata
197 # (metadata will be decoded on demand)
197 # (metadata will be decoded on demand)
198 metadata = data[off:off + mdsize]
198 metadata = data[off:off + mdsize]
199 if len(metadata) != mdsize:
199 if len(metadata) != mdsize:
200 raise error.Abort(_('parsing obsolete marker: metadata is too '
200 raise error.Abort(_('parsing obsolete marker: metadata is too '
201 'short, %d bytes expected, got %d')
201 'short, %d bytes expected, got %d')
202 % (mdsize, len(metadata)))
202 % (mdsize, len(metadata)))
203 off += mdsize
203 off += mdsize
204 metadata = _fm0decodemeta(metadata)
204 metadata = _fm0decodemeta(metadata)
205 try:
205 try:
206 when, offset = metadata.pop('date', '0 0').split(' ')
206 when, offset = metadata.pop('date', '0 0').split(' ')
207 date = float(when), int(offset)
207 date = float(when), int(offset)
208 except ValueError:
208 except ValueError:
209 date = (0., 0)
209 date = (0., 0)
210 parents = None
210 parents = None
211 if 'p2' in metadata:
211 if 'p2' in metadata:
212 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
212 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
213 elif 'p1' in metadata:
213 elif 'p1' in metadata:
214 parents = (metadata.pop('p1', None),)
214 parents = (metadata.pop('p1', None),)
215 elif 'p0' in metadata:
215 elif 'p0' in metadata:
216 parents = ()
216 parents = ()
217 if parents is not None:
217 if parents is not None:
218 try:
218 try:
219 parents = tuple(node.bin(p) for p in parents)
219 parents = tuple(node.bin(p) for p in parents)
220 # if parent content is not a nodeid, drop the data
220 # if parent content is not a nodeid, drop the data
221 for p in parents:
221 for p in parents:
222 if len(p) != 20:
222 if len(p) != 20:
223 parents = None
223 parents = None
224 break
224 break
225 except TypeError:
225 except TypeError:
226 # if content cannot be translated to nodeid drop the data.
226 # if content cannot be translated to nodeid drop the data.
227 parents = None
227 parents = None
228
228
229 metadata = tuple(sorted(metadata.iteritems()))
229 metadata = tuple(sorted(metadata.iteritems()))
230
230
231 yield (pre, sucs, flags, metadata, date, parents)
231 yield (pre, sucs, flags, metadata, date, parents)
232
232
233 def _fm0encodeonemarker(marker):
233 def _fm0encodeonemarker(marker):
234 pre, sucs, flags, metadata, date, parents = marker
234 pre, sucs, flags, metadata, date, parents = marker
235 if flags & usingsha256:
235 if flags & usingsha256:
236 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
236 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
237 metadata = dict(metadata)
237 metadata = dict(metadata)
238 time, tz = date
238 time, tz = date
239 metadata['date'] = '%r %i' % (time, tz)
239 metadata['date'] = '%r %i' % (time, tz)
240 if parents is not None:
240 if parents is not None:
241 if not parents:
241 if not parents:
242 # mark that we explicitly recorded no parents
242 # mark that we explicitly recorded no parents
243 metadata['p0'] = ''
243 metadata['p0'] = ''
244 for i, p in enumerate(parents, 1):
244 for i, p in enumerate(parents, 1):
245 metadata['p%i' % i] = node.hex(p)
245 metadata['p%i' % i] = node.hex(p)
246 metadata = _fm0encodemeta(metadata)
246 metadata = _fm0encodemeta(metadata)
247 numsuc = len(sucs)
247 numsuc = len(sucs)
248 format = _fm0fixed + (_fm0node * numsuc)
248 format = _fm0fixed + (_fm0node * numsuc)
249 data = [numsuc, len(metadata), flags, pre]
249 data = [numsuc, len(metadata), flags, pre]
250 data.extend(sucs)
250 data.extend(sucs)
251 return _pack(format, *data) + metadata
251 return _pack(format, *data) + metadata
252
252
253 def _fm0encodemeta(meta):
253 def _fm0encodemeta(meta):
254 """Return encoded metadata string to string mapping.
254 """Return encoded metadata string to string mapping.
255
255
256 Assume no ':' in key and no '\0' in both key and value."""
256 Assume no ':' in key and no '\0' in both key and value."""
257 for key, value in meta.iteritems():
257 for key, value in meta.iteritems():
258 if ':' in key or '\0' in key:
258 if ':' in key or '\0' in key:
259 raise ValueError("':' and '\0' are forbidden in metadata keys")
259 raise ValueError("':' and '\0' are forbidden in metadata keys")
260 if '\0' in value:
260 if '\0' in value:
261 raise ValueError("'\0' is forbidden in metadata values")
261 raise ValueError("'\0' is forbidden in metadata values")
262 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
262 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
263
263
264 def _fm0decodemeta(data):
264 def _fm0decodemeta(data):
265 """Return string to string dictionary from encoded version."""
265 """Return string to string dictionary from encoded version."""
266 d = {}
266 d = {}
267 for l in data.split('\0'):
267 for l in data.split('\0'):
268 if l:
268 if l:
269 key, value = l.split(':', 1) # values may legitimately contain ':'
269 key, value = l.split(':', 1) # values may legitimately contain ':'
270 d[key] = value
270 d[key] = value
271 return d
271 return d
272
272
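A round trip through the two helpers above behaves like this (sketch, assuming both functions are in scope; sorting makes the encoding deterministic):

    meta = {'user': 'alice', 'note': 'amended'}
    blob = _fm0encodemeta(meta)   # 'note:amended\x00user:alice'
    assert _fm0decodemeta(blob) == meta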
273 ## Parsing and writing of version "1"
273 ## Parsing and writing of version "1"
274 #
274 #
275 # The header is followed by the markers. Each marker is made of:
275 # The header is followed by the markers. Each marker is made of:
276 #
276 #
277 # - uint32: total size of the marker (including this field)
277 # - uint32: total size of the marker (including this field)
278 #
278 #
279 # - float64: date in seconds since epoch
279 # - float64: date in seconds since epoch
280 #
280 #
281 # - int16: timezone offset in minutes
281 # - int16: timezone offset in minutes
282 #
282 #
283 # - uint16: a bit field. It is reserved for flags used in common
283 # - uint16: a bit field. It is reserved for flags used in common
284 # obsolete marker operations, to avoid repeated decoding of metadata
284 # obsolete marker operations, to avoid repeated decoding of metadata
285 # entries.
285 # entries.
286 #
286 #
287 # - uint8: number of successors "N", can be zero.
287 # - uint8: number of successors "N", can be zero.
288 #
288 #
289 # - uint8: number of parents "P", can be zero.
289 # - uint8: number of parents "P", can be zero.
290 #
290 #
291 # 0: parents data stored but no parent,
291 # 0: parents data stored but no parent,
292 # 1: one parent stored,
292 # 1: one parent stored,
293 # 2: two parents stored,
293 # 2: two parents stored,
294 # 3: no parent data stored
294 # 3: no parent data stored
295 #
295 #
296 # - uint8: number of metadata entries M
296 # - uint8: number of metadata entries M
297 #
297 #
298 # - 20 or 32 bytes: precursor changeset identifier.
298 # - 20 or 32 bytes: precursor changeset identifier.
299 #
299 #
300 # - N*(20 or 32) bytes: successors changesets identifiers.
300 # - N*(20 or 32) bytes: successors changesets identifiers.
301 #
301 #
302 # - P*(20 or 32) bytes: parents of the precursors changesets.
302 # - P*(20 or 32) bytes: parents of the precursors changesets.
303 #
303 #
304 # - M*(uint8, uint8): size of all metadata entries (key and value)
304 # - M*(uint8, uint8): size of all metadata entries (key and value)
305 #
305 #
306 # - remaining bytes: the metadata, each (key, value) pair after the other.
306 # - remaining bytes: the metadata, each (key, value) pair after the other.
307 _fm1version = 1
307 _fm1version = 1
308 _fm1fixed = '>IdhHBBB20s'
308 _fm1fixed = '>IdhHBBB20s'
309 _fm1nodesha1 = '20s'
309 _fm1nodesha1 = '20s'
310 _fm1nodesha256 = '32s'
310 _fm1nodesha256 = '32s'
311 _fm1nodesha1size = _calcsize(_fm1nodesha1)
311 _fm1nodesha1size = _calcsize(_fm1nodesha1)
312 _fm1nodesha256size = _calcsize(_fm1nodesha256)
312 _fm1nodesha256size = _calcsize(_fm1nodesha256)
313 _fm1fsize = _calcsize(_fm1fixed)
313 _fm1fsize = _calcsize(_fm1fixed)
314 _fm1parentnone = 3
314 _fm1parentnone = 3
315 _fm1parentshift = 14
315 _fm1parentshift = 14
316 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
316 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
317 _fm1metapair = 'BB'
317 _fm1metapair = 'BB'
318 _fm1metapairsize = _calcsize('BB')
318 _fm1metapairsize = _calcsize('BB')
319
319
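These constants can be sanity-checked directly (standalone sketch):

    import struct

    # uint32 size + float64 date + int16 tz + uint16 flags
    # + three uint8 counters + 20-byte SHA-1 precursor
    assert struct.calcsize('>IdhHBBB20s') == 4 + 8 + 2 + 2 + 1 + 1 + 1 + 20 == 39
    # markers flagged with usingsha256 use 32-byte nodes instead of 20
    assert struct.calcsize('32s') == 32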
320 def _fm1purereadmarkers(data, off):
320 def _fm1purereadmarkers(data, off):
321 # make some global constants local for performance
321 # make some global constants local for performance
322 noneflag = _fm1parentnone
322 noneflag = _fm1parentnone
323 sha2flag = usingsha256
323 sha2flag = usingsha256
324 sha1size = _fm1nodesha1size
324 sha1size = _fm1nodesha1size
325 sha2size = _fm1nodesha256size
325 sha2size = _fm1nodesha256size
326 sha1fmt = _fm1nodesha1
326 sha1fmt = _fm1nodesha1
327 sha2fmt = _fm1nodesha256
327 sha2fmt = _fm1nodesha256
328 metasize = _fm1metapairsize
328 metasize = _fm1metapairsize
329 metafmt = _fm1metapair
329 metafmt = _fm1metapair
330 fsize = _fm1fsize
330 fsize = _fm1fsize
331 unpack = _unpack
331 unpack = _unpack
332
332
333 # Loop on markers
333 # Loop on markers
334 stop = len(data) - _fm1fsize
334 stop = len(data) - _fm1fsize
335 ufixed = struct.Struct(_fm1fixed).unpack
335 ufixed = struct.Struct(_fm1fixed).unpack
336
336
337 while off <= stop:
337 while off <= stop:
338 # read fixed part
338 # read fixed part
339 o1 = off + fsize
339 o1 = off + fsize
340 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
340 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
341
341
342 if flags & sha2flag:
342 if flags & sha2flag:
343 # FIXME: prec was read as a SHA1, needs to be amended
343 # FIXME: prec was read as a SHA1, needs to be amended
344
344
345 # read 0 or more successors
345 # read 0 or more successors
346 if numsuc == 1:
346 if numsuc == 1:
347 o2 = o1 + sha2size
347 o2 = o1 + sha2size
348 sucs = (data[o1:o2],)
348 sucs = (data[o1:o2],)
349 else:
349 else:
350 o2 = o1 + sha2size * numsuc
350 o2 = o1 + sha2size * numsuc
351 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
351 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
352
352
353 # read parents
353 # read parents
354 if numpar == noneflag:
354 if numpar == noneflag:
355 o3 = o2
355 o3 = o2
356 parents = None
356 parents = None
357 elif numpar == 1:
357 elif numpar == 1:
358 o3 = o2 + sha2size
358 o3 = o2 + sha2size
359 parents = (data[o2:o3],)
359 parents = (data[o2:o3],)
360 else:
360 else:
361 o3 = o2 + sha2size * numpar
361 o3 = o2 + sha2size * numpar
362 parents = unpack(sha2fmt * numpar, data[o2:o3])
362 parents = unpack(sha2fmt * numpar, data[o2:o3])
363 else:
363 else:
364 # read 0 or more successors
364 # read 0 or more successors
365 if numsuc == 1:
365 if numsuc == 1:
366 o2 = o1 + sha1size
366 o2 = o1 + sha1size
367 sucs = (data[o1:o2],)
367 sucs = (data[o1:o2],)
368 else:
368 else:
369 o2 = o1 + sha1size * numsuc
369 o2 = o1 + sha1size * numsuc
370 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
370 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
371
371
372 # read parents
372 # read parents
373 if numpar == noneflag:
373 if numpar == noneflag:
374 o3 = o2
374 o3 = o2
375 parents = None
375 parents = None
376 elif numpar == 1:
376 elif numpar == 1:
377 o3 = o2 + sha1size
377 o3 = o2 + sha1size
378 parents = (data[o2:o3],)
378 parents = (data[o2:o3],)
379 else:
379 else:
380 o3 = o2 + sha1size * numpar
380 o3 = o2 + sha1size * numpar
381 parents = unpack(sha1fmt * numpar, data[o2:o3])
381 parents = unpack(sha1fmt * numpar, data[o2:o3])
382
382
383 # read metadata
383 # read metadata
384 off = o3 + metasize * nummeta
384 off = o3 + metasize * nummeta
385 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
385 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
386 metadata = []
386 metadata = []
387 for idx in xrange(0, len(metapairsize), 2):
387 for idx in xrange(0, len(metapairsize), 2):
388 o1 = off + metapairsize[idx]
388 o1 = off + metapairsize[idx]
389 o2 = o1 + metapairsize[idx + 1]
389 o2 = o1 + metapairsize[idx + 1]
390 metadata.append((data[off:o1], data[o1:o2]))
390 metadata.append((data[off:o1], data[o1:o2]))
391 off = o2
391 off = o2
392
392
393 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
393 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
394
394
395 def _fm1encodeonemarker(marker):
395 def _fm1encodeonemarker(marker):
396 pre, sucs, flags, metadata, date, parents = marker
396 pre, sucs, flags, metadata, date, parents = marker
397 # determine node size
397 # determine node size
398 _fm1node = _fm1nodesha1
398 _fm1node = _fm1nodesha1
399 if flags & usingsha256:
399 if flags & usingsha256:
400 _fm1node = _fm1nodesha256
400 _fm1node = _fm1nodesha256
401 numsuc = len(sucs)
401 numsuc = len(sucs)
402 numextranodes = numsuc
402 numextranodes = numsuc
403 if parents is None:
403 if parents is None:
404 numpar = _fm1parentnone
404 numpar = _fm1parentnone
405 else:
405 else:
406 numpar = len(parents)
406 numpar = len(parents)
407 numextranodes += numpar
407 numextranodes += numpar
408 formatnodes = _fm1node * numextranodes
408 formatnodes = _fm1node * numextranodes
409 formatmeta = _fm1metapair * len(metadata)
409 formatmeta = _fm1metapair * len(metadata)
410 format = _fm1fixed + formatnodes + formatmeta
410 format = _fm1fixed + formatnodes + formatmeta
411 # the timezone offset is stored on disk in minutes, so convert from seconds
411 # the timezone offset is stored on disk in minutes, so convert from seconds
412 tz = date[1]//60
412 tz = date[1]//60
413 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
413 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
414 data.extend(sucs)
414 data.extend(sucs)
415 if parents is not None:
415 if parents is not None:
416 data.extend(parents)
416 data.extend(parents)
417 totalsize = _calcsize(format)
417 totalsize = _calcsize(format)
418 for key, value in metadata:
418 for key, value in metadata:
419 lk = len(key)
419 lk = len(key)
420 lv = len(value)
420 lv = len(value)
421 data.append(lk)
421 data.append(lk)
422 data.append(lv)
422 data.append(lv)
423 totalsize += lk + lv
423 totalsize += lk + lv
424 data[0] = totalsize
424 data[0] = totalsize
425 data = [_pack(format, *data)]
425 data = [_pack(format, *data)]
426 for key, value in metadata:
426 for key, value in metadata:
427 data.append(key)
427 data.append(key)
428 data.append(value)
428 data.append(value)
429 return ''.join(data)
429 return ''.join(data)
430
430
431 def _fm1readmarkers(data, off):
431 def _fm1readmarkers(data, off):
432 native = getattr(parsers, 'fm1readmarkers', None)
432 native = getattr(parsers, 'fm1readmarkers', None)
433 if not native:
433 if not native:
434 return _fm1purereadmarkers(data, off)
434 return _fm1purereadmarkers(data, off)
435 stop = len(data) - _fm1fsize
435 stop = len(data) - _fm1fsize
436 return native(data, off, stop)
436 return native(data, off, stop)
437
437
438 # mapping to read/write various marker formats
438 # mapping to read/write various marker formats
439 # <version> -> (decoder, encoder)
439 # <version> -> (decoder, encoder)
440 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
440 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
441 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
441 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
442
442
443 def _readmarkerversion(data):
443 def _readmarkerversion(data):
444 return _unpack('>B', data[0:1])[0]
444 return _unpack('>B', data[0:1])[0]
445
445
446 @util.nogc
446 @util.nogc
447 def _readmarkers(data):
447 def _readmarkers(data):
448 """Read and enumerate markers from raw data"""
448 """Read and enumerate markers from raw data"""
449 diskversion = _readmarkerversion(data)
449 diskversion = _readmarkerversion(data)
450 off = 1
450 off = 1
451 if diskversion not in formats:
451 if diskversion not in formats:
452 msg = _('parsing obsolete marker: unknown version %r') % diskversion
452 msg = _('parsing obsolete marker: unknown version %r') % diskversion
453 raise error.UnknownVersion(msg, version=diskversion)
453 raise error.UnknownVersion(msg, version=diskversion)
454 return diskversion, formats[diskversion][0](data, off)
454 return diskversion, formats[diskversion][0](data, off)
455
455
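Together with the `formats` table, this is all that is needed to decode a raw obsstore (sketch, assuming this module is in scope and the file documented above exists):

    with open('.hg/store/obsstore', 'rb') as f:
        data = f.read()
    version, markers = _readmarkers(data)
    for prec, sucs, flags, meta, date, parents in markers:
        print('%s -> %s' % (node.hex(prec), [node.hex(s) for s in sucs]))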
456 def encodeheader(version=_fm0version):
456 def encodeheader(version=_fm0version):
457 return _pack('>B', version)
457 return _pack('>B', version)
458
458
459 def encodemarkers(markers, addheader=False, version=_fm0version):
459 def encodemarkers(markers, addheader=False, version=_fm0version):
460 # Kept separate from flushmarkers(), it will be reused for
460 # Kept separate from flushmarkers(), it will be reused for
461 # markers exchange.
461 # markers exchange.
462 encodeone = formats[version][1]
462 encodeone = formats[version][1]
463 if addheader:
463 if addheader:
464 yield encodeheader(version)
464 yield encodeheader(version)
465 for marker in markers:
465 for marker in markers:
466 yield encodeone(marker)
466 yield encodeone(marker)
467
467
468
468
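`encodemarkers` and `_readmarkers` are inverses, as a quick sketch shows (made-up 20-byte nodes, format v0, Python 2 like the module itself, assuming both helpers are in scope):

    m = (b'\x01' * 20,          # precursor
         (b'\x02' * 20,),       # one successor
         0,                     # flags
         (('user', 'alice'),),  # metadata as sorted (key, value) pairs
         (0.0, 0),              # date
         None)                  # parents not recorded
    blob = ''.join(encodemarkers([m], addheader=True, version=_fm0version))
    version, decoded = _readmarkers(blob)
    assert version == _fm0version
    assert next(iter(decoded))[0] == m[0]  # same precursor comes back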
469 class marker(object):
469 class marker(object):
470 """Wrap obsolete marker raw data"""
470 """Wrap obsolete marker raw data"""
471
471
472 def __init__(self, repo, data):
472 def __init__(self, repo, data):
473 # the repo argument will be used to create changectx in later versions
473 # the repo argument will be used to create changectx in later versions
474 self._repo = repo
474 self._repo = repo
475 self._data = data
475 self._data = data
476 self._decodedmeta = None
476 self._decodedmeta = None
477
477
478 def __hash__(self):
478 def __hash__(self):
479 return hash(self._data)
479 return hash(self._data)
480
480
481 def __eq__(self, other):
481 def __eq__(self, other):
482 if type(other) != type(self):
482 if type(other) != type(self):
483 return False
483 return False
484 return self._data == other._data
484 return self._data == other._data
485
485
486 def precnode(self):
486 def precnode(self):
487 """Precursor changeset node identifier"""
487 """Precursor changeset node identifier"""
488 return self._data[0]
488 return self._data[0]
489
489
490 def succnodes(self):
490 def succnodes(self):
491 """List of successor changesets node identifiers"""
491 """List of successor changesets node identifiers"""
492 return self._data[1]
492 return self._data[1]
493
493
494 def parentnodes(self):
494 def parentnodes(self):
495 """Parents of the precursors (None if not recorded)"""
495 """Parents of the precursors (None if not recorded)"""
496 return self._data[5]
496 return self._data[5]
497
497
498 def metadata(self):
498 def metadata(self):
499 """Decoded metadata dictionary"""
499 """Decoded metadata dictionary"""
500 return dict(self._data[3])
500 return dict(self._data[3])
501
501
502 def date(self):
502 def date(self):
503 """Creation date as (unixtime, offset)"""
503 """Creation date as (unixtime, offset)"""
504 return self._data[4]
504 return self._data[4]
505
505
506 def flags(self):
506 def flags(self):
507 """The flags field of the marker"""
507 """The flags field of the marker"""
508 return self._data[2]
508 return self._data[2]
509
509
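Field access goes through the wrapper rather than tuple indexing; the repo is only stored, so a sketch can pass None (toy data, same tuple shape as above):

    raw = (b'\x01' * 20, (b'\x02' * 20,), 0,
           (('user', 'alice'),), (0.0, 0), None)
    m = marker(None, raw)
    assert m.precnode() == raw[0]
    assert m.succnodes() == raw[1]
    assert m.metadata() == {'user': 'alice'}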
510 @util.nogc
510 @util.nogc
511 def _addsuccessors(successors, markers):
511 def _addsuccessors(successors, markers):
512 for mark in markers:
512 for mark in markers:
513 successors.setdefault(mark[0], set()).add(mark)
513 successors.setdefault(mark[0], set()).add(mark)
514
514
515 @util.nogc
515 @util.nogc
516 def _addprecursors(precursors, markers):
516 def _addprecursors(precursors, markers):
517 for mark in markers:
517 for mark in markers:
518 for suc in mark[1]:
518 for suc in mark[1]:
519 precursors.setdefault(suc, set()).add(mark)
519 precursors.setdefault(suc, set()).add(mark)
520
520
521 @util.nogc
521 @util.nogc
522 def _addchildren(children, markers):
522 def _addchildren(children, markers):
523 for mark in markers:
523 for mark in markers:
524 parents = mark[5]
524 parents = mark[5]
525 if parents is not None:
525 if parents is not None:
526 for p in parents:
526 for p in parents:
527 children.setdefault(p, set()).add(mark)
527 children.setdefault(p, set()).add(mark)
528
528
529 def _checkinvalidmarkers(markers):
529 def _checkinvalidmarkers(markers):
530 """search for marker with invalid data and raise error if needed
530 """search for marker with invalid data and raise error if needed
531
531
532 Exists as a separate function to allow the evolve extension to do more
532 Exists as a separate function to allow the evolve extension to do more
533 subtle handling.
533 subtle handling.
534 """
534 """
535 for mark in markers:
535 for mark in markers:
536 if node.nullid in mark[1]:
536 if node.nullid in mark[1]:
537 raise error.Abort(_('bad obsolescence marker detected: '
537 raise error.Abort(_('bad obsolescence marker detected: '
538 'invalid successors nullid'))
538 'invalid successors nullid'))
539
539
540 class obsstore(object):
540 class obsstore(object):
541 """Store obsolete markers
541 """Store obsolete markers
542
542
543 Markers can be accessed with three mappings:
543 Markers can be accessed with three mappings:
544 - precursors[x] -> set(markers on precursor edges of x)
544 - precursors[x] -> set(markers on precursor edges of x)
545 - successors[x] -> set(markers on successor edges of x)
545 - successors[x] -> set(markers on successor edges of x)
546 - children[x] -> set(markers on precursor edges of children(x))
546 - children[x] -> set(markers on precursor edges of children(x))
547 """
547 """
548
548
549 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
549 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
550 # prec: nodeid, precursor changesets
550 # prec: nodeid, precursor changesets
551 # succs: tuple of nodeid, successor changesets (0-N length)
551 # succs: tuple of nodeid, successor changesets (0-N length)
552 # flag: integer, flag field carrying modifier for the markers (see doc)
552 # flag: integer, flag field carrying modifier for the markers (see doc)
553 # meta: binary blob, encoded metadata dictionary
553 # meta: binary blob, encoded metadata dictionary
554 # date: (float, int) tuple, date of marker creation
554 # date: (float, int) tuple, date of marker creation
555 # parents: (tuple of nodeid) or None, parents of precursors
555 # parents: (tuple of nodeid) or None, parents of precursors
556 # None is used when no data has been recorded
556 # None is used when no data has been recorded
557
557
558 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
558 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
559 # caches for various obsolescence related data
559 # caches for various obsolescence related data
560 self.caches = {}
560 self.caches = {}
561 self.svfs = svfs
561 self.svfs = svfs
562 self._defaultformat = defaultformat
562 self._defaultformat = defaultformat
563 self._readonly = readonly
563 self._readonly = readonly
564
564
565 def __iter__(self):
565 def __iter__(self):
566 return iter(self._all)
566 return iter(self._all)
567
567
568 def __len__(self):
568 def __len__(self):
569 return len(self._all)
569 return len(self._all)
570
570
571 def __nonzero__(self):
571 def __nonzero__(self):
572 if not self._cached('_all'):
572 if not self._cached('_all'):
573 try:
573 try:
574 return self.svfs.stat('obsstore').st_size > 1
574 return self.svfs.stat('obsstore').st_size > 1
575 except OSError as inst:
575 except OSError as inst:
576 if inst.errno != errno.ENOENT:
576 if inst.errno != errno.ENOENT:
577 raise
577 raise
578 # just build an empty _all list if no obsstore exists, which
578 # just build an empty _all list if no obsstore exists, which
579 # avoids further stat() syscalls
579 # avoids further stat() syscalls
580 pass
580 pass
581 return bool(self._all)
581 return bool(self._all)
582
582
583 __bool__ = __nonzero__
583 __bool__ = __nonzero__
584
584
585 @property
585 @property
586 def readonly(self):
586 def readonly(self):
587 """True if marker creation is disabled
587 """True if marker creation is disabled
588
588
589 Remove me in the future when obsolete markers are always on."""
589 Remove me in the future when obsolete markers are always on."""
590 return self._readonly
590 return self._readonly
591
591
592 def create(self, transaction, prec, succs=(), flag=0, parents=None,
592 def create(self, transaction, prec, succs=(), flag=0, parents=None,
593 date=None, metadata=None, ui=None):
593 date=None, metadata=None, ui=None):
594 """obsolete: add a new obsolete marker
594 """obsolete: add a new obsolete marker
595
595
596 * ensure it is hashable
596 * ensure it is hashable
597 * check mandatory metadata
597 * check mandatory metadata
598 * encode metadata
598 * encode metadata
599
599
600 If you are a human writing code that creates markers, you want to use the
600 If you are a human writing code that creates markers, you want to use the
601 `createmarkers` function in this module instead.
601 `createmarkers` function in this module instead.
602
602
603 return True if a new marker has been added, False if the marker
603 return True if a new marker has been added, False if the marker
604 already existed (no op).
604 already existed (no op).
605 """
605 """
606 if metadata is None:
606 if metadata is None:
607 metadata = {}
607 metadata = {}
608 if date is None:
608 if date is None:
609 if 'date' in metadata:
609 if 'date' in metadata:
610 # as a courtesy for out-of-tree extensions
610 # as a courtesy for out-of-tree extensions
611 date = util.parsedate(metadata.pop('date'))
611 date = util.parsedate(metadata.pop('date'))
612 elif ui is not None:
612 elif ui is not None:
613 date = ui.configdate('devel', 'default-date')
613 date = ui.configdate('devel', 'default-date')
614 if date is None:
614 if date is None:
615 date = util.makedate()
615 date = util.makedate()
616 else:
616 else:
617 date = util.makedate()
617 date = util.makedate()
618 if len(prec) != 20:
618 if len(prec) != 20:
619 raise ValueError(prec)
619 raise ValueError(prec)
620 for succ in succs:
620 for succ in succs:
621 if len(succ) != 20:
621 if len(succ) != 20:
622 raise ValueError(succ)
622 raise ValueError(succ)
623 if prec in succs:
623 if prec in succs:
624 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
624 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
625
625
626 metadata = tuple(sorted(metadata.iteritems()))
626 metadata = tuple(sorted(metadata.iteritems()))
627
627
628 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
628 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
629 return bool(self.add(transaction, [marker]))
629 return bool(self.add(transaction, [marker]))
630
630
631 def add(self, transaction, markers):
631 def add(self, transaction, markers):
632 """Add new markers to the store
632 """Add new markers to the store
633
633
634 Takes care of filtering out duplicates.
634 Takes care of filtering out duplicates.
635 Returns the number of new markers."""
635 Returns the number of new markers."""
636 if self._readonly:
636 if self._readonly:
637 raise error.Abort(_('creating obsolete markers is not enabled on '
637 raise error.Abort(_('creating obsolete markers is not enabled on '
638 'this repo'))
638 'this repo'))
639 known = set()
639 known = set()
640 getsuccessors = self.successors.get
640 getsuccessors = self.successors.get
641 new = []
641 new = []
642 for m in markers:
642 for m in markers:
643 if m not in getsuccessors(m[0], ()) and m not in known:
643 if m not in getsuccessors(m[0], ()) and m not in known:
644 known.add(m)
644 known.add(m)
645 new.append(m)
645 new.append(m)
646 if new:
646 if new:
647 f = self.svfs('obsstore', 'ab')
647 f = self.svfs('obsstore', 'ab')
648 try:
648 try:
649 offset = f.tell()
649 offset = f.tell()
650 transaction.add('obsstore', offset)
650 transaction.add('obsstore', offset)
651 # offset == 0: new file - add the version header
651 # offset == 0: new file - add the version header
652 for bytes in encodemarkers(new, offset == 0, self._version):
652 for bytes in encodemarkers(new, offset == 0, self._version):
653 f.write(bytes)
653 f.write(bytes)
654 finally:
654 finally:
655 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
655 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
656 # call 'filecacheentry.refresh()' here
656 # call 'filecacheentry.refresh()' here
657 f.close()
657 f.close()
658 self._addmarkers(new)
658 self._addmarkers(new)
659 # new markers *may* have changed several sets. invalidate the caches.
659 # new markers *may* have changed several sets. invalidate the caches.
660 self.caches.clear()
660 self.caches.clear()
661 # records the number of new markers for the transaction hooks
661 # records the number of new markers for the transaction hooks
662 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
662 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
663 transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
663 transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
664 return len(new)
664 return len(new)
665
665
666 def mergemarkers(self, transaction, data):
666 def mergemarkers(self, transaction, data):
667 """merge a binary stream of markers inside the obsstore
667 """merge a binary stream of markers inside the obsstore
668
668
669 Returns the number of new markers added."""
669 Returns the number of new markers added."""
670 version, markers = _readmarkers(data)
670 version, markers = _readmarkers(data)
671 return self.add(transaction, markers)
671 return self.add(transaction, markers)
672
672
673 @propertycache
673 @propertycache
674 def _data(self):
674 def _data(self):
675 return self.svfs.tryread('obsstore')
675 return self.svfs.tryread('obsstore')
676
676
677 @propertycache
677 @propertycache
678 def _version(self):
678 def _version(self):
679 if len(self._data) >= 1:
679 if len(self._data) >= 1:
680 return _readmarkerversion(self._data)
680 return _readmarkerversion(self._data)
681 else:
681 else:
682 return self._defaultformat
682 return self._defaultformat
683
683
684 @propertycache
684 @propertycache
685 def _all(self):
685 def _all(self):
686 data = self._data
686 data = self._data
687 if not data:
687 if not data:
688 return []
688 return []
689 self._version, markers = _readmarkers(data)
689 self._version, markers = _readmarkers(data)
690 markers = list(markers)
690 markers = list(markers)
691 _checkinvalidmarkers(markers)
691 _checkinvalidmarkers(markers)
692 return markers
692 return markers
693
693
694 @propertycache
694 @propertycache
695 def successors(self):
695 def successors(self):
696 successors = {}
696 successors = {}
697 _addsuccessors(successors, self._all)
697 _addsuccessors(successors, self._all)
698 return successors
698 return successors
699
699
700 @propertycache
700 @propertycache
701 def precursors(self):
701 def precursors(self):
702 precursors = {}
702 precursors = {}
703 _addprecursors(precursors, self._all)
703 _addprecursors(precursors, self._all)
704 return precursors
704 return precursors
705
705
706 @propertycache
706 @propertycache
707 def children(self):
707 def children(self):
708 children = {}
708 children = {}
709 _addchildren(children, self._all)
709 _addchildren(children, self._all)
710 return children
710 return children
711
711
712 def _cached(self, attr):
712 def _cached(self, attr):
713 return attr in self.__dict__
713 return attr in self.__dict__
714
714
715 def _addmarkers(self, markers):
715 def _addmarkers(self, markers):
716 markers = list(markers) # to allow repeated iteration
716 markers = list(markers) # to allow repeated iteration
717 self._all.extend(markers)
717 self._all.extend(markers)
718 if self._cached('successors'):
718 if self._cached('successors'):
719 _addsuccessors(self.successors, markers)
719 _addsuccessors(self.successors, markers)
720 if self._cached('precursors'):
720 if self._cached('precursors'):
721 _addprecursors(self.precursors, markers)
721 _addprecursors(self.precursors, markers)
722 if self._cached('children'):
722 if self._cached('children'):
723 _addchildren(self.children, markers)
723 _addchildren(self.children, markers)
724 _checkinvalidmarkers(markers)
724 _checkinvalidmarkers(markers)
725
725
726 def relevantmarkers(self, nodes):
726 def relevantmarkers(self, nodes):
727 """return a set of all obsolescence markers relevant to a set of nodes.
727 """return a set of all obsolescence markers relevant to a set of nodes.
728
728
729 "relevant" to a set of nodes mean:
729 "relevant" to a set of nodes mean:
730
730
731 - markers that use this changeset as a successor
731 - markers that use this changeset as a successor
732 - prune markers of direct children of this changeset
732 - prune markers of direct children of this changeset
733 - recursive application of the two rules on precursors of these markers
733 - recursive application of the two rules on precursors of these markers
734
734
735 It is a set so you cannot rely on order."""
735 It is a set so you cannot rely on order."""
736
736
737 pendingnodes = set(nodes)
737 pendingnodes = set(nodes)
738 seenmarkers = set()
738 seenmarkers = set()
739 seennodes = set(pendingnodes)
739 seennodes = set(pendingnodes)
740 precursorsmarkers = self.precursors
740 precursorsmarkers = self.precursors
741 succsmarkers = self.successors
741 succsmarkers = self.successors
742 children = self.children
742 children = self.children
743 while pendingnodes:
743 while pendingnodes:
744 direct = set()
744 direct = set()
745 for current in pendingnodes:
745 for current in pendingnodes:
746 direct.update(precursorsmarkers.get(current, ()))
746 direct.update(precursorsmarkers.get(current, ()))
747 pruned = [m for m in children.get(current, ()) if not m[1]]
747 pruned = [m for m in children.get(current, ()) if not m[1]]
748 direct.update(pruned)
748 direct.update(pruned)
749 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
749 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
750 direct.update(pruned)
750 direct.update(pruned)
751 direct -= seenmarkers
751 direct -= seenmarkers
752 pendingnodes = set([m[0] for m in direct])
752 pendingnodes = set([m[0] for m in direct])
753 seenmarkers |= direct
753 seenmarkers |= direct
754 pendingnodes -= seennodes
754 pendingnodes -= seennodes
755 seennodes |= pendingnodes
755 seennodes |= pendingnodes
756 return seenmarkers
756 return seenmarkers
757
757
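The loop above is a fixed-point computation: it repeatedly pulls in the precursor and prune markers of everything discovered so far until nothing new appears. The same idea, stripped of the prune handling and working over a plain dict (illustrative sketch):

    def walkprecursors(precursorsmarkers, nodes):
        # precursorsmarkers: {node: set of marker tuples}, as in obsstore
        pending, seen = set(nodes), set()
        while pending:
            direct = set()
            for n in pending:
                direct.update(precursorsmarkers.get(n, ()))
            direct -= seen          # only markers not processed yet
            seen |= direct
            pending = set(m[0] for m in direct)  # follow precursors
        return seen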
758 def makestore(ui, repo):
758 def makestore(ui, repo):
759 """Create an obsstore instance from a repo."""
759 """Create an obsstore instance from a repo."""
760 # read default format for new obsstore.
760 # read default format for new obsstore.
761 # developer config: format.obsstore-version
761 # developer config: format.obsstore-version
762 defaultformat = ui.configint('format', 'obsstore-version', None)
762 defaultformat = ui.configint('format', 'obsstore-version', None)
763 # rely on obsstore class default when possible.
763 # rely on obsstore class default when possible.
764 kwargs = {}
764 kwargs = {}
765 if defaultformat is not None:
765 if defaultformat is not None:
766 kwargs['defaultformat'] = defaultformat
766 kwargs['defaultformat'] = defaultformat
767 readonly = not isenabled(repo, createmarkersopt)
767 readonly = not isenabled(repo, createmarkersopt)
768 store = obsstore(repo.svfs, readonly=readonly, **kwargs)
768 store = obsstore(repo.svfs, readonly=readonly, **kwargs)
769 if store and readonly:
769 if store and readonly:
770 ui.warn(_('obsolete feature not enabled but %i markers found!\n')
770 ui.warn(_('obsolete feature not enabled but %i markers found!\n')
771 % len(list(store)))
771 % len(list(store)))
772 return store
772 return store
773
773
774 def commonversion(versions):
774 def commonversion(versions):
775 """Return the newest version listed in both versions and our local formats.
775 """Return the newest version listed in both versions and our local formats.
776
776
777 Returns None if no common version exists.
777 Returns None if no common version exists.
778 """
778 """
779 versions.sort(reverse=True)
779 versions.sort(reverse=True)
780 # search for the highest version known on both sides
780 # search for the highest version known on both sides
781 for v in versions:
781 for v in versions:
782 if v in formats:
782 if v in formats:
783 return v
783 return v
784 return None
784 return None
785
785
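For instance, with the local `formats` table knowing versions 0 and 1 (sketch):

    assert commonversion([1, 0]) == 1
    assert commonversion([0]) == 0
    assert commonversion([5]) is None  # a hypothetical future version only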
786 # arbitrarily picked to fit into the 8K limit from HTTP servers
786 # arbitrarily picked to fit into the 8K limit from HTTP servers
787 # you have to take into account:
787 # you have to take into account:
788 # - the version header
788 # - the version header
789 # - the base85 encoding
789 # - the base85 encoding
790 _maxpayload = 5300
790 _maxpayload = 5300
791
791
792 def _pushkeyescape(markers):
792 def _pushkeyescape(markers):
793 """encode markers into a dict suitable for pushkey exchange
793 """encode markers into a dict suitable for pushkey exchange
794
794
795 - binary data is base85 encoded
795 - binary data is base85 encoded
796 - split into chunks smaller than 5300 bytes"""
796 - split into chunks smaller than 5300 bytes"""
797 keys = {}
797 keys = {}
798 parts = []
798 parts = []
799 currentlen = _maxpayload * 2 # ensure we create a new part
799 currentlen = _maxpayload * 2 # ensure we create a new part
800 for marker in markers:
800 for marker in markers:
801 nextdata = _fm0encodeonemarker(marker)
801 nextdata = _fm0encodeonemarker(marker)
802 if (len(nextdata) + currentlen > _maxpayload):
802 if (len(nextdata) + currentlen > _maxpayload):
803 currentpart = []
803 currentpart = []
804 currentlen = 0
804 currentlen = 0
805 parts.append(currentpart)
805 parts.append(currentpart)
806 currentpart.append(nextdata)
806 currentpart.append(nextdata)
807 currentlen += len(nextdata)
807 currentlen += len(nextdata)
808 for idx, part in enumerate(reversed(parts)):
808 for idx, part in enumerate(reversed(parts)):
809 data = ''.join([_pack('>B', _fm0version)] + part)
809 data = ''.join([_pack('>B', _fm0version)] + part)
810 keys['dump%i' % idx] = util.b85encode(data)
810 keys['dump%i' % idx] = util.b85encode(data)
811 return keys
811 return keys
812
812
813 def listmarkers(repo):
813 def listmarkers(repo):
814 """List markers over pushkey"""
814 """List markers over pushkey"""
815 if not repo.obsstore:
815 if not repo.obsstore:
816 return {}
816 return {}
817 return _pushkeyescape(sorted(repo.obsstore))
817 return _pushkeyescape(sorted(repo.obsstore))
818
818
819 def pushmarker(repo, key, old, new):
819 def pushmarker(repo, key, old, new):
820 """Push markers over pushkey"""
820 """Push markers over pushkey"""
821 if not key.startswith('dump'):
821 if not key.startswith('dump'):
822 repo.ui.warn(_('unknown key: %r') % key)
822 repo.ui.warn(_('unknown key: %r') % key)
823 return False
823 return False
824 if old:
824 if old:
825 repo.ui.warn(_('unexpected old value for %r') % key)
825 repo.ui.warn(_('unexpected old value for %r') % key)
826 return False
826 return False
827 data = util.b85decode(new)
827 data = util.b85decode(new)
828 lock = repo.lock()
828 lock = repo.lock()
829 try:
829 try:
830 tr = repo.transaction('pushkey: obsolete markers')
830 tr = repo.transaction('pushkey: obsolete markers')
831 try:
831 try:
832 repo.obsstore.mergemarkers(tr, data)
832 repo.obsstore.mergemarkers(tr, data)
833 repo.invalidatevolatilesets()
833 repo.invalidatevolatilesets()
834 tr.close()
834 tr.close()
835 return True
835 return True
836 finally:
836 finally:
837 tr.release()
837 tr.release()
838 finally:
838 finally:
839 lock.release()
839 lock.release()
840
840
841 def getmarkers(repo, nodes=None, exclusive=False):
841 def getmarkers(repo, nodes=None, exclusive=False):
842 """returns markers known in a repository
842 """returns markers known in a repository
843
843
844 If <nodes> is specified, only markers "relevant" to those nodes are
844 If <nodes> is specified, only markers "relevant" to those nodes are
845 returned"""
845 returned"""
846 if nodes is None:
846 if nodes is None:
847 rawmarkers = repo.obsstore
847 rawmarkers = repo.obsstore
848 elif exclusive:
848 elif exclusive:
849 rawmarkers = obsutil.exclusivemarkers(repo, nodes)
849 rawmarkers = obsutil.exclusivemarkers(repo, nodes)
850 else:
850 else:
851 rawmarkers = repo.obsstore.relevantmarkers(nodes)
851 rawmarkers = repo.obsstore.relevantmarkers(nodes)
852
852
853 for markerdata in rawmarkers:
853 for markerdata in rawmarkers:
854 yield marker(repo, markerdata)
854 yield marker(repo, markerdata)
855
855
856 def relevantmarkers(repo, node):
856 def relevantmarkers(repo, node):
857 """all obsolete markers relevant to some revision"""
857 """all obsolete markers relevant to some revision"""
858 for markerdata in repo.obsstore.relevantmarkers([node]):
858 for markerdata in repo.obsstore.relevantmarkers([node]):
859 yield marker(repo, markerdata)
859 yield marker(repo, markerdata)
860
860
861
861
862 def precursormarkers(ctx):
862 def precursormarkers(ctx):
863 """obsolete marker marking this changeset as a successors"""
863 """obsolete marker marking this changeset as a successors"""
864 for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
864 for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
865 yield marker(ctx.repo(), data)
865 yield marker(ctx.repo(), data)
866
866
867 def successormarkers(ctx):
867 def successormarkers(ctx):
868 """obsolete marker making this changeset obsolete"""
868 """obsolete marker making this changeset obsolete"""
869 for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
869 for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
870 yield marker(ctx.repo(), data)
870 yield marker(ctx.repo(), data)
871
871
872 def foreground(repo, nodes):
873 """return all nodes in the "foreground" of other node
874
875 The foreground of a revision is anything reachable using the parent -> children
876 or precursor -> successor relations. It is very similar to "descendants" but
877 augmented with obsolescence information.
878
879 Beware that obsolescence cycles may arise in complex situations.
880 """
881 repo = repo.unfiltered()
882 foreground = set(repo.set('%ln::', nodes))
883 if repo.obsstore:
884 # We only need this complicated logic if there is obsolescence
885 # XXX will probably deserve an optimised revset.
886 nm = repo.changelog.nodemap
887 plen = -1
888 # compute the whole set of successors or descendants
889 while len(foreground) != plen:
890 plen = len(foreground)
891 succs = set(c.node() for c in foreground)
892 mutable = [c.node() for c in foreground if c.mutable()]
893 succs.update(obsutil.allsuccessors(repo.obsstore, mutable))
894 known = (n for n in succs if n in nm)
895 foreground = set(repo.set('%ln::', known))
896 return set(c.node() for c in foreground)
897
898 # keep compatibility for the 4.3 cycle
872 # keep compatibility for the 4.3 cycle
899 def allprecursors(obsstore, nodes, ignoreflags=0):
873 def allprecursors(obsstore, nodes, ignoreflags=0):
900 movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
874 movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
901 util.nouideprecwarn(movemsg, '4.3')
875 util.nouideprecwarn(movemsg, '4.3')
902 return obsutil.allprecursors(obsstore, nodes, ignoreflags)
876 return obsutil.allprecursors(obsstore, nodes, ignoreflags)
903
877
904 def allsuccessors(obsstore, nodes, ignoreflags=0):
878 def allsuccessors(obsstore, nodes, ignoreflags=0):
905 movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
879 movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
906 util.nouideprecwarn(movemsg, '4.3')
880 util.nouideprecwarn(movemsg, '4.3')
907 return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
881 return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
908
882
909 def exclusivemarkers(repo, nodes):
883 def exclusivemarkers(repo, nodes):
910 movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
884 movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
911 repo.ui.deprecwarn(movemsg, '4.3')
885 repo.ui.deprecwarn(movemsg, '4.3')
912 return obsutil.exclusivemarkers(repo, nodes)
886 return obsutil.exclusivemarkers(repo, nodes)
913
887
888 def foreground(repo, nodes):
889 movemsg = 'obsolete.foreground moved to obsutil.foreground'
890 repo.ui.deprecwarn(movemsg, '4.3')
891 return obsutil.foreground(repo, nodes)
892
914 def successorssets(repo, initialnode, cache=None):
893 def successorssets(repo, initialnode, cache=None):
915 movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
894 movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
916 repo.ui.deprecwarn(movemsg, '4.3')
895 repo.ui.deprecwarn(movemsg, '4.3')
917 return obsutil.successorssets(repo, initialnode, cache=cache)
896 return obsutil.successorssets(repo, initialnode, cache=cache)
918
897
919 # mapping of 'set-name' -> <function to compute this set>
898 # mapping of 'set-name' -> <function to compute this set>
920 cachefuncs = {}
899 cachefuncs = {}
921 def cachefor(name):
900 def cachefor(name):
922 """Decorator to register a function as computing the cache for a set"""
901 """Decorator to register a function as computing the cache for a set"""
923 def decorator(func):
902 def decorator(func):
924 if name in cachefuncs:
903 if name in cachefuncs:
925 msg = "duplicated registration for volatileset '%s' (existing: %r)"
904 msg = "duplicated registration for volatileset '%s' (existing: %r)"
926 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
905 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
927 cachefuncs[name] = func
906 cachefuncs[name] = func
928 return func
907 return func
929 return decorator
908 return decorator
930
909
931 def getrevs(repo, name):
910 def getrevs(repo, name):
932 """Return the set of revision that belong to the <name> set
911 """Return the set of revision that belong to the <name> set
933
912
934 Such access may compute the set and cache it for future use"""
913 Such access may compute the set and cache it for future use"""
935 repo = repo.unfiltered()
914 repo = repo.unfiltered()
936 if not repo.obsstore:
915 if not repo.obsstore:
937 return frozenset()
916 return frozenset()
938 if name not in repo.obsstore.caches:
917 if name not in repo.obsstore.caches:
939 repo.obsstore.caches[name] = cachefuncs[name](repo)
918 repo.obsstore.caches[name] = cachefuncs[name](repo)
940 return repo.obsstore.caches[name]
919 return repo.obsstore.caches[name]
941
920
942 # To keep things simple, we invalidate the obsolescence caches when:
921 # To keep things simple, we invalidate the obsolescence caches when:
943 #
922 #
944 # - a new changeset is added
923 # - a new changeset is added
945 # - public phase is changed
924 # - public phase is changed
946 # - obsolescence markers are added
925 # - obsolescence markers are added
947 # - strip is used on a repo
926 # - strip is used on a repo
948 def clearobscaches(repo):
927 def clearobscaches(repo):
949 """Remove all obsolescence related cache from a repo
928 """Remove all obsolescence related cache from a repo
950
929
951 This removes all caches in the obsstore if the obsstore already exists
930 This removes all caches in the obsstore if the obsstore already exists
952 on the repo.
931 on the repo.
953
932
954 (We could be smarter here given the exact event that triggers the cache
933 (We could be smarter here given the exact event that triggers the cache
955 clearing)"""
934 clearing)"""
956 # only clear caches if there is obsstore data in this repo
935 # only clear caches if there is obsstore data in this repo
957 if 'obsstore' in repo._filecache:
936 if 'obsstore' in repo._filecache:
958 repo.obsstore.caches.clear()
937 repo.obsstore.caches.clear()
959
938
960 def _mutablerevs(repo):
939 def _mutablerevs(repo):
961 """the set of mutable revision in the repository"""
940 """the set of mutable revision in the repository"""
962 return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
941 return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
963
942
964 @cachefor('obsolete')
943 @cachefor('obsolete')
965 def _computeobsoleteset(repo):
944 def _computeobsoleteset(repo):
966 """the set of obsolete revisions"""
945 """the set of obsolete revisions"""
967 getnode = repo.changelog.node
946 getnode = repo.changelog.node
968 notpublic = _mutablerevs(repo)
947 notpublic = _mutablerevs(repo)
969 isobs = repo.obsstore.successors.__contains__
948 isobs = repo.obsstore.successors.__contains__
970 obs = set(r for r in notpublic if isobs(getnode(r)))
949 obs = set(r for r in notpublic if isobs(getnode(r)))
971 return obs
950 return obs
972
951
973 @cachefor('unstable')
952 @cachefor('unstable')
974 def _computeunstableset(repo):
953 def _computeunstableset(repo):
975 """the set of non obsolete revisions with obsolete parents"""
954 """the set of non obsolete revisions with obsolete parents"""
976 pfunc = repo.changelog.parentrevs
955 pfunc = repo.changelog.parentrevs
977 mutable = _mutablerevs(repo)
956 mutable = _mutablerevs(repo)
978 obsolete = getrevs(repo, 'obsolete')
957 obsolete = getrevs(repo, 'obsolete')
979 others = mutable - obsolete
958 others = mutable - obsolete
980 unstable = set()
959 unstable = set()
981 for r in sorted(others):
960 for r in sorted(others):
982 # A rev is unstable if one of its parents is obsolete or unstable;
961 # A rev is unstable if one of its parents is obsolete or unstable;
983 # this works since we traverse in increasing rev order
962 # this works since we traverse in increasing rev order
984 for p in pfunc(r):
963 for p in pfunc(r):
985 if p in obsolete or p in unstable:
964 if p in obsolete or p in unstable:
986 unstable.add(r)
965 unstable.add(r)
987 break
966 break
988 return unstable
967 return unstable
989
968
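# Editor's illustration: the single increasing-rev-order pass above works
# because parents always have smaller rev numbers than their children. A
# self-contained toy with the same propagation rule (the real code also
# skips revs that are themselves obsolete):
_parents = {0: (), 1: (0,), 2: (1,), 3: (2,)}   # linear chain 0 -> 3
_obsolete = {1}                                  # rev 1 is obsolete
_unstable = set()
for _r in sorted(_parents):
    if any(p in _obsolete or p in _unstable for p in _parents[_r]):
        _unstable.add(_r)
assert _unstable == {2, 3}                       # 2 and 3 become unstable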
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete changesets with non-obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete changesets without non-obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revisions
        node = ctx.node()
        # (future) a cache of precursors may be worth it if splits are very
        # common
        for pnode in obsutil.allprecursors(repo.obsstore, [node],
                                           ignoreflags=bumpedfix):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public precursor
                bumped.add(rev)
                break  # Next draft!
    return bumped

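# Editor's illustration: a draft is "bumped" when any of its precursors is
# public. The same test on made-up node names:
_public = {'A0'}
_precursors = {'A1': ('A0',)}        # A1 rewrites the public A0
_bumped = [d for d in ['A1']
           if any(p in _public for p in _precursors.get(d, ()))]
assert _bumped == ['A1']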
@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision"""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue  # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[, {metadata}])
    tuples. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation',
                                      False)
    if useoperation and operation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Creating the marker causes the hidden cache to become
            # invalid, which causes recomputation when we ask for
            # prec.parents() above. Resulting in n^2 behavior. So let's
            # prepare all of the args first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
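# Editor's note: a hedged usage sketch; `repo`, `oldctx`, and `newctx` are
# assumed to exist. This mirrors what history-rewriting commands ultimately
# do when they call createmarkers():
#
#     # record that `oldctx` was rewritten as `newctx`
#     createmarkers(repo, [(oldctx, (newctx,))], operation='amend')
#
#     # record a prune (no successors); the marker then stores the parents
#     # of the pruned changeset
#     createmarkers(repo, [(oldctx, ())])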
@@ -1,409 +1,435
# obsutil.py - utility functions for obsolescence
#
# Copyright 2017 Boris Feld <boris.feld@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

def closestpredecessors(repo, nodeid):
    """yield the closest predecessors pointing to visible changectx nodes

    This function respects the repoview filtering; filtered revisions will be
    considered missing.
    """

    precursors = repo.obsstore.precursors
    stack = [nodeid]
    seen = set(stack)

    while stack:
        current = stack.pop()
        currentpreccs = precursors.get(current, ())

        for prec in currentpreccs:
            precnodeid = prec[0]

            # Basic cycle protection
            if precnodeid in seen:
                continue
            seen.add(precnodeid)

            if precnodeid in repo:
                yield precnodeid
            else:
                stack.append(precnodeid)

def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield a node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It
    includes the initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore markers flagged with the specified flags
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield a node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It
    includes the initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with the specified flags
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

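# Editor's illustration: both walkers are a plain breadth-first traversal of
# the marker graph. A toy run of allsuccessors() with simplified 3-tuple
# markers (real markers carry more fields, but only indexes 0..2 are read):
class _toystore(object):
    def __init__(self, successors):
        self.successors = successors

_markers = {'A': [('A', ('B',), 0)],      # A rewritten as B
            'B': [('B', ('C', 'D'), 0)]}  # B split into C and D
assert set(allsuccessors(_toystore(_markers), ['A'])) == {'A', 'B', 'C', 'D'}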
def _filterprunes(markers):
    """return a set with no prune markers"""
    return set(m for m in markers if m[1])

def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but to no other locally-known nodes

    This function computes the set of markers "exclusive" to a set of
    locally-known nodes. This means we walk the markers starting from <nodes>
    until we reach a locally-known precursor outside of <nodes>. Elements of
    <nodes> with locally-known successors outside of <nodes> are ignored
    (since their precursor markers are also relevant to these successors).

    For example:

    # (A0 rewritten as A1)
    #
    # A0 <-1- A1        # Marker "1" is exclusive to A1

    or

    # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
    #
    # <-1- A0 <-2- AX <-3- A1   # Markers "2,3" are exclusive to A1

    or

    # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
    #
    #          <-2- A1   # Marker "2" is exclusive to A0,A1
    #        /
    # <-1- A0
    #        \
    #          <-3- A2   # Marker "3" is exclusive to A0,A2
    #
    # in addition:
    #
    #  Markers "2,3" are exclusive to A1,A2
    #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned child is
    locally-known, since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parents of
    the pruned changesets, prune markers for locally-known changesets (with
    no successors) are considered exclusive to the pruned nodes. This allows
    stripping the prune markers (with the rest of the exclusive chain)
    alongside the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcuts to various useful items
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.precursors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return value of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for heads in the obshistory
    #
    # XXX we are ignoring all issues in regard to cycles for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prunes)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # marker already selected
                continue

            # If the marker is about the current node, select it
            #
            # (this delays the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally-known node?
            known = prec in nm
            # if locally-known and not in the <nodes> set, the traversal
            # stops here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to
            # this node. If we end up traversing these unselected markers
            # later, the node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers

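# Editor's note: a hedged sketch of the strip use-case from the docstring;
# `repo` and `strippednodes` are assumed to exist:
#
#     # markers that matter only to the stripped changesets can be deleted
#     # alongside them; every other marker must be kept
#     todelete = exclusivemarkers(repo, strippednodes)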
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using a parent ->
    children or a precursor -> successor relation. It is very similar to
    "descendants" but augmented with obsolescence information.

    Beware that obsolescence cycles may lead to unexpected results in
    complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)

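# Editor's note: the while loop above is a fixed-point computation that
# alternates successor-closure and descendant-closure until the set stops
# growing. The same shape in isolation (illustrative only):
def _fixedpoint(seed, expand):
    """Apply `expand` (which may only add elements) until the set is stable."""
    current = set(seed)
    while True:
        grown = expand(current)
        if len(grown) == len(current):
            return grown
        current = grown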
def successorssets(repo, initialnode, cache=None):
    """Return the set of all latest successors of the initial node

    The successors set of a changeset A is the group of revisions that
    succeed A. It succeeds A as a consistent whole, each revision being only
    a partial replacement. The successors set contains non-obsolete
    changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a
    valid successors set. Note that (A,) may be a valid successors set for
    changeset A (see below).

    In most cases, a changeset A will have a single element (e.g. the
    changeset A is replaced by A') in its successors set. Though, it is also
    common for a changeset A to have no elements in its successors set (e.g.
    the changeset has been pruned). Therefore, the returned list of
    successors sets will be [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e.
    [(A',), (A'',)].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the
    successors set will contain itself only, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned
    (obsoleted without any successors).

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the
    same time. The cache dictionary is updated in place. The caller is
    responsible for its life span. Code that makes multiple calls to
    `successorssets` *must* use this cache mechanism or suffer terrible
    performance.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # elements added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #    successors = directsuccessors(x)
    #    ss = [[]]
    #    for succ in directsuccessors(x):
    #        # product as in itertools cartesian product
    #        ss = product(ss, successorssets(succ))
    #    return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a node
    # is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know the successors sets of direct successors of
        #    CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know the successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT's successors sets and add them to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successor of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets
            #     of the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes the successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means
            #   split, a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed the successors
                            # sets of one of those successors, we add it to
                            # the `toproceed` stack and stop all work for
                            # this iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on
                # the successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a
                                    # successors set; first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicates and subsets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse()  # put small successors sets first
                cache[current] = final
    return cache[initialnode]
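# Editor's note: as the docstring insists, batched callers should share one
# cache dictionary. A hedged usage sketch (`repo` and `nodes` assumed to
# exist):
#
#     cache = {}
#     for node in nodes:
#         for succset in successorssets(repo, node, cache):
#             pass  # each `succset` is a tuple of successor nodes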