localrepo: extract bookmarkheads method to bookmarks.py...
Augie Fackler
r32381:b9942bc6 default
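Both hunks below make the same substitution at the two destutil.py call sites: the repository method repo.bookmarkheads(repo._activebookmark) becomes the module-level bookmarks.headsforactive(repo), which looks up the active bookmark itself. A minimal before/after sketch of the call-site migration (the function name divergentheads is illustrative, not part of the change):

    from mercurial import bookmarks

    def divergentheads(repo):
        # old spelling (method on the repository object):
        #     return repo.bookmarkheads(repo._activebookmark)
        # new spelling (function extracted into bookmarks.py):
        return bookmarks.headsforactive(repo)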
@@ -1,595 +1,617 @@
# Mercurial bookmark support code
#
# Copyright 2008 David Soria Parra <dsp@php.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno

from .i18n import _
from .node import (
    bin,
    hex,
)
from . import (
    encoding,
    error,
    lock as lockmod,
    obsolete,
    txnutil,
    util,
)

def _getbkfile(repo):
    """Hook so that extensions that mess with the store can hook bm storage.

    For core, this just handles whether we should see pending
    bookmarks or the committed ones. Other extensions (like share)
    may need to tweak this behavior further.
    """
    fp, pending = txnutil.trypending(repo.root, repo.vfs, 'bookmarks')
    return fp

class bmstore(dict):
    """Storage for bookmarks.

    This object should do all bookmark-related reads and writes, so
    that it's fairly simple to replace the storage underlying
    bookmarks without having to clone the logic surrounding
    bookmarks. This type also should manage the active bookmark, if
    any.

    This particular bmstore implementation stores bookmarks as
    {hash}\s{name}\n (the same format as localtags) in
    .hg/bookmarks. The mapping is stored as {name: nodeid}.
    """

    def __init__(self, repo):
        dict.__init__(self)
        self._repo = repo
        try:
            bkfile = _getbkfile(repo)
            for line in bkfile:
                line = line.strip()
                if not line:
                    continue
                if ' ' not in line:
                    repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
                                 % line)
                    continue
                sha, refspec = line.split(' ', 1)
                refspec = encoding.tolocal(refspec)
                try:
                    self[refspec] = repo.changelog.lookup(sha)
                except LookupError:
                    pass
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        self._clean = True
        self._active = _readactive(repo, self)
        self._aclean = True

    @property
    def active(self):
        return self._active

    @active.setter
    def active(self, mark):
        if mark is not None and mark not in self:
            raise AssertionError('bookmark %s does not exist!' % mark)

        self._active = mark
        self._aclean = False

    def __setitem__(self, *args, **kwargs):
        self._clean = False
        return dict.__setitem__(self, *args, **kwargs)

    def __delitem__(self, key):
        self._clean = False
        return dict.__delitem__(self, key)

    def recordchange(self, tr):
        """record that bookmarks have been changed in a transaction

        The transaction is then responsible for updating the file content."""
        tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
                            location='plain')
        tr.hookargs['bookmark_moved'] = '1'

    def _writerepo(self, repo):
        """Factored out for extensibility"""
        rbm = repo._bookmarks
        if rbm.active not in self:
            rbm.active = None
            rbm._writeactive()

        with repo.wlock():
            file_ = repo.vfs('bookmarks', 'w', atomictemp=True,
                             checkambig=True)
            try:
                self._write(file_)
            except: # re-raises
                file_.discard()
                raise
            finally:
                file_.close()

    def _writeactive(self):
        if self._aclean:
            return
        with self._repo.wlock():
            if self._active is not None:
                f = self._repo.vfs('bookmarks.current', 'w', atomictemp=True,
                                   checkambig=True)
                try:
                    f.write(encoding.fromlocal(self._active))
                finally:
                    f.close()
            else:
                self._repo.vfs.tryunlink('bookmarks.current')
        self._aclean = True

    def _write(self, fp):
        for name, node in self.iteritems():
            fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
        self._clean = True
        self._repo.invalidatevolatilesets()

    def expandname(self, bname):
        if bname == '.':
            if self.active:
                return self.active
            else:
                raise error.Abort(_("no active bookmark"))
        return bname

def _readactive(repo, marks):
    """
    Get the active bookmark. We can have an active bookmark that updates
    itself as we commit. This function returns the name of that bookmark.
    It is stored in .hg/bookmarks.current
    """
    mark = None
    try:
        file = repo.vfs('bookmarks.current')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return None
    try:
        # No readline() in osutil.posixfile, reading everything is
        # cheap.
        # Note that it's possible for readlines() here to raise
        # IOError, since we might be reading the active mark over
        # static-http which only tries to load the file when we try
        # to read from it.
        mark = encoding.tolocal((file.readlines() or [''])[0])
        if mark == '' or mark not in marks:
            mark = None
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return None
    finally:
        file.close()
    return mark

def activate(repo, mark):
    """
    Set the given bookmark to be 'active', meaning that this bookmark will
    follow new commits that are made.
    The name is recorded in .hg/bookmarks.current
    """
    repo._bookmarks.active = mark
    repo._bookmarks._writeactive()

def deactivate(repo):
    """
    Unset the active bookmark in this repository.
    """
    repo._bookmarks.active = None
    repo._bookmarks._writeactive()

def isactivewdirparent(repo):
    """
    Tell whether the 'active' bookmark (the one that follows new commits)
    points to one of the parents of the current working directory (wdir).

    While this is normally the case, it can on occasion be false; for example,
    immediately after a pull, the active bookmark can be moved to point
    to a place different from the wdir. This is solved by running `hg update`.
    """
    mark = repo._activebookmark
    marks = repo._bookmarks
    parents = [p.node() for p in repo[None].parents()]
    return (mark in marks and marks[mark] in parents)

def deletedivergent(repo, deletefrom, bm):
    '''Delete divergent versions of bm on nodes in deletefrom.

    Return True if at least one bookmark was deleted, False otherwise.'''
    deleted = False
    marks = repo._bookmarks
    divergent = [b for b in marks if b.split('@', 1)[0] == bm.split('@', 1)[0]]
    for mark in divergent:
        if mark == '@' or '@' not in mark:
            # can't be divergent by definition
            continue
        if mark and marks[mark] in deletefrom:
            if mark != bm:
                del marks[mark]
                deleted = True
    return deleted

+def headsforactive(repo):
+    """Given a repo with an active bookmark, return divergent bookmark nodes.
+
+    Args:
+      repo: A repository with an active bookmark.
+
+    Returns:
+      A list of binary node ids that is the full list of other
+      revisions with bookmarks divergent from the active bookmark. If
+      there were no divergent bookmarks, then this list will contain
+      only one entry.
+    """
+    if not repo._activebookmark:
+        raise ValueError(
+            'headsforactive() only makes sense with an active bookmark')
+    name = repo._activebookmark.split('@', 1)[0]
+    heads = []
+    for mark, n in repo._bookmarks.iteritems():
+        if mark.split('@', 1)[0] == name:
+            heads.append(n)
+    return heads
+
def calculateupdate(ui, repo, checkout):
    '''Return a tuple (targetrev, movemarkfrom) indicating the rev to
    check out and where to move the active bookmark from, if needed.'''
    movemarkfrom = None
    if checkout is None:
        activemark = repo._activebookmark
        if isactivewdirparent(repo):
            movemarkfrom = repo['.'].node()
        elif activemark:
            ui.status(_("updating to active bookmark %s\n") % activemark)
            checkout = activemark
    return (checkout, movemarkfrom)

def update(repo, parents, node):
    deletefrom = parents
    marks = repo._bookmarks
    update = False
    active = marks.active
    if not active:
        return False

    if marks[active] in parents:
        new = repo[node]
        divs = [repo[b] for b in marks
                if b.split('@', 1)[0] == active.split('@', 1)[0]]
        anc = repo.changelog.ancestors([new.rev()])
        deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
        if validdest(repo, repo[marks[active]], new):
            marks[active] = new.node()
            update = True

    if deletedivergent(repo, deletefrom, active):
        update = True

    if update:
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('bookmark')
            marks.recordchange(tr)
            tr.close()
        finally:
            lockmod.release(tr, lock)
    return update

def listbinbookmarks(repo):
    # We may try to list bookmarks on a repo type that does not
    # support it (e.g., statichttprepository).
    marks = getattr(repo, '_bookmarks', {})

    hasnode = repo.changelog.hasnode
    for k, v in marks.iteritems():
        # don't expose local divergent bookmarks
        if hasnode(v) and ('@' not in k or k.endswith('@')):
            yield k, v

def listbookmarks(repo):
    d = {}
    for book, node in listbinbookmarks(repo):
        d[book] = hex(node)
    return d

def pushbookmark(repo, key, old, new):
    w = l = tr = None
    try:
        w = repo.wlock()
        l = repo.lock()
        tr = repo.transaction('bookmarks')
        marks = repo._bookmarks
        existing = hex(marks.get(key, ''))
        if existing != old and existing != new:
            return False
        if new == '':
            del marks[key]
        else:
            if new not in repo:
                return False
            marks[key] = repo[new].node()
        marks.recordchange(tr)
        tr.close()
        return True
    finally:
        lockmod.release(tr, l, w)

def comparebookmarks(repo, srcmarks, dstmarks, targets=None):
    '''Compare bookmarks between srcmarks and dstmarks

    This returns a tuple "(addsrc, adddst, advsrc, advdst, diverge,
    differ, invalid, same)", where each element is a list of bookmarks
    as below:

    :addsrc: added on src side (removed on dst side, perhaps)
    :adddst: added on dst side (removed on src side, perhaps)
    :advsrc: advanced on src side
    :advdst: advanced on dst side
    :diverge: diverge
    :differ: changed, but the changeset referred to on src is unknown on dst
    :invalid: unknown on both sides
    :same: same on both sides

    Each element of the lists in the result tuple is a tuple
    "(bookmark name, changeset ID on source side, changeset ID on
    destination side)". Each changeset ID is a 40-hexadecimal-digit
    string or None.

    Changeset IDs of tuples in the "addsrc", "adddst", "differ" or
    "invalid" lists may be unknown for repo.

    If "targets" is specified, only bookmarks listed in it are
    examined.
    '''

    if targets:
        bset = set(targets)
    else:
        srcmarkset = set(srcmarks)
        dstmarkset = set(dstmarks)
        bset = srcmarkset | dstmarkset

    results = ([], [], [], [], [], [], [], [])
    addsrc = results[0].append
    adddst = results[1].append
    advsrc = results[2].append
    advdst = results[3].append
    diverge = results[4].append
    differ = results[5].append
    invalid = results[6].append
    same = results[7].append

    for b in sorted(bset):
        if b not in srcmarks:
            if b in dstmarks:
                adddst((b, None, dstmarks[b]))
            else:
                invalid((b, None, None))
        elif b not in dstmarks:
            addsrc((b, srcmarks[b], None))
        else:
            scid = srcmarks[b]
            dcid = dstmarks[b]
            if scid == dcid:
                same((b, scid, dcid))
            elif scid in repo and dcid in repo:
                sctx = repo[scid]
                dctx = repo[dcid]
                if sctx.rev() < dctx.rev():
                    if validdest(repo, sctx, dctx):
                        advdst((b, scid, dcid))
                    else:
                        diverge((b, scid, dcid))
                else:
                    if validdest(repo, dctx, sctx):
                        advsrc((b, scid, dcid))
                    else:
                        diverge((b, scid, dcid))
            else:
                # it is too expensive to examine in detail, in this case
                differ((b, scid, dcid))

    return results

def _diverge(ui, b, path, localmarks, remotenode):
    '''Return an appropriate diverged bookmark for the specified ``path``

    This returns None if it fails to assign any divergent
    bookmark name.

    This reuses an already existing "@number"-suffixed bookmark, if it
    refers to ``remotenode``.
    '''
    if b == '@':
        b = ''
    # try to use an @pathalias suffix
    # if an @pathalias already exists, we overwrite (update) it
    if path.startswith("file:"):
        path = util.url(path).path
    for p, u in ui.configitems("paths"):
        if u.startswith("file:"):
            u = util.url(u).path
        if path == u:
            return '%s@%s' % (b, p)

    # otherwise, assign a new, unique "@number" suffix
    for x in range(1, 100):
        n = '%s@%d' % (b, x)
        if n not in localmarks or localmarks[n] == remotenode:
            return n

    return None

def unhexlifybookmarks(marks):
    binremotemarks = {}
    for name, node in marks.items():
        binremotemarks[name] = bin(node)
    return binremotemarks

def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
    ui.debug("checking for updated bookmarks\n")
    localmarks = repo._bookmarks
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same
     ) = comparebookmarks(repo, remotemarks, localmarks)

    status = ui.status
    warn = ui.warn
    if ui.configbool('ui', 'quietbookmarkmove', False):
        status = warn = ui.debug

    explicit = set(explicit)
    changed = []
    for b, scid, dcid in addsrc:
        if scid in repo: # add remote bookmarks for changes we already have
            changed.append((b, scid, status,
                            _("adding remote bookmark %s\n") % (b)))
        elif b in explicit:
            explicit.remove(b)
            ui.warn(_("remote bookmark %s points to locally missing %s\n")
                    % (b, hex(scid)[:12]))

    for b, scid, dcid in advsrc:
        changed.append((b, scid, status,
                        _("updating bookmark %s\n") % (b)))
    # remove normal movement from explicit set
    explicit.difference_update(d[0] for d in changed)

    for b, scid, dcid in diverge:
        if b in explicit:
            explicit.discard(b)
            changed.append((b, scid, status,
                            _("importing bookmark %s\n") % (b)))
        else:
            db = _diverge(ui, b, path, localmarks, scid)
            if db:
                changed.append((db, scid, warn,
                                _("divergent bookmark %s stored as %s\n") %
                                (b, db)))
            else:
                warn(_("warning: failed to assign numbered name "
                       "to divergent bookmark %s\n") % (b))
    for b, scid, dcid in adddst + advdst:
        if b in explicit:
            explicit.discard(b)
            changed.append((b, scid, status,
                            _("importing bookmark %s\n") % (b)))
    for b, scid, dcid in differ:
        if b in explicit:
            explicit.remove(b)
            ui.warn(_("remote bookmark %s points to locally missing %s\n")
                    % (b, hex(scid)[:12]))

    if changed:
        tr = trfunc()
        for b, node, writer, msg in sorted(changed):
            localmarks[b] = node
            writer(msg)
        localmarks.recordchange(tr)

def incoming(ui, repo, other):
    '''Show bookmarks incoming from other to repo
    '''
    ui.status(_("searching for changed bookmarks\n"))

    remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
    r = comparebookmarks(repo, remotemarks, repo._bookmarks)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r

    incomings = []
    if ui.debugflag:
        getid = lambda id: id
    else:
        getid = lambda id: id[:12]
    if ui.verbose:
        def add(b, id, st):
            incomings.append("   %-25s %s %s\n" % (b, getid(id), st))
    else:
        def add(b, id, st):
            incomings.append("   %-25s %s\n" % (b, getid(id)))
    for b, scid, dcid in addsrc:
        # i18n: "added" refers to a bookmark
        add(b, hex(scid), _('added'))
    for b, scid, dcid in advsrc:
        # i18n: "advanced" refers to a bookmark
        add(b, hex(scid), _('advanced'))
    for b, scid, dcid in diverge:
        # i18n: "diverged" refers to a bookmark
        add(b, hex(scid), _('diverged'))
    for b, scid, dcid in differ:
        # i18n: "changed" refers to a bookmark
        add(b, hex(scid), _('changed'))

    if not incomings:
        ui.status(_("no changed bookmarks found\n"))
        return 1

    for s in sorted(incomings):
        ui.write(s)

    return 0

def outgoing(ui, repo, other):
    '''Show bookmarks outgoing from repo to other
    '''
    ui.status(_("searching for changed bookmarks\n"))

    remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
    r = comparebookmarks(repo, repo._bookmarks, remotemarks)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r

    outgoings = []
    if ui.debugflag:
        getid = lambda id: id
    else:
        getid = lambda id: id[:12]
    if ui.verbose:
        def add(b, id, st):
            outgoings.append("   %-25s %s %s\n" % (b, getid(id), st))
    else:
        def add(b, id, st):
            outgoings.append("   %-25s %s\n" % (b, getid(id)))
    for b, scid, dcid in addsrc:
        # i18n: "added" refers to a bookmark
        add(b, hex(scid), _('added'))
    for b, scid, dcid in adddst:
        # i18n: "deleted" refers to a bookmark
        add(b, ' ' * 40, _('deleted'))
    for b, scid, dcid in advsrc:
        # i18n: "advanced" refers to a bookmark
        add(b, hex(scid), _('advanced'))
    for b, scid, dcid in diverge:
        # i18n: "diverged" refers to a bookmark
        add(b, hex(scid), _('diverged'))
    for b, scid, dcid in differ:
        # i18n: "changed" refers to a bookmark
        add(b, hex(scid), _('changed'))

    if not outgoings:
        ui.status(_("no changed bookmarks found\n"))
        return 1

    for s in sorted(outgoings):
        ui.write(s)

    return 0

def summary(repo, other):
    '''Compare bookmarks between repo and other for "hg summary" output

    This returns a "(# of incoming, # of outgoing)" tuple.
    '''
    remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
    r = comparebookmarks(repo, remotemarks, repo._bookmarks)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
    return (len(addsrc), len(adddst))

def validdest(repo, old, new):
    """Is the new bookmark destination a valid update from the old one"""
    repo = repo.unfiltered()
    if old == new:
        # Old == new -> nothing to update.
        return False
    elif not old:
        # old is nullrev, anything is valid.
        # (new != nullrev has been excluded by the previous check)
        return True
    elif repo.obsstore:
        return new.node() in obsolete.foreground(repo, [old.node()])
    else:
        # still an independent clause as it is lazier (and therefore faster)
        return old.descendant(new)
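The headsforactive() function added above is consumed by destutil.py, whose diff follows. A hedged usage sketch (the helper name and bookmark names are illustrative only): if the active bookmark is 'feature' and divergent marks 'feature@default' and 'feature@1' exist, headsforactive() returns the nodes of all three, so a caller that wants only the other heads filters out the node of the active mark:

    from mercurial import bookmarks

    def otherheads(repo):
        # headsforactive() raises ValueError when no bookmark is active,
        # so guard before calling it
        if not repo._activebookmark:
            return []
        current = repo._bookmarks[repo._activebookmark]
        return [n for n in bookmarks.headsforactive(repo) if n != current]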
@@ -1,405 +1,405 @@
# destutil.py - Mercurial utility function for command destination
#
# Copyright Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from . import (
    bookmarks,
    error,
    obsolete,
    scmutil,
)

def _destupdateobs(repo, clean):
    """decide on an update destination from obsolescence markers"""
    node = None
    wc = repo[None]
    p1 = wc.p1()
    movemark = None

    if p1.obsolete() and not p1.children():
        # allow updating to successors
        successors = obsolete.successorssets(repo, p1.node())

        # the behavior of certain cases is as follows,
        #
        # divergent changesets: update to the highest rev, similar to what
        #     is currently done when there is more than one head
        #     (i.e. 'tip')
        #
        # replaced changesets: same as divergent except we know there
        #     is no conflict
        #
        # pruned changeset: no update is done; though, we could
        #     consider updating to the first non-obsolete parent,
        #     similar to what is currently done for 'hg prune'

        if successors:
            # flattening the list here handles both divergent (len > 1)
            # and the usual case (len = 1)
            successors = [n for sub in successors for n in sub]

            # get the max revision for the given successors set,
            # i.e. the 'tip' of a set
            node = repo.revs('max(%ln)', successors).first()
            if bookmarks.isactivewdirparent(repo):
                movemark = repo['.'].node()
    return node, movemark, None

def _destupdatebook(repo, clean):
    """decide on an update destination from the active bookmark"""
    # we also move the active bookmark, if any
    activemark = None
    node, movemark = bookmarks.calculateupdate(repo.ui, repo, None)
    if node is not None:
        activemark = node
    return node, movemark, activemark

def _destupdatebranch(repo, clean):
    """decide on an update destination from the current branch

    This ignores closed branch heads.
    """
    wc = repo[None]
    movemark = node = None
    currentbranch = wc.branch()

    if clean:
        currentbranch = repo['.'].branch()

    if currentbranch in repo.branchmap():
        heads = repo.branchheads(currentbranch)
        if heads:
            node = repo.revs('max(.::(%ln))', heads).first()
        if bookmarks.isactivewdirparent(repo):
            movemark = repo['.'].node()
    elif currentbranch == 'default' and not wc.p1():
        # the "null" parent belongs to the "default" branch, but it doesn't
        # exist, so update to the tipmost non-closed branch head
        node = repo.revs('max(head() and not closed())').first()
    else:
        node = repo['.'].node()
    return node, movemark, None

def _destupdatebranchfallback(repo, clean):
    """decide on an update destination from closed heads in current branch"""
    wc = repo[None]
    currentbranch = wc.branch()
    movemark = None
    if currentbranch in repo.branchmap():
        # here, all descendant branch heads are closed
        heads = repo.branchheads(currentbranch, closed=True)
        assert heads, "any branch has at least one head"
        node = repo.revs('max(.::(%ln))', heads).first()
        assert node is not None, ("any revision has at least "
                                  "one descendant branch head")
        if bookmarks.isactivewdirparent(repo):
            movemark = repo['.'].node()
    else:
        # here, no "default" branch, and all branches are closed
        node = repo.lookup('tip')
        assert node is not None, "'tip' exists even in empty repository"
    return node, movemark, None

# order in which each step should be evaluated
# steps are run until one finds a destination
destupdatesteps = ['evolution', 'bookmark', 'branch', 'branchfallback']
# mapping to ease extension overriding steps.
destupdatestepmap = {'evolution': _destupdateobs,
                     'bookmark': _destupdatebook,
                     'branch': _destupdatebranch,
                     'branchfallback': _destupdatebranchfallback,
                     }

def destupdate(repo, clean=False):
    """destination for bare update operation

    return (rev, movemark, activemark)

    - rev: the revision to update to,
    - movemark: node to move the active bookmark from
                (cf bookmarks.calculateupdate),
    - activemark: a bookmark to activate at the end of the update.
    """
    node = movemark = activemark = None

    for step in destupdatesteps:
        node, movemark, activemark = destupdatestepmap[step](repo, clean)
        if node is not None:
            break
    rev = repo[node].rev()

    return rev, movemark, activemark

msgdestmerge = {
    # too many matching divergent bookmarks
    'toomanybookmarks':
        {'merge':
            (_("multiple matching bookmarks to merge -"
               " please merge with an explicit rev or bookmark"),
             _("run 'hg heads' to see all heads")),
         'rebase':
            (_("multiple matching bookmarks to rebase -"
               " please rebase to an explicit rev or bookmark"),
             _("run 'hg heads' to see all heads")),
        },
    # no other matching divergent bookmark
    'nootherbookmarks':
        {'merge':
            (_("no matching bookmark to merge - "
               "please merge with an explicit rev or bookmark"),
             _("run 'hg heads' to see all heads")),
         'rebase':
            (_("no matching bookmark to rebase - "
               "please rebase to an explicit rev or bookmark"),
             _("run 'hg heads' to see all heads")),
        },
    # the branch has too many unbookmarked heads, no obvious destination
    'toomanyheads':
        {'merge':
            (_("branch '%s' has %d heads - please merge with an explicit rev"),
             _("run 'hg heads .' to see heads")),
         'rebase':
            (_("branch '%s' has %d heads - please rebase to an explicit rev"),
             _("run 'hg heads .' to see heads")),
        },
    # the branch has no other unbookmarked heads
    'bookmarkedheads':
        {'merge':
            (_("heads are bookmarked - please merge with an explicit rev"),
             _("run 'hg heads' to see all heads")),
         'rebase':
            (_("heads are bookmarked - please rebase to an explicit rev"),
             _("run 'hg heads' to see all heads")),
        },
    # the branch has just a single head, but there are other branches
    'nootherbranchheads':
        {'merge':
            (_("branch '%s' has one head - please merge with an explicit rev"),
             _("run 'hg heads' to see all heads")),
         'rebase':
            (_("branch '%s' has one head - please rebase to an explicit rev"),
             _("run 'hg heads' to see all heads")),
        },
    # the repository has a single head
    'nootherheads':
        {'merge':
            (_('nothing to merge'),
             None),
         'rebase':
            (_('nothing to rebase'),
             None),
        },
    # the repository has a single head and we are not on it
    'nootherheadsbehind':
        {'merge':
            (_('nothing to merge'),
             _("use 'hg update' instead")),
         'rebase':
            (_('nothing to rebase'),
             _("use 'hg update' instead")),
        },
    # we are not on a head
    'notatheads':
        {'merge':
            (_('working directory not at a head revision'),
             _("use 'hg update' or merge with an explicit revision")),
         'rebase':
            (_('working directory not at a head revision'),
             _("use 'hg update' or rebase to an explicit revision"))
        },
    'emptysourceset':
        {'merge':
            (_('source set is empty'),
             None),
         'rebase':
            (_('source set is empty'),
             None),
        },
    'multiplebranchessourceset':
        {'merge':
            (_('source set is rooted in multiple branches'),
             None),
         'rebase':
            (_('rebaseset is rooted in multiple named branches'),
             _('specify an explicit destination with --dest')),
        },
    }

def _destmergebook(repo, action='merge', sourceset=None, destspace=None):
    """find merge destination in the active bookmark case"""
    node = None
-    bmheads = repo.bookmarkheads(repo._activebookmark)
+    bmheads = bookmarks.headsforactive(repo)
238 curhead = repo[repo._activebookmark].node()
238 curhead = repo[repo._activebookmark].node()
239 if len(bmheads) == 2:
239 if len(bmheads) == 2:
240 if curhead == bmheads[0]:
240 if curhead == bmheads[0]:
241 node = bmheads[1]
241 node = bmheads[1]
242 else:
242 else:
243 node = bmheads[0]
243 node = bmheads[0]
244 elif len(bmheads) > 2:
244 elif len(bmheads) > 2:
245 msg, hint = msgdestmerge['toomanybookmarks'][action]
245 msg, hint = msgdestmerge['toomanybookmarks'][action]
246 raise error.ManyMergeDestAbort(msg, hint=hint)
246 raise error.ManyMergeDestAbort(msg, hint=hint)
247 elif len(bmheads) <= 1:
247 elif len(bmheads) <= 1:
248 msg, hint = msgdestmerge['nootherbookmarks'][action]
248 msg, hint = msgdestmerge['nootherbookmarks'][action]
249 raise error.NoMergeDestAbort(msg, hint=hint)
249 raise error.NoMergeDestAbort(msg, hint=hint)
250 assert node is not None
250 assert node is not None
251 return node
251 return node
252
252
253 def _destmergebranch(repo, action='merge', sourceset=None, onheadcheck=True,
253 def _destmergebranch(repo, action='merge', sourceset=None, onheadcheck=True,
254 destspace=None):
254 destspace=None):
255 """find merge destination based on branch heads"""
255 """find merge destination based on branch heads"""
256 node = None
256 node = None
257
257
258 if sourceset is None:
258 if sourceset is None:
259 sourceset = [repo[repo.dirstate.p1()].rev()]
259 sourceset = [repo[repo.dirstate.p1()].rev()]
260 branch = repo.dirstate.branch()
260 branch = repo.dirstate.branch()
261 elif not sourceset:
261 elif not sourceset:
262 msg, hint = msgdestmerge['emptysourceset'][action]
262 msg, hint = msgdestmerge['emptysourceset'][action]
263 raise error.NoMergeDestAbort(msg, hint=hint)
263 raise error.NoMergeDestAbort(msg, hint=hint)
264 else:
264 else:
265 branch = None
265 branch = None
266 for ctx in repo.set('roots(%ld::%ld)', sourceset, sourceset):
266     for ctx in repo.set('roots(%ld::%ld)', sourceset, sourceset):
267         if branch is not None and ctx.branch() != branch:
268             msg, hint = msgdestmerge['multiplebranchessourceset'][action]
269             raise error.ManyMergeDestAbort(msg, hint=hint)
270         branch = ctx.branch()
271
272     bheads = repo.branchheads(branch)
273     onhead = repo.revs('%ld and %ln', sourceset, bheads)
274     if onheadcheck and not onhead:
275         # Case A: working copy is not on a head. (merge only)
276         #
277         # This is probably a user mistake. We bail out, pointing at 'hg update'.
278         if len(repo.heads()) <= 1:
279             msg, hint = msgdestmerge['nootherheadsbehind'][action]
280         else:
281             msg, hint = msgdestmerge['notatheads'][action]
282         raise error.Abort(msg, hint=hint)
283     # remove heads descendants of source from the set
284     bheads = list(repo.revs('%ln - (%ld::)', bheads, sourceset))
285     # filter out bookmarked heads
286     nbhs = list(repo.revs('%ld - bookmark()', bheads))
287
288     if destspace is not None:
289         # restrict the search space,
290         # used in the 'hg pull --rebase' case, see issue 5214.
291         nbhs = list(repo.revs('%ld and %ld', destspace, nbhs))
292
293     if len(nbhs) > 1:
294         # Case B: There is more than one other anonymous head
295         #
296         # This means that there will be more than one candidate, which is
297         # ambiguous. We abort, asking the user to pick an explicit destination
298         # instead.
299         msg, hint = msgdestmerge['toomanyheads'][action]
300         msg %= (branch, len(bheads) + 1)
301         raise error.ManyMergeDestAbort(msg, hint=hint)
302     elif not nbhs:
303         # Case C: There are no other anonymous heads
304         #
305         # This means that there is no natural candidate to merge with.
306         # We abort, with various messages for various cases.
307         if bheads:
308             msg, hint = msgdestmerge['bookmarkedheads'][action]
309         elif len(repo.heads()) > 1:
310             msg, hint = msgdestmerge['nootherbranchheads'][action]
311             msg %= branch
312         elif not onhead:
313             # if 'onheadcheck == False' (rebase case),
314             # this was not caught in Case A.
315             msg, hint = msgdestmerge['nootherheadsbehind'][action]
316         else:
317             msg, hint = msgdestmerge['nootherheads'][action]
318         raise error.NoMergeDestAbort(msg, hint=hint)
319     else:
320         node = nbhs[0]
321     assert node is not None
322     return node
323
324 def destmerge(repo, action='merge', sourceset=None, onheadcheck=True,
325               destspace=None):
326     """return the default destination for a merge
327
328     (or raise an exception about why it can't pick one)
329
330     :action: the action being performed, controls the emitted error message
331     """
332     # destspace is here to work around issues with `hg pull --rebase`; see
333     # issue5214 for details
334     if repo._activebookmark:
335         node = _destmergebook(repo, action=action, sourceset=sourceset,
336                               destspace=destspace)
337     else:
338         node = _destmergebranch(repo, action=action, sourceset=sourceset,
339                                 onheadcheck=onheadcheck, destspace=destspace)
340     return repo[node].rev()
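# Editorial aside (not part of the diff): a minimal usage sketch for the
# destination logic above, assuming this module is Mercurial's destutil.
# The two specialized abort types let callers tell "no candidate" apart
# from "too many candidates":
#
#     from mercurial import destutil, error
#
#     def choosemergedest(repo):
#         try:
#             return destutil.destmerge(repo, action='merge')
#         except error.NoMergeDestAbort:
#             ...  # nothing to merge with (single head, bookmarked heads only)
#         except error.ManyMergeDestAbort:
#             ...  # several anonymous heads: ask for an explicit target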
341
342 histeditdefaultrevset = 'reverse(only(.) and not public() and not ::merge())'
343
344 def desthistedit(ui, repo):
345     """Default base revision to edit for `hg histedit`."""
346     default = ui.config('histedit', 'defaultrev', histeditdefaultrevset)
347     if default:
348         revs = scmutil.revrange(repo, [default])
349         if revs:
350             # The revset supplied by the user may not be in ascending order,
351             # so sort it and take the first revision manually.
352             revs.sort()
353             return revs.first()
354
355     return None
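# Editorial aside (not part of the diff): the default revset above can be
# overridden from user configuration; an illustrative (assumed) hgrc entry:
#
#     [histedit]
#     defaultrev = only(.) and draft()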
356
357 def _statusotherbook(ui, repo):
358     bmheads = repo.bookmarkheads(repo._activebookmark)
358     bmheads = bookmarks.headsforactive(repo)
359     curhead = repo[repo._activebookmark].node()
360     if repo.revs('%n and parents()', curhead):
361         # we are on the active bookmark
362         bmheads = [b for b in bmheads if curhead != b]
363     if bmheads:
364         msg = _('%i other divergent bookmarks for "%s"\n')
365         ui.status(msg % (len(bmheads), repo._activebookmark))
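# Editorial aside (not part of the diff): `headsforactive` is the helper this
# commit extracts into bookmarks.py, replacing localrepo.bookmarkheads. A
# sketch consistent with the removed method shown later in this diff
# (divergent bookmarks share the name portion before '@'):
#
#     def headsforactive(repo):
#         """Given a repo with an active bookmark, return divergent heads."""
#         if not repo._activebookmark:
#             raise ValueError('headsforactive() only makes sense for a '
#                              'repository with an active bookmark')
#         name = repo._activebookmark.split('@', 1)[0]
#         heads = []
#         for mark, n in repo._bookmarks.iteritems():
#             if mark.split('@', 1)[0] == name:
#                 heads.append(n)
#         return heads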
366
367 def _statusotherbranchheads(ui, repo):
368     currentbranch = repo.dirstate.branch()
369     allheads = repo.branchheads(currentbranch, closed=True)
370     heads = repo.branchheads(currentbranch)
371     if repo.revs('%ln and parents()', allheads):
372         # we are on a head, even though it might be closed
373         #
374         #   on closed otherheads
375         #   ========= ==========
376         #       o         0      all heads for current branch are closed
377         #                 N      only descendant branch heads are closed
378         #       x         0      there is only one non-closed branch head
379         #                 N      there are some non-closed branch heads
380         #   ========= ==========
381         otherheads = repo.revs('%ln - parents()', heads)
382         if repo['.'].closesbranch():
383             ui.warn(_('no open descendant heads on branch "%s", '
384                       'updating to a closed head\n') %
385                     (currentbranch))
386             if otherheads:
387                 ui.warn(_("(committing will reopen the head, "
388                           "use 'hg heads .' to see %i other heads)\n") %
389                         (len(otherheads)))
390             else:
391                 ui.warn(_('(committing will reopen branch "%s")\n') %
392                         (currentbranch))
393         elif otherheads:
394             ui.status(_('%i other heads for branch "%s"\n') %
395                       (len(otherheads), currentbranch))
396
397 def statusotherdests(ui, repo):
398     """Print messages about other heads"""
399     # XXX we should probably include a hint:
400     # - about what to do
401     # - how to see such heads
402     if repo._activebookmark:
403         _statusotherbook(ui, repo)
404     else:
405         _statusotherbranchheads(ui, repo)
@@ -1,2073 +1,2065 @@
1 # localrepo.py - read/write repository class for mercurial
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 import errno
11 import hashlib
12 import inspect
13 import os
14 import random
15 import time
16 import weakref
17
18 from .i18n import _
19 from .node import (
20     hex,
21     nullid,
22     short,
23     wdirrev,
24 )
25 from . import (
26     bookmarks,
27     branchmap,
28     bundle2,
29     changegroup,
30     changelog,
31     color,
32     context,
33     dirstate,
34     dirstateguard,
35     encoding,
36     error,
37     exchange,
38     extensions,
39     filelog,
40     hook,
41     lock as lockmod,
42     manifest,
43     match as matchmod,
44     merge as mergemod,
45     mergeutil,
46     namespaces,
47     obsolete,
48     pathutil,
49     peer,
50     phases,
51     pushkey,
52     pycompat,
53     repoview,
54     revset,
55     revsetlang,
56     scmutil,
57     store,
58     subrepo,
59     tags as tagsmod,
60     transaction,
61     txnutil,
62     util,
63     vfs as vfsmod,
64 )
65
66 release = lockmod.release
67 urlerr = util.urlerr
68 urlreq = util.urlreq
69
70 class repofilecache(scmutil.filecache):
71     """All filecache usage on repo is done for logic that should be
72     unfiltered"""
73
74     def join(self, obj, fname):
75         return obj.vfs.join(fname)
76     def __get__(self, repo, type=None):
77         if repo is None:
78             return self
79         return super(repofilecache, self).__get__(repo.unfiltered(), type)
80     def __set__(self, repo, value):
81         return super(repofilecache, self).__set__(repo.unfiltered(), value)
82     def __delete__(self, repo):
83         return super(repofilecache, self).__delete__(repo.unfiltered())
84
85 class storecache(repofilecache):
86     """filecache for files in the store"""
87     def join(self, obj, fname):
88         return obj.sjoin(fname)
89
90 class unfilteredpropertycache(util.propertycache):
91     """propertycache that applies to the unfiltered repo only"""
92
93     def __get__(self, repo, type=None):
94         unfi = repo.unfiltered()
95         if unfi is repo:
96             return super(unfilteredpropertycache, self).__get__(unfi)
97         return getattr(unfi, self.name)
98
99 class filteredpropertycache(util.propertycache):
100     """propertycache that must take filtering into account"""
101
102     def cachevalue(self, obj, value):
103         object.__setattr__(obj, self.name, value)
104
105
106 def hasunfilteredcache(repo, name):
107     """check if a repo has an unfilteredpropertycache value for <name>"""
108     return name in vars(repo.unfiltered())
109
110 def unfilteredmethod(orig):
111     """decorate a method that always needs to be run on the unfiltered version"""
112     def wrapper(repo, *args, **kwargs):
113         return orig(repo.unfiltered(), *args, **kwargs)
114     return wrapper
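# Editorial aside (not part of the diff): a usage sketch for the cache
# decorators above. A property guarded by repofilecache is computed once
# and invalidated when the named .hg/ file changes on disk (the pattern
# used for _bookmarks and dirstate below); the property and file name here
# are hypothetical:
#
#     class localrepository(object):
#         @repofilecache('myconfigfile')      # assumed tracked file
#         def _myconfig(self):
#             return self.vfs.tryread('myconfigfile')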
115
116 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
117               'unbundle'}
118 legacycaps = moderncaps.union({'changegroupsubset'})
119
120 class localpeer(peer.peerrepository):
121     '''peer for a local repo; reflects only the most recent API'''
122
123     def __init__(self, repo, caps=None):
124         if caps is None:
125             caps = moderncaps.copy()
126         peer.peerrepository.__init__(self)
127         self._repo = repo.filtered('served')
128         self.ui = repo.ui
129         self._caps = repo._restrictcapabilities(caps)
130         self.requirements = repo.requirements
131         self.supportedformats = repo.supportedformats
132
133     def close(self):
134         self._repo.close()
135
136     def _capabilities(self):
137         return self._caps
138
139     def local(self):
140         return self._repo
141
142     def canpush(self):
143         return True
144
145     def url(self):
146         return self._repo.url()
147
148     def lookup(self, key):
149         return self._repo.lookup(key)
150
151     def branchmap(self):
152         return self._repo.branchmap()
153
154     def heads(self):
155         return self._repo.heads()
156
157     def known(self, nodes):
158         return self._repo.known(nodes)
159
160     def getbundle(self, source, heads=None, common=None, bundlecaps=None,
161                   **kwargs):
162         chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
163                                           common=common, bundlecaps=bundlecaps,
164                                           **kwargs)
165         cb = util.chunkbuffer(chunks)
166
167         if exchange.bundle2requested(bundlecaps):
168             # When requesting a bundle2, getbundle returns a stream to make the
169             # wire-level function happier. We need to build a proper object
170             # from it in the local peer.
171             return bundle2.getunbundler(self.ui, cb)
172         else:
173             return changegroup.getunbundler('01', cb, None)
174
175     # TODO We might want to move the next two calls into legacypeer and add
176     # unbundle instead.
177
178     def unbundle(self, cg, heads, url):
179         """apply a bundle on a repo
180
181         This function handles the repo locking itself."""
182         try:
183             try:
184                 cg = exchange.readbundle(self.ui, cg, None)
185                 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
186                 if util.safehasattr(ret, 'getchunks'):
187                     # This is a bundle20 object, turn it into an unbundler.
188                     # This little dance should be dropped eventually when the
189                     # API is finally improved.
190                     stream = util.chunkbuffer(ret.getchunks())
191                     ret = bundle2.getunbundler(self.ui, stream)
192                 return ret
193             except Exception as exc:
194                 # If the exception contains output salvaged from a bundle2
195                 # reply, we need to make sure it is printed before continuing
196                 # to fail. So we build a bundle2 with such output and consume
197                 # it directly.
198                 #
199                 # This is not very elegant but allows a "simple" solution for
200                 # issue4594
201                 output = getattr(exc, '_bundle2salvagedoutput', ())
202                 if output:
203                     bundler = bundle2.bundle20(self._repo.ui)
204                     for out in output:
205                         bundler.addpart(out)
206                     stream = util.chunkbuffer(bundler.getchunks())
207                     b = bundle2.getunbundler(self.ui, stream)
208                     bundle2.processbundle(self._repo, b)
209                 raise
210         except error.PushRaced as exc:
211             raise error.ResponseError(_('push failed:'), str(exc))
212
213     def lock(self):
214         return self._repo.lock()
215
216     def addchangegroup(self, cg, source, url):
217         return cg.apply(self._repo, source, url)
218
219     def pushkey(self, namespace, key, old, new):
220         return self._repo.pushkey(namespace, key, old, new)
221
222     def listkeys(self, namespace):
223         return self._repo.listkeys(namespace)
224
225     def debugwireargs(self, one, two, three=None, four=None, five=None):
226         '''used to test argument passing over the wire'''
227         return "%s %s %s %s %s" % (one, two, three, four, five)
228
229 class locallegacypeer(localpeer):
230     '''peer extension which implements legacy methods too; used for tests with
231     restricted capabilities'''
232
233     def __init__(self, repo):
234         localpeer.__init__(self, repo, caps=legacycaps)
235
236     def branches(self, nodes):
237         return self._repo.branches(nodes)
238
239     def between(self, pairs):
240         return self._repo.between(pairs)
241
242     def changegroup(self, basenodes, source):
243         return changegroup.changegroup(self._repo, basenodes, source)
244
245     def changegroupsubset(self, bases, heads, source):
246         return changegroup.changegroupsubset(self._repo, bases, heads, source)
247
248 class localrepository(object):
249
250     supportedformats = {
251         'revlogv1',
252         'generaldelta',
253         'treemanifest',
254         'manifestv2',
255     }
256     _basesupported = supportedformats | {
257         'store',
258         'fncache',
259         'shared',
260         'relshared',
261         'dotencode',
262     }
263     openerreqs = {
264         'revlogv1',
265         'generaldelta',
266         'treemanifest',
267         'manifestv2',
268     }
269     filtername = None
270
271     # a list of (ui, featureset) functions.
272     # only functions defined in modules of enabled extensions are invoked
273     featuresetupfuncs = set()
274
275     def __init__(self, baseui, path, create=False):
276         self.requirements = set()
277         # wvfs: rooted at the repository root, used to access the working copy
278         self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
279         # vfs: rooted at .hg, used to access repo files outside of .hg/store
280         self.vfs = None
281         # svfs: usually rooted at .hg/store, used to access repository history
282         # If this is a shared repository, this vfs may point to another
283         # repository's .hg/store directory.
284         self.svfs = None
285         self.root = self.wvfs.base
286         self.path = self.wvfs.join(".hg")
287         self.origroot = path
288         self.auditor = pathutil.pathauditor(self.root, self._checknested)
289         self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
290                                                 realfs=False)
291         self.vfs = vfsmod.vfs(self.path)
292         self.baseui = baseui
293         self.ui = baseui.copy()
294         self.ui.copy = baseui.copy # prevent copying repo configuration
295         # A list of callbacks to shape the phase if no data were found.
296         # Callbacks are in the form: func(repo, roots) --> processed root.
297         # This list is to be filled by extensions during repo setup.
298         self._phasedefaults = []
299         try:
300             self.ui.readconfig(self.vfs.join("hgrc"), self.root)
301             self._loadextensions()
302         except IOError:
303             pass
304
305         if self.featuresetupfuncs:
306             self.supported = set(self._basesupported) # use private copy
307             extmods = set(m.__name__ for n, m
308                           in extensions.extensions(self.ui))
309             for setupfunc in self.featuresetupfuncs:
310                 if setupfunc.__module__ in extmods:
311                     setupfunc(self.ui, self.supported)
312         else:
313             self.supported = self._basesupported
314         color.setup(self.ui)
315
316         # Add compression engines.
317         for name in util.compengines:
318             engine = util.compengines[name]
319             if engine.revlogheader():
320                 self.supported.add('exp-compression-%s' % name)
321
322         if not self.vfs.isdir():
323             if create:
324                 self.requirements = newreporequirements(self)
325
326                 if not self.wvfs.exists():
327                     self.wvfs.makedirs()
328                 self.vfs.makedir(notindexed=True)
329
330                 if 'store' in self.requirements:
331                     self.vfs.mkdir("store")
332
333                     # create an invalid changelog
334                     self.vfs.append(
335                         "00changelog.i",
336                         '\0\0\0\2' # represents revlogv2
337                         ' dummy changelog to prevent using the old repo layout'
338                     )
339             else:
340                 raise error.RepoError(_("repository %s not found") % path)
341         elif create:
342             raise error.RepoError(_("repository %s already exists") % path)
343         else:
344             try:
345                 self.requirements = scmutil.readrequires(
346                     self.vfs, self.supported)
347             except IOError as inst:
348                 if inst.errno != errno.ENOENT:
349                     raise
350
351         self.sharedpath = self.path
352         try:
353             sharedpath = self.vfs.read("sharedpath").rstrip('\n')
354             if 'relshared' in self.requirements:
355                 sharedpath = self.vfs.join(sharedpath)
356             vfs = vfsmod.vfs(sharedpath, realpath=True)
357             s = vfs.base
358             if not vfs.exists():
359                 raise error.RepoError(
360                     _('.hg/sharedpath points to nonexistent directory %s') % s)
361             self.sharedpath = s
362         except IOError as inst:
363             if inst.errno != errno.ENOENT:
364                 raise
365
366         self.store = store.store(
367             self.requirements, self.sharedpath, vfsmod.vfs)
368         self.spath = self.store.path
369         self.svfs = self.store.vfs
370         self.sjoin = self.store.join
371         self.vfs.createmode = self.store.createmode
372         self._applyopenerreqs()
373         if create:
374             self._writerequirements()
375
376         self._dirstatevalidatewarned = False
377
378         self._branchcaches = {}
379         self._revbranchcache = None
380         self.filterpats = {}
381         self._datafilters = {}
382         self._transref = self._lockref = self._wlockref = None
383
384         # A cache for various files under .hg/ that tracks file changes
385         # (used by the filecache decorator)
386         #
387         # Maps a property name to its util.filecacheentry
388         self._filecache = {}
389
390         # hold sets of revisions to be filtered
391         # should be cleared when something might have changed the filter value:
392         # - new changesets,
393         # - phase change,
394         # - new obsolescence marker,
395         # - working directory parent change,
396         # - bookmark changes
397         self.filteredrevcache = {}
398
399         # generic mapping between names and nodes
400         self.names = namespaces.namespaces()
401
402     def close(self):
403         self._writecaches()
404
405     def _loadextensions(self):
406         extensions.loadall(self.ui)
407
408     def _writecaches(self):
409         if self._revbranchcache:
410             self._revbranchcache.write()
411
412     def _restrictcapabilities(self, caps):
413         if self.ui.configbool('experimental', 'bundle2-advertise', True):
414             caps = set(caps)
415             capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
416             caps.add('bundle2=' + urlreq.quote(capsblob))
417         return caps
418
419     def _applyopenerreqs(self):
420         self.svfs.options = dict((r, 1) for r in self.requirements
421                                  if r in self.openerreqs)
422         # experimental config: format.chunkcachesize
423         chunkcachesize = self.ui.configint('format', 'chunkcachesize')
424         if chunkcachesize is not None:
425             self.svfs.options['chunkcachesize'] = chunkcachesize
426         # experimental config: format.maxchainlen
427         maxchainlen = self.ui.configint('format', 'maxchainlen')
428         if maxchainlen is not None:
429             self.svfs.options['maxchainlen'] = maxchainlen
430         # experimental config: format.manifestcachesize
431         manifestcachesize = self.ui.configint('format', 'manifestcachesize')
432         if manifestcachesize is not None:
433             self.svfs.options['manifestcachesize'] = manifestcachesize
434         # experimental config: format.aggressivemergedeltas
435         aggressivemergedeltas = self.ui.configbool('format',
436                                                    'aggressivemergedeltas', False)
437         self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
438         self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
439
440         for r in self.requirements:
441             if r.startswith('exp-compression-'):
442                 self.svfs.options['compengine'] = r[len('exp-compression-'):]
443
444     def _writerequirements(self):
445         scmutil.writerequires(self.vfs, self.requirements)
446
447     def _checknested(self, path):
448         """Determine if path is a legal nested repository."""
449         if not path.startswith(self.root):
450             return False
451         subpath = path[len(self.root) + 1:]
452         normsubpath = util.pconvert(subpath)
453
454         # XXX: Checking against the current working copy is wrong in
455         # the sense that it can reject things like
456         #
457         #   $ hg cat -r 10 sub/x.txt
458         #
459         # if sub/ is no longer a subrepository in the working copy
460         # parent revision.
461         #
462         # However, it can of course also allow things that would have
463         # been rejected before, such as the above cat command if sub/
464         # is a subrepository now, but was a normal directory before.
465         # The old path auditor would have rejected by mistake since it
466         # panics when it sees sub/.hg/.
467         #
468         # All in all, checking against the working copy seems sensible
469         # since we want to prevent access to nested repositories on
470         # the filesystem *now*.
471         ctx = self[None]
472         parts = util.splitpath(subpath)
473         while parts:
474             prefix = '/'.join(parts)
475             if prefix in ctx.substate:
476                 if prefix == normsubpath:
477                     return True
478                 else:
479                     sub = ctx.sub(prefix)
480                     return sub.checknested(subpath[len(prefix) + 1:])
481             else:
482                 parts.pop()
483         return False
484
485     def peer(self):
486         return localpeer(self) # not cached to avoid reference cycle
487
488     def unfiltered(self):
489         """Return the unfiltered version of the repository
490
491         Intended to be overwritten by filtered repo."""
492         return self
493
494     def filtered(self, name):
495         """Return a filtered version of a repository"""
496         # build a new class with the mixin and the current class
497         # (possibly subclass of the repo)
498         class filteredrepo(repoview.repoview, self.unfiltered().__class__):
499             pass
500         return filteredrepo(self, name)
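# Editorial aside (not part of the diff): filtering in action, illustrative
# only. Named views are built on demand from the mixin above; two names that
# appear in this file are 'served' (what localpeer exposes) and 'visible'
# (which hides hidden/obsolete changesets, see cancopy below):
#
#     served = repo.filtered('served')
#     visible = repo.filtered('visible')
#     unfi = served.unfiltered()   # back to the unfiltered repo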
501
502     @repofilecache('bookmarks', 'bookmarks.current')
503     def _bookmarks(self):
504         return bookmarks.bmstore(self)
505
506     @property
507     def _activebookmark(self):
508         return self._bookmarks.active
509
510     def bookmarkheads(self, bookmark):
511         name = bookmark.split('@', 1)[0]
512         heads = []
513         for mark, n in self._bookmarks.iteritems():
514             if mark.split('@', 1)[0] == name:
515                 heads.append(n)
516         return heads
517
518     # _phaserevs and _phasesets depend on changelog. what we need is to
519     # call _phasecache.invalidate() if '00changelog.i' was changed, but it
520     # can't be easily expressed in filecache mechanism.
521     @storecache('phaseroots', '00changelog.i')
522     def _phasecache(self):
523         return phases.phasecache(self, self._phasedefaults)
524
525     @storecache('obsstore')
526     def obsstore(self):
527         # read default format for new obsstore.
528         # developer config: format.obsstore-version
529         defaultformat = self.ui.configint('format', 'obsstore-version', None)
530         # rely on obsstore class default when possible.
531         kwargs = {}
532         if defaultformat is not None:
533             kwargs['defaultformat'] = defaultformat
534         readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
535         store = obsolete.obsstore(self.svfs, readonly=readonly,
536                                   **kwargs)
537         if store and readonly:
538             self.ui.warn(
539                 _('obsolete feature not enabled but %i markers found!\n')
540                 % len(list(store)))
541         return store
542
543     @storecache('00changelog.i')
544     def changelog(self):
545         return changelog.changelog(self.svfs,
546                                    trypending=txnutil.mayhavepending(self.root))
547
548     def _constructmanifest(self):
549         # This is a temporary function while we migrate from manifest to
550         # manifestlog. It allows bundlerepo and unionrepo to intercept the
551         # manifest creation.
552         return manifest.manifestrevlog(self.svfs)
553
554     @storecache('00manifest.i')
555     def manifestlog(self):
556         return manifest.manifestlog(self.svfs, self)
557
558     @repofilecache('dirstate')
559     def dirstate(self):
560         return dirstate.dirstate(self.vfs, self.ui, self.root,
561                                  self._dirstatevalidate)
562
563     def _dirstatevalidate(self, node):
564         try:
565             self.changelog.rev(node)
566             return node
567         except error.LookupError:
568             if not self._dirstatevalidatewarned:
569                 self._dirstatevalidatewarned = True
570                 self.ui.warn(_("warning: ignoring unknown"
571                                " working parent %s!\n") % short(node))
572             return nullid
573
574     def __getitem__(self, changeid):
575         if changeid is None or changeid == wdirrev:
576             return context.workingctx(self)
577         if isinstance(changeid, slice):
578             return [context.changectx(self, i)
579                     for i in xrange(*changeid.indices(len(self)))
580                     if i not in self.changelog.filteredrevs]
581         return context.changectx(self, changeid)
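# Editorial aside (not part of the diff): a sketch of the keys __getitem__
# accepts, per the code above; values are illustrative:
#
#     ctx = repo[0]       # changectx for revision 0
#     wctx = repo[None]   # working directory context
#     ctxs = repo[0:5]    # list of changectx, filtered revisions skipped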
582
583     def __contains__(self, changeid):
584         try:
585             self[changeid]
586             return True
587         except error.RepoLookupError:
588             return False
589
590     def __nonzero__(self):
591         return True
592
593     __bool__ = __nonzero__
594
595     def __len__(self):
596         return len(self.changelog)
597
598     def __iter__(self):
599         return iter(self.changelog)
600
601     def revs(self, expr, *args):
602         '''Find revisions matching a revset.
603
604         The revset is specified as a string ``expr`` that may contain
605         %-formatting to escape certain types. See ``revsetlang.formatspec``.
606
607         Revset aliases from the configuration are not expanded. To expand
608         user aliases, consider calling ``scmutil.revrange()`` or
609         ``repo.anyrevs([expr], user=True)``.
610
611         Returns a revset.abstractsmartset, which is a list-like interface
612         that contains integer revisions.
613         '''
614         expr = revsetlang.formatspec(expr, *args)
615         m = revset.match(None, expr)
616         return m(self)
617
618     def set(self, expr, *args):
619         '''Find revisions matching a revset and emit changectx instances.
620
621         This is a convenience wrapper around ``revs()`` that iterates the
622         result and is a generator of changectx instances.
623
624         Revset aliases from the configuration are not expanded. To expand
625         user aliases, consider calling ``scmutil.revrange()``.
626         '''
627         for r in self.revs(expr, *args):
628             yield self[r]
629
630     def anyrevs(self, specs, user=False):
631         '''Find revisions matching one of the given revsets.
632
633         Revset aliases from the configuration are not expanded by default. To
634         expand user aliases, specify ``user=True``.
635         '''
636         if user:
637             m = revset.matchany(self.ui, specs, repo=self)
638         else:
639             m = revset.matchany(None, specs)
640         return m(self)
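# Editorial aside (not part of the diff): a usage sketch for the revset
# helpers above. The %-codes are escaped by revsetlang.formatspec (e.g. %s
# string, %d int, %ld list of revisions); the values are illustrative:
#
#     revs = repo.revs('branch(%s) and not %ld', 'default', [5, 6])
#     for ctx in repo.set('parents(%d)', 42):
#         node = ctx.hex()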
641
642     def url(self):
643         return 'file:' + self.root
644
645     def hook(self, name, throw=False, **args):
646         """Call a hook, passing this repo instance.
647
648         This is a convenience method to aid invoking hooks. Extensions likely
649         won't call this unless they have registered a custom hook or are
650         replacing code that is expected to call a hook.
651         """
652         return hook.hook(self.ui, self, name, throw, **args)
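# Editorial aside (not part of the diff): an invocation sketch following the
# common pattern elsewhere in Mercurial; hook name and arguments illustrative:
#
#     repo.hook('pretxncommit', throw=True, node=hex(node), parent1=p1)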
653
654     @filteredpropertycache
655     def _tagscache(self):
656         '''Returns a tagscache object that contains various tags-related
657         caches.'''
658
659         # This simplifies its cache management by having one decorated
660         # function (this one) and the rest simply fetch things from it.
661         class tagscache(object):
662             def __init__(self):
663                 # These two define the set of tags for this repository. tags
664                 # maps tag name to node; tagtypes maps tag name to 'global' or
665                 # 'local'. (Global tags are defined by .hgtags across all
666                 # heads, and local tags are defined in .hg/localtags.)
667                 # They constitute the in-memory cache of tags.
668                 self.tags = self.tagtypes = None
669
670                 self.nodetagscache = self.tagslist = None
671
672         cache = tagscache()
673         cache.tags, cache.tagtypes = self._findtags()
674
675         return cache
676
677     def tags(self):
678         '''return a mapping of tag to node'''
679         t = {}
680         if self.changelog.filteredrevs:
681             tags, tt = self._findtags()
682         else:
683             tags = self._tagscache.tags
684         for k, v in tags.iteritems():
685             try:
686                 # ignore tags to unknown nodes
687                 self.changelog.rev(v)
688                 t[k] = v
689             except (error.LookupError, ValueError):
690                 pass
691         return t
692
693     def _findtags(self):
694         '''Do the hard work of finding tags. Return a pair of dicts
695         (tags, tagtypes) where tags maps tag name to node, and tagtypes
696         maps tag name to a string like \'global\' or \'local\'.
697         Subclasses or extensions are free to add their own tags, but
698         should be aware that the returned dicts will be retained for the
699         duration of the localrepo object.'''
700
701         # XXX what tagtype should subclasses/extensions use? Currently
702         # mq and bookmarks add tags, but do not set the tagtype at all.
703         # Should each extension invent its own tag type? Should there
704         # be one tagtype for all such "virtual" tags? Or is the status
705         # quo fine?
706
707
708         # map tag name to (node, hist)
709         alltags = tagsmod.findglobaltags(self.ui, self)
710         # map tag name to tag type
711         tagtypes = dict((tag, 'global') for tag in alltags)
712
713         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
714
715         # Build the return dicts. Have to re-encode tag names because
716         # the tags module always uses UTF-8 (in order not to lose info
717         # writing to the cache), but the rest of Mercurial wants them in
718         # local encoding.
719         tags = {}
720         for (name, (node, hist)) in alltags.iteritems():
721             if node != nullid:
722                 tags[encoding.tolocal(name)] = node
723         tags['tip'] = self.changelog.tip()
724         tagtypes = dict([(encoding.tolocal(name), value)
725                          for (name, value) in tagtypes.iteritems()])
726         return (tags, tagtypes)
727
728     def tagtype(self, tagname):
729         '''
730         return the type of the given tag. result can be:
731
732         'local'  : a local tag
733         'global' : a global tag
734         None     : tag does not exist
735         '''
736
737         return self._tagscache.tagtypes.get(tagname)
738
739     def tagslist(self):
740         '''return a list of tags ordered by revision'''
741         if not self._tagscache.tagslist:
742             l = []
743             for t, n in self.tags().iteritems():
744                 l.append((self.changelog.rev(n), t, n))
745             self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
746
747         return self._tagscache.tagslist
748
749     def nodetags(self, node):
750         '''return the tags associated with a node'''
751         if not self._tagscache.nodetagscache:
752             nodetagscache = {}
753             for t, n in self._tagscache.tags.iteritems():
754                 nodetagscache.setdefault(n, []).append(t)
755             for tags in nodetagscache.itervalues():
756                 tags.sort()
757             self._tagscache.nodetagscache = nodetagscache
758         return self._tagscache.nodetagscache.get(node, [])
759
760     def nodebookmarks(self, node):
761         """return the list of bookmarks pointing to the specified node"""
762         marks = []
763         for bookmark, n in self._bookmarks.iteritems():
764             if n == node:
765                 marks.append(bookmark)
766         return sorted(marks)
767
768     def branchmap(self):
769         '''returns a dictionary {branch: [branchheads]} with branchheads
770         ordered by increasing revision number'''
771         branchmap.updatecache(self)
772         return self._branchcaches[self.filtername]
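# Editorial aside (not part of the diff): branchmap() behaves like a mapping
# from branch name to head nodes (ascending by revision); branchtip() below
# relies on its branchtip() accessor. The branch name is illustrative:
#
#     heads = repo.branchmap()['default']          # list of binary node ids
#     tipnode = repo.branchmap().branchtip('default')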
773
765
774 @unfilteredmethod
766 @unfilteredmethod
775 def revbranchcache(self):
767 def revbranchcache(self):
776 if not self._revbranchcache:
768 if not self._revbranchcache:
777 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
769 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
778 return self._revbranchcache
770 return self._revbranchcache
779
771
780 def branchtip(self, branch, ignoremissing=False):
772 def branchtip(self, branch, ignoremissing=False):
781 '''return the tip node for a given branch
773 '''return the tip node for a given branch
782
774
783 If ignoremissing is True, then this method will not raise an error.
775 If ignoremissing is True, then this method will not raise an error.
784 This is helpful for callers that only expect None for a missing branch
776 This is helpful for callers that only expect None for a missing branch
785 (e.g. namespace).
777 (e.g. namespace).
786
778
787 '''
779 '''
788 try:
780 try:
789 return self.branchmap().branchtip(branch)
781 return self.branchmap().branchtip(branch)
790 except KeyError:
782 except KeyError:
791 if not ignoremissing:
783 if not ignoremissing:
792 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
784 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
793 else:
785 else:
794 pass
786 pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result
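    # Hypothetical usage sketch: for one node present in the changelog and
    # one that is not, ``repo.known([present, absent])`` would return
    # ``[True, False]``; filtered (e.g. hidden) revisions likewise report
    # False.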

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)
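    # For illustration, a repository can be made non-publishing via hgrc:
    #
    #   [phases]
    #   publish = False
    #
    # (config sketch only; the flag is simply read by the method above)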

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records: the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
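    # An illustrative hgrc sketch of the sections parsed above (the patterns
    # and commands are hypothetical examples):
    #
    #   [encode]
    #   **.txt = tempfile: unix2dos -n INFILE OUTFILE
    #
    #   [decode]
    #   **.txt = tempfile: dos2unix -n INFILE OUTFILE
    #
    # A command of '!' disables filtering for that pattern, and a command
    # starting with a name registered via adddatafilter() is dispatched to
    # that Python filter instead of a shell pipeline.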

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
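        # As an illustration only, a transaction that adds ``v2.1`` and moves
        # ``stable-tip`` could produce a ``tags.changes`` payload such as
        # (hypothetical, truncated nodes; real entries use full 40-hex nodes):
        #
        #   +A 8f2b3c... v2.1
        #   -M 0d1e4f... stable-tip
        #   +M 5a6b7c... stable-tip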
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
                                             False)
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)
        tr.changes['revs'] = set()

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clones,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others.
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
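    # A minimal ordering sketch for the locking contract documented above
    # (``repo`` is a hypothetical repository object; locks and transactions
    # are context managers):
    #
    #   with repo.wlock():          # working-directory lock first
    #       with repo.lock():       # then the store lock
    #           with repo.transaction('example'):
    #               pass            # mutate the store under both locks
    #
    # Acquiring them in the opposite order risks the dead-lock hazard the
    # docstrings warn about (and trips the devel warning above).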

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))
1570
1562
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the time
            # the hook runs after the lock is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

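    # Hedged usage sketch (not part of the original source): a minimal
    # programmatic commit from an extension, assuming a dirty working
    # directory:
    #
    #   node = repo.commit(text='automated checkpoint',
    #                      user='bot <bot@example.com>')
    #   if node is None:   # empty commits are refused by default
    #       repo.ui.status('nothing changed\n')
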
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # move the new commit into its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

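    # Hedged sketch (not part of the original source): commitctx is the
    # low-level entry point, so extensions can feed it an in-memory context
    # to create a commit without touching the working directory. The exact
    # memctx/memfilectx signatures below are assumptions for this era of the
    # API:
    #
    #   from mercurial import context
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(repo, path, 'new contents\n')
    #   mctx = context.memctx(repo, (repo['.'].node(), nullid),
    #                         'commit message', ['a.txt'], getfilectx,
    #                         user='me <me@example.com>')
    #   node = repo.commitctx(mctx)
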
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

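    # Hedged sketch (not part of the original source): the expected calling
    # protocol around history-destroying operations, roughly as strip uses
    # it. 'truncate revlogs' stands in for repair.strip's actual work:
    #
    #   with repo.lock():
    #       repo.destroying()     # flush in-memory state (e.g. phasecache)
    #       # ...truncate revlogs...
    #       repo.destroyed()      # repair caches and invalidate the repo
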
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

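    # Hedged example (not part of the original source): listing the open
    # heads of the 'default' branch as short hashes, e.g. from an extension:
    #
    #   from mercurial import node as nodemod
    #   for h in repo.branchheads('default'):
    #       repo.ui.write('%s\n' % nodemod.short(h))
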
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            # walk first parents from top towards bottom, sampling nodes at
            # exponentially growing distances (1, 2, 4, 8, ...)
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote, and outgoing) before changesets are pushed.
        """
        return util.hooks()

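    # Hedged sketch (not part of the original source): an extension can
    # register a check here via util.hooks.add; the names below are
    # hypothetical:
    #
    #   def _checkoutgoing(pushop):
    #       if len(pushop.outgoing.missing) > 100:
    #           raise error.Abort('refusing to push more than 100 changesets')
    #   repo.prepushoutgoinghooks.add('myextension', _checkoutgoing)
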
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

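    # Hedged example (not part of the original source): the bookmarks pushkey
    # namespace can be driven directly, e.g. moving a bookmark to a new node;
    # 'newnodehex' is a hypothetical hex node id already present locally:
    #
    #   old = repo.listkeys('bookmarks').get('mybook', '')
    #   ok = repo.pushkey('bookmarks', 'mybook', old, newnodehex)
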
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

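    # Hedged example (not part of the original source): the returned path is
    # suitable for display to the user, e.g.:
    #
    #   msgfn = repo.savecommitmessage('draft message\n')
    #   repo.ui.status('message saved in %s\n' % msgfn)  # .hg/last-message.txt
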
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements

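# Hedged illustration (not part of the original source): with stock defaults
# this yields the classic requirement set, e.g. ('repo' is hypothetical):
#
#   >>> sorted(newreporequirements(repo))
#   ['dotencode', 'fncache', 'generaldelta', 'revlogv1', 'store']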