bookmarks: extract function that looks up bookmark names by node
Yuya Nishihara
r37867:6e225984 default
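
This commit moves the node-to-name lookup for bookmarks into the bookmark store itself, as the new bmstore.names() method added in the first hunk below; the second hunk updates localrepo.py to match (it is truncated in this capture). A minimal usage sketch of the new helper, not part of the commit, assuming repo is an already-opened local repository object:

    marks = repo._bookmarks            # the bmstore loaded from .hg/bookmarks
    node = repo['.'].node()            # binary node id of the working-copy parent
    for name in marks.names(node):     # sorted bookmark names pointing at that node
        repo.ui.write('%s\n' % name)

Keeping the lookup on bmstore means callers no longer iterate repo._bookmarks by hand, which fits the class's stated goal of making it simple to swap the storage underlying bookmarks.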
@@ -1,937 +1,945 b''
1 # Mercurial bookmark support code
1 # Mercurial bookmark support code
2 #
2 #
3 # Copyright 2008 David Soria Parra <dsp@php.net>
3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import struct
11 import struct
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 bin,
15 bin,
16 hex,
16 hex,
17 short,
17 short,
18 wdirid,
18 wdirid,
19 )
19 )
20 from . import (
20 from . import (
21 encoding,
21 encoding,
22 error,
22 error,
23 obsutil,
23 obsutil,
24 pycompat,
24 pycompat,
25 scmutil,
25 scmutil,
26 txnutil,
26 txnutil,
27 util,
27 util,
28 )
28 )
29
29
30 # label constants
30 # label constants
31 # until 3.5, bookmarks.current was the advertised name, not
31 # until 3.5, bookmarks.current was the advertised name, not
32 # bookmarks.active, so we must use both to avoid breaking old
32 # bookmarks.active, so we must use both to avoid breaking old
33 # custom styles
33 # custom styles
34 activebookmarklabel = 'bookmarks.active bookmarks.current'
34 activebookmarklabel = 'bookmarks.active bookmarks.current'
35
35
36 def _getbkfile(repo):
36 def _getbkfile(repo):
37 """Hook so that extensions that mess with the store can hook bm storage.
37 """Hook so that extensions that mess with the store can hook bm storage.
38
38
39 For core, this just handles wether we should see pending
39 For core, this just handles wether we should see pending
40 bookmarks or the committed ones. Other extensions (like share)
40 bookmarks or the committed ones. Other extensions (like share)
41 may need to tweak this behavior further.
41 may need to tweak this behavior further.
42 """
42 """
43 fp, pending = txnutil.trypending(repo.root, repo.vfs, 'bookmarks')
43 fp, pending = txnutil.trypending(repo.root, repo.vfs, 'bookmarks')
44 return fp
44 return fp
45
45
46 class bmstore(object):
46 class bmstore(object):
47 """Storage for bookmarks.
47 """Storage for bookmarks.
48
48
49 This object should do all bookmark-related reads and writes, so
49 This object should do all bookmark-related reads and writes, so
50 that it's fairly simple to replace the storage underlying
50 that it's fairly simple to replace the storage underlying
51 bookmarks without having to clone the logic surrounding
51 bookmarks without having to clone the logic surrounding
52 bookmarks. This type also should manage the active bookmark, if
52 bookmarks. This type also should manage the active bookmark, if
53 any.
53 any.
54
54
55 This particular bmstore implementation stores bookmarks as
55 This particular bmstore implementation stores bookmarks as
56 {hash}\s{name}\n (the same format as localtags) in
56 {hash}\s{name}\n (the same format as localtags) in
57 .hg/bookmarks. The mapping is stored as {name: nodeid}.
57 .hg/bookmarks. The mapping is stored as {name: nodeid}.
58 """
58 """
59
59
60 def __init__(self, repo):
60 def __init__(self, repo):
61 self._repo = repo
61 self._repo = repo
62 self._refmap = refmap = {} # refspec: node
62 self._refmap = refmap = {} # refspec: node
63 self._clean = True
63 self._clean = True
64 self._aclean = True
64 self._aclean = True
65 nm = repo.changelog.nodemap
65 nm = repo.changelog.nodemap
66 tonode = bin # force local lookup
66 tonode = bin # force local lookup
67 try:
67 try:
68 with _getbkfile(repo) as bkfile:
68 with _getbkfile(repo) as bkfile:
69 for line in bkfile:
69 for line in bkfile:
70 line = line.strip()
70 line = line.strip()
71 if not line:
71 if not line:
72 continue
72 continue
73 try:
73 try:
74 sha, refspec = line.split(' ', 1)
74 sha, refspec = line.split(' ', 1)
75 node = tonode(sha)
75 node = tonode(sha)
76 if node in nm:
76 if node in nm:
77 refspec = encoding.tolocal(refspec)
77 refspec = encoding.tolocal(refspec)
78 refmap[refspec] = node
78 refmap[refspec] = node
79 except (TypeError, ValueError):
79 except (TypeError, ValueError):
80 # TypeError:
80 # TypeError:
81 # - bin(...)
81 # - bin(...)
82 # ValueError:
82 # ValueError:
83 # - node in nm, for non-20-bytes entry
83 # - node in nm, for non-20-bytes entry
84 # - split(...), for string without ' '
84 # - split(...), for string without ' '
85 repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
85 repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
86 % pycompat.bytestr(line))
86 % pycompat.bytestr(line))
87 except IOError as inst:
87 except IOError as inst:
88 if inst.errno != errno.ENOENT:
88 if inst.errno != errno.ENOENT:
89 raise
89 raise
90 self._active = _readactive(repo, self)
90 self._active = _readactive(repo, self)
91
91
92 @property
92 @property
93 def active(self):
93 def active(self):
94 return self._active
94 return self._active
95
95
96 @active.setter
96 @active.setter
97 def active(self, mark):
97 def active(self, mark):
98 if mark is not None and mark not in self._refmap:
98 if mark is not None and mark not in self._refmap:
99 raise AssertionError('bookmark %s does not exist!' % mark)
99 raise AssertionError('bookmark %s does not exist!' % mark)
100
100
101 self._active = mark
101 self._active = mark
102 self._aclean = False
102 self._aclean = False
103
103
104 def __len__(self):
104 def __len__(self):
105 return len(self._refmap)
105 return len(self._refmap)
106
106
107 def __iter__(self):
107 def __iter__(self):
108 return iter(self._refmap)
108 return iter(self._refmap)
109
109
110 def iteritems(self):
110 def iteritems(self):
111 return self._refmap.iteritems()
111 return self._refmap.iteritems()
112
112
113 def items(self):
113 def items(self):
114 return self._refmap.items()
114 return self._refmap.items()
115
115
116 # TODO: maybe rename to allnames()?
116 # TODO: maybe rename to allnames()?
117 def keys(self):
117 def keys(self):
118 return self._refmap.keys()
118 return self._refmap.keys()
119
119
120 # TODO: maybe rename to allnodes()? but nodes would have to be deduplicated
120 # TODO: maybe rename to allnodes()? but nodes would have to be deduplicated
121 def values(self):
121 def values(self):
122 return self._refmap.values()
122 return self._refmap.values()
123
123
124 def __contains__(self, mark):
124 def __contains__(self, mark):
125 return mark in self._refmap
125 return mark in self._refmap
126
126
127 def __getitem__(self, mark):
127 def __getitem__(self, mark):
128 return self._refmap[mark]
128 return self._refmap[mark]
129
129
130 def get(self, mark, default=None):
130 def get(self, mark, default=None):
131 return self._refmap.get(mark, default)
131 return self._refmap.get(mark, default)
132
132
133 def _set(self, key, value):
133 def _set(self, key, value):
134 self._clean = False
134 self._clean = False
135 self._refmap[key] = value
135 self._refmap[key] = value
136
136
137 def _del(self, key):
137 def _del(self, key):
138 self._clean = False
138 self._clean = False
139 del self._refmap[key]
139 del self._refmap[key]
140
140
141 def names(self, node):
142 """Return a sorted list of bookmarks pointing to the specified node"""
143 marks = []
144 for m, n in self._refmap.iteritems():
145 if n == node:
146 marks.append(m)
147 return sorted(marks)
148
141 def changectx(self, mark):
149 def changectx(self, mark):
142 node = self._refmap[mark]
150 node = self._refmap[mark]
143 return self._repo[node]
151 return self._repo[node]
144
152
145 def applychanges(self, repo, tr, changes):
153 def applychanges(self, repo, tr, changes):
146 """Apply a list of changes to bookmarks
154 """Apply a list of changes to bookmarks
147 """
155 """
148 bmchanges = tr.changes.get('bookmarks')
156 bmchanges = tr.changes.get('bookmarks')
149 for name, node in changes:
157 for name, node in changes:
150 old = self._refmap.get(name)
158 old = self._refmap.get(name)
151 if node is None:
159 if node is None:
152 self._del(name)
160 self._del(name)
153 else:
161 else:
154 self._set(name, node)
162 self._set(name, node)
155 if bmchanges is not None:
163 if bmchanges is not None:
156 # if a previous value exist preserve the "initial" value
164 # if a previous value exist preserve the "initial" value
157 previous = bmchanges.get(name)
165 previous = bmchanges.get(name)
158 if previous is not None:
166 if previous is not None:
159 old = previous[0]
167 old = previous[0]
160 bmchanges[name] = (old, node)
168 bmchanges[name] = (old, node)
161 self._recordchange(tr)
169 self._recordchange(tr)
162
170
163 def _recordchange(self, tr):
171 def _recordchange(self, tr):
164 """record that bookmarks have been changed in a transaction
172 """record that bookmarks have been changed in a transaction
165
173
166 The transaction is then responsible for updating the file content."""
174 The transaction is then responsible for updating the file content."""
167 tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
175 tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
168 location='plain')
176 location='plain')
169 tr.hookargs['bookmark_moved'] = '1'
177 tr.hookargs['bookmark_moved'] = '1'
170
178
171 def _writerepo(self, repo):
179 def _writerepo(self, repo):
172 """Factored out for extensibility"""
180 """Factored out for extensibility"""
173 rbm = repo._bookmarks
181 rbm = repo._bookmarks
174 if rbm.active not in self._refmap:
182 if rbm.active not in self._refmap:
175 rbm.active = None
183 rbm.active = None
176 rbm._writeactive()
184 rbm._writeactive()
177
185
178 with repo.wlock():
186 with repo.wlock():
179 file_ = repo.vfs('bookmarks', 'w', atomictemp=True,
187 file_ = repo.vfs('bookmarks', 'w', atomictemp=True,
180 checkambig=True)
188 checkambig=True)
181 try:
189 try:
182 self._write(file_)
190 self._write(file_)
183 except: # re-raises
191 except: # re-raises
184 file_.discard()
192 file_.discard()
185 raise
193 raise
186 finally:
194 finally:
187 file_.close()
195 file_.close()
188
196
189 def _writeactive(self):
197 def _writeactive(self):
190 if self._aclean:
198 if self._aclean:
191 return
199 return
192 with self._repo.wlock():
200 with self._repo.wlock():
193 if self._active is not None:
201 if self._active is not None:
194 f = self._repo.vfs('bookmarks.current', 'w', atomictemp=True,
202 f = self._repo.vfs('bookmarks.current', 'w', atomictemp=True,
195 checkambig=True)
203 checkambig=True)
196 try:
204 try:
197 f.write(encoding.fromlocal(self._active))
205 f.write(encoding.fromlocal(self._active))
198 finally:
206 finally:
199 f.close()
207 f.close()
200 else:
208 else:
201 self._repo.vfs.tryunlink('bookmarks.current')
209 self._repo.vfs.tryunlink('bookmarks.current')
202 self._aclean = True
210 self._aclean = True
203
211
204 def _write(self, fp):
212 def _write(self, fp):
205 for name, node in sorted(self._refmap.iteritems()):
213 for name, node in sorted(self._refmap.iteritems()):
206 fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
214 fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
207 self._clean = True
215 self._clean = True
208 self._repo.invalidatevolatilesets()
216 self._repo.invalidatevolatilesets()
209
217
210 def expandname(self, bname):
218 def expandname(self, bname):
211 if bname == '.':
219 if bname == '.':
212 if self.active:
220 if self.active:
213 return self.active
221 return self.active
214 else:
222 else:
215 raise error.Abort(_("no active bookmark"))
223 raise error.Abort(_("no active bookmark"))
216 return bname
224 return bname
217
225
218 def checkconflict(self, mark, force=False, target=None):
226 def checkconflict(self, mark, force=False, target=None):
219 """check repo for a potential clash of mark with an existing bookmark,
227 """check repo for a potential clash of mark with an existing bookmark,
220 branch, or hash
228 branch, or hash
221
229
222 If target is supplied, then check that we are moving the bookmark
230 If target is supplied, then check that we are moving the bookmark
223 forward.
231 forward.
224
232
225 If force is supplied, then forcibly move the bookmark to a new commit
233 If force is supplied, then forcibly move the bookmark to a new commit
226 regardless if it is a move forward.
234 regardless if it is a move forward.
227
235
228 If divergent bookmark are to be deleted, they will be returned as list.
236 If divergent bookmark are to be deleted, they will be returned as list.
229 """
237 """
230 cur = self._repo['.'].node()
238 cur = self._repo['.'].node()
231 if mark in self._refmap and not force:
239 if mark in self._refmap and not force:
232 if target:
240 if target:
233 if self._refmap[mark] == target and target == cur:
241 if self._refmap[mark] == target and target == cur:
234 # re-activating a bookmark
242 # re-activating a bookmark
235 return []
243 return []
236 rev = self._repo[target].rev()
244 rev = self._repo[target].rev()
237 anc = self._repo.changelog.ancestors([rev])
245 anc = self._repo.changelog.ancestors([rev])
238 bmctx = self.changectx(mark)
246 bmctx = self.changectx(mark)
239 divs = [self._refmap[b] for b in self._refmap
247 divs = [self._refmap[b] for b in self._refmap
240 if b.split('@', 1)[0] == mark.split('@', 1)[0]]
248 if b.split('@', 1)[0] == mark.split('@', 1)[0]]
241
249
242 # allow resolving a single divergent bookmark even if moving
250 # allow resolving a single divergent bookmark even if moving
243 # the bookmark across branches when a revision is specified
251 # the bookmark across branches when a revision is specified
244 # that contains a divergent bookmark
252 # that contains a divergent bookmark
245 if bmctx.rev() not in anc and target in divs:
253 if bmctx.rev() not in anc and target in divs:
246 return divergent2delete(self._repo, [target], mark)
254 return divergent2delete(self._repo, [target], mark)
247
255
248 deletefrom = [b for b in divs
256 deletefrom = [b for b in divs
249 if self._repo[b].rev() in anc or b == target]
257 if self._repo[b].rev() in anc or b == target]
250 delbms = divergent2delete(self._repo, deletefrom, mark)
258 delbms = divergent2delete(self._repo, deletefrom, mark)
251 if validdest(self._repo, bmctx, self._repo[target]):
259 if validdest(self._repo, bmctx, self._repo[target]):
252 self._repo.ui.status(
260 self._repo.ui.status(
253 _("moving bookmark '%s' forward from %s\n") %
261 _("moving bookmark '%s' forward from %s\n") %
254 (mark, short(bmctx.node())))
262 (mark, short(bmctx.node())))
255 return delbms
263 return delbms
256 raise error.Abort(_("bookmark '%s' already exists "
264 raise error.Abort(_("bookmark '%s' already exists "
257 "(use -f to force)") % mark)
265 "(use -f to force)") % mark)
258 if ((mark in self._repo.branchmap() or
266 if ((mark in self._repo.branchmap() or
259 mark == self._repo.dirstate.branch()) and not force):
267 mark == self._repo.dirstate.branch()) and not force):
260 raise error.Abort(
268 raise error.Abort(
261 _("a bookmark cannot have the name of an existing branch"))
269 _("a bookmark cannot have the name of an existing branch"))
262 if len(mark) > 3 and not force:
270 if len(mark) > 3 and not force:
263 try:
271 try:
264 shadowhash = scmutil.isrevsymbol(self._repo, mark)
272 shadowhash = scmutil.isrevsymbol(self._repo, mark)
265 except error.LookupError: # ambiguous identifier
273 except error.LookupError: # ambiguous identifier
266 shadowhash = False
274 shadowhash = False
267 if shadowhash:
275 if shadowhash:
268 self._repo.ui.warn(
276 self._repo.ui.warn(
269 _("bookmark %s matches a changeset hash\n"
277 _("bookmark %s matches a changeset hash\n"
270 "(did you leave a -r out of an 'hg bookmark' "
278 "(did you leave a -r out of an 'hg bookmark' "
271 "command?)\n")
279 "command?)\n")
272 % mark)
280 % mark)
273 return []
281 return []
274
282
275 def _readactive(repo, marks):
283 def _readactive(repo, marks):
276 """
284 """
277 Get the active bookmark. We can have an active bookmark that updates
285 Get the active bookmark. We can have an active bookmark that updates
278 itself as we commit. This function returns the name of that bookmark.
286 itself as we commit. This function returns the name of that bookmark.
279 It is stored in .hg/bookmarks.current
287 It is stored in .hg/bookmarks.current
280 """
288 """
281 mark = None
289 mark = None
282 try:
290 try:
283 file = repo.vfs('bookmarks.current')
291 file = repo.vfs('bookmarks.current')
284 except IOError as inst:
292 except IOError as inst:
285 if inst.errno != errno.ENOENT:
293 if inst.errno != errno.ENOENT:
286 raise
294 raise
287 return None
295 return None
288 try:
296 try:
289 # No readline() in osutil.posixfile, reading everything is
297 # No readline() in osutil.posixfile, reading everything is
290 # cheap.
298 # cheap.
291 # Note that it's possible for readlines() here to raise
299 # Note that it's possible for readlines() here to raise
292 # IOError, since we might be reading the active mark over
300 # IOError, since we might be reading the active mark over
293 # static-http which only tries to load the file when we try
301 # static-http which only tries to load the file when we try
294 # to read from it.
302 # to read from it.
295 mark = encoding.tolocal((file.readlines() or [''])[0])
303 mark = encoding.tolocal((file.readlines() or [''])[0])
296 if mark == '' or mark not in marks:
304 if mark == '' or mark not in marks:
297 mark = None
305 mark = None
298 except IOError as inst:
306 except IOError as inst:
299 if inst.errno != errno.ENOENT:
307 if inst.errno != errno.ENOENT:
300 raise
308 raise
301 return None
309 return None
302 finally:
310 finally:
303 file.close()
311 file.close()
304 return mark
312 return mark
305
313
306 def activate(repo, mark):
314 def activate(repo, mark):
307 """
315 """
308 Set the given bookmark to be 'active', meaning that this bookmark will
316 Set the given bookmark to be 'active', meaning that this bookmark will
309 follow new commits that are made.
317 follow new commits that are made.
310 The name is recorded in .hg/bookmarks.current
318 The name is recorded in .hg/bookmarks.current
311 """
319 """
312 repo._bookmarks.active = mark
320 repo._bookmarks.active = mark
313 repo._bookmarks._writeactive()
321 repo._bookmarks._writeactive()
314
322
315 def deactivate(repo):
323 def deactivate(repo):
316 """
324 """
317 Unset the active bookmark in this repository.
325 Unset the active bookmark in this repository.
318 """
326 """
319 repo._bookmarks.active = None
327 repo._bookmarks.active = None
320 repo._bookmarks._writeactive()
328 repo._bookmarks._writeactive()
321
329
322 def isactivewdirparent(repo):
330 def isactivewdirparent(repo):
323 """
331 """
324 Tell whether the 'active' bookmark (the one that follows new commits)
332 Tell whether the 'active' bookmark (the one that follows new commits)
325 points to one of the parents of the current working directory (wdir).
333 points to one of the parents of the current working directory (wdir).
326
334
327 While this is normally the case, it can on occasion be false; for example,
335 While this is normally the case, it can on occasion be false; for example,
328 immediately after a pull, the active bookmark can be moved to point
336 immediately after a pull, the active bookmark can be moved to point
329 to a place different than the wdir. This is solved by running `hg update`.
337 to a place different than the wdir. This is solved by running `hg update`.
330 """
338 """
331 mark = repo._activebookmark
339 mark = repo._activebookmark
332 marks = repo._bookmarks
340 marks = repo._bookmarks
333 parents = [p.node() for p in repo[None].parents()]
341 parents = [p.node() for p in repo[None].parents()]
334 return (mark in marks and marks[mark] in parents)
342 return (mark in marks and marks[mark] in parents)
335
343
336 def divergent2delete(repo, deletefrom, bm):
344 def divergent2delete(repo, deletefrom, bm):
337 """find divergent versions of bm on nodes in deletefrom.
345 """find divergent versions of bm on nodes in deletefrom.
338
346
339 the list of bookmark to delete."""
347 the list of bookmark to delete."""
340 todelete = []
348 todelete = []
341 marks = repo._bookmarks
349 marks = repo._bookmarks
342 divergent = [b for b in marks if b.split('@', 1)[0] == bm.split('@', 1)[0]]
350 divergent = [b for b in marks if b.split('@', 1)[0] == bm.split('@', 1)[0]]
343 for mark in divergent:
351 for mark in divergent:
344 if mark == '@' or '@' not in mark:
352 if mark == '@' or '@' not in mark:
345 # can't be divergent by definition
353 # can't be divergent by definition
346 continue
354 continue
347 if mark and marks[mark] in deletefrom:
355 if mark and marks[mark] in deletefrom:
348 if mark != bm:
356 if mark != bm:
349 todelete.append(mark)
357 todelete.append(mark)
350 return todelete
358 return todelete
351
359
352 def headsforactive(repo):
360 def headsforactive(repo):
353 """Given a repo with an active bookmark, return divergent bookmark nodes.
361 """Given a repo with an active bookmark, return divergent bookmark nodes.
354
362
355 Args:
363 Args:
356 repo: A repository with an active bookmark.
364 repo: A repository with an active bookmark.
357
365
358 Returns:
366 Returns:
359 A list of binary node ids that is the full list of other
367 A list of binary node ids that is the full list of other
360 revisions with bookmarks divergent from the active bookmark. If
368 revisions with bookmarks divergent from the active bookmark. If
361 there were no divergent bookmarks, then this list will contain
369 there were no divergent bookmarks, then this list will contain
362 only one entry.
370 only one entry.
363 """
371 """
364 if not repo._activebookmark:
372 if not repo._activebookmark:
365 raise ValueError(
373 raise ValueError(
366 'headsforactive() only makes sense with an active bookmark')
374 'headsforactive() only makes sense with an active bookmark')
367 name = repo._activebookmark.split('@', 1)[0]
375 name = repo._activebookmark.split('@', 1)[0]
368 heads = []
376 heads = []
369 for mark, n in repo._bookmarks.iteritems():
377 for mark, n in repo._bookmarks.iteritems():
370 if mark.split('@', 1)[0] == name:
378 if mark.split('@', 1)[0] == name:
371 heads.append(n)
379 heads.append(n)
372 return heads
380 return heads
373
381
374 def calculateupdate(ui, repo):
382 def calculateupdate(ui, repo):
375 '''Return a tuple (activemark, movemarkfrom) indicating the active bookmark
383 '''Return a tuple (activemark, movemarkfrom) indicating the active bookmark
376 and where to move the active bookmark from, if needed.'''
384 and where to move the active bookmark from, if needed.'''
377 checkout, movemarkfrom = None, None
385 checkout, movemarkfrom = None, None
378 activemark = repo._activebookmark
386 activemark = repo._activebookmark
379 if isactivewdirparent(repo):
387 if isactivewdirparent(repo):
380 movemarkfrom = repo['.'].node()
388 movemarkfrom = repo['.'].node()
381 elif activemark:
389 elif activemark:
382 ui.status(_("updating to active bookmark %s\n") % activemark)
390 ui.status(_("updating to active bookmark %s\n") % activemark)
383 checkout = activemark
391 checkout = activemark
384 return (checkout, movemarkfrom)
392 return (checkout, movemarkfrom)
385
393
386 def update(repo, parents, node):
394 def update(repo, parents, node):
387 deletefrom = parents
395 deletefrom = parents
388 marks = repo._bookmarks
396 marks = repo._bookmarks
389 active = marks.active
397 active = marks.active
390 if not active:
398 if not active:
391 return False
399 return False
392
400
393 bmchanges = []
401 bmchanges = []
394 if marks[active] in parents:
402 if marks[active] in parents:
395 new = repo[node]
403 new = repo[node]
396 divs = [marks.changectx(b) for b in marks
404 divs = [marks.changectx(b) for b in marks
397 if b.split('@', 1)[0] == active.split('@', 1)[0]]
405 if b.split('@', 1)[0] == active.split('@', 1)[0]]
398 anc = repo.changelog.ancestors([new.rev()])
406 anc = repo.changelog.ancestors([new.rev()])
399 deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
407 deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
400 if validdest(repo, marks.changectx(active), new):
408 if validdest(repo, marks.changectx(active), new):
401 bmchanges.append((active, new.node()))
409 bmchanges.append((active, new.node()))
402
410
403 for bm in divergent2delete(repo, deletefrom, active):
411 for bm in divergent2delete(repo, deletefrom, active):
404 bmchanges.append((bm, None))
412 bmchanges.append((bm, None))
405
413
406 if bmchanges:
414 if bmchanges:
407 with repo.lock(), repo.transaction('bookmark') as tr:
415 with repo.lock(), repo.transaction('bookmark') as tr:
408 marks.applychanges(repo, tr, bmchanges)
416 marks.applychanges(repo, tr, bmchanges)
409 return bool(bmchanges)
417 return bool(bmchanges)
410
418
411 def listbinbookmarks(repo):
419 def listbinbookmarks(repo):
412 # We may try to list bookmarks on a repo type that does not
420 # We may try to list bookmarks on a repo type that does not
413 # support it (e.g., statichttprepository).
421 # support it (e.g., statichttprepository).
414 marks = getattr(repo, '_bookmarks', {})
422 marks = getattr(repo, '_bookmarks', {})
415
423
416 hasnode = repo.changelog.hasnode
424 hasnode = repo.changelog.hasnode
417 for k, v in marks.iteritems():
425 for k, v in marks.iteritems():
418 # don't expose local divergent bookmarks
426 # don't expose local divergent bookmarks
419 if hasnode(v) and ('@' not in k or k.endswith('@')):
427 if hasnode(v) and ('@' not in k or k.endswith('@')):
420 yield k, v
428 yield k, v
421
429
422 def listbookmarks(repo):
430 def listbookmarks(repo):
423 d = {}
431 d = {}
424 for book, node in listbinbookmarks(repo):
432 for book, node in listbinbookmarks(repo):
425 d[book] = hex(node)
433 d[book] = hex(node)
426 return d
434 return d
427
435
428 def pushbookmark(repo, key, old, new):
436 def pushbookmark(repo, key, old, new):
429 with repo.wlock(), repo.lock(), repo.transaction('bookmarks') as tr:
437 with repo.wlock(), repo.lock(), repo.transaction('bookmarks') as tr:
430 marks = repo._bookmarks
438 marks = repo._bookmarks
431 existing = hex(marks.get(key, ''))
439 existing = hex(marks.get(key, ''))
432 if existing != old and existing != new:
440 if existing != old and existing != new:
433 return False
441 return False
434 if new == '':
442 if new == '':
435 changes = [(key, None)]
443 changes = [(key, None)]
436 else:
444 else:
437 if new not in repo:
445 if new not in repo:
438 return False
446 return False
439 changes = [(key, repo[new].node())]
447 changes = [(key, repo[new].node())]
440 marks.applychanges(repo, tr, changes)
448 marks.applychanges(repo, tr, changes)
441 return True
449 return True
442
450
443 def comparebookmarks(repo, srcmarks, dstmarks, targets=None):
451 def comparebookmarks(repo, srcmarks, dstmarks, targets=None):
444 '''Compare bookmarks between srcmarks and dstmarks
452 '''Compare bookmarks between srcmarks and dstmarks
445
453
446 This returns tuple "(addsrc, adddst, advsrc, advdst, diverge,
454 This returns tuple "(addsrc, adddst, advsrc, advdst, diverge,
447 differ, invalid)", each are list of bookmarks below:
455 differ, invalid)", each are list of bookmarks below:
448
456
449 :addsrc: added on src side (removed on dst side, perhaps)
457 :addsrc: added on src side (removed on dst side, perhaps)
450 :adddst: added on dst side (removed on src side, perhaps)
458 :adddst: added on dst side (removed on src side, perhaps)
451 :advsrc: advanced on src side
459 :advsrc: advanced on src side
452 :advdst: advanced on dst side
460 :advdst: advanced on dst side
453 :diverge: diverge
461 :diverge: diverge
454 :differ: changed, but changeset referred on src is unknown on dst
462 :differ: changed, but changeset referred on src is unknown on dst
455 :invalid: unknown on both side
463 :invalid: unknown on both side
456 :same: same on both side
464 :same: same on both side
457
465
458 Each elements of lists in result tuple is tuple "(bookmark name,
466 Each elements of lists in result tuple is tuple "(bookmark name,
459 changeset ID on source side, changeset ID on destination
467 changeset ID on source side, changeset ID on destination
460 side)". Each changeset IDs are 40 hexadecimal digit string or
468 side)". Each changeset IDs are 40 hexadecimal digit string or
461 None.
469 None.
462
470
463 Changeset IDs of tuples in "addsrc", "adddst", "differ" or
471 Changeset IDs of tuples in "addsrc", "adddst", "differ" or
464 "invalid" list may be unknown for repo.
472 "invalid" list may be unknown for repo.
465
473
466 If "targets" is specified, only bookmarks listed in it are
474 If "targets" is specified, only bookmarks listed in it are
467 examined.
475 examined.
468 '''
476 '''
469
477
470 if targets:
478 if targets:
471 bset = set(targets)
479 bset = set(targets)
472 else:
480 else:
473 srcmarkset = set(srcmarks)
481 srcmarkset = set(srcmarks)
474 dstmarkset = set(dstmarks)
482 dstmarkset = set(dstmarks)
475 bset = srcmarkset | dstmarkset
483 bset = srcmarkset | dstmarkset
476
484
477 results = ([], [], [], [], [], [], [], [])
485 results = ([], [], [], [], [], [], [], [])
478 addsrc = results[0].append
486 addsrc = results[0].append
479 adddst = results[1].append
487 adddst = results[1].append
480 advsrc = results[2].append
488 advsrc = results[2].append
481 advdst = results[3].append
489 advdst = results[3].append
482 diverge = results[4].append
490 diverge = results[4].append
483 differ = results[5].append
491 differ = results[5].append
484 invalid = results[6].append
492 invalid = results[6].append
485 same = results[7].append
493 same = results[7].append
486
494
487 for b in sorted(bset):
495 for b in sorted(bset):
488 if b not in srcmarks:
496 if b not in srcmarks:
489 if b in dstmarks:
497 if b in dstmarks:
490 adddst((b, None, dstmarks[b]))
498 adddst((b, None, dstmarks[b]))
491 else:
499 else:
492 invalid((b, None, None))
500 invalid((b, None, None))
493 elif b not in dstmarks:
501 elif b not in dstmarks:
494 addsrc((b, srcmarks[b], None))
502 addsrc((b, srcmarks[b], None))
495 else:
503 else:
496 scid = srcmarks[b]
504 scid = srcmarks[b]
497 dcid = dstmarks[b]
505 dcid = dstmarks[b]
498 if scid == dcid:
506 if scid == dcid:
499 same((b, scid, dcid))
507 same((b, scid, dcid))
500 elif scid in repo and dcid in repo:
508 elif scid in repo and dcid in repo:
501 sctx = repo[scid]
509 sctx = repo[scid]
502 dctx = repo[dcid]
510 dctx = repo[dcid]
503 if sctx.rev() < dctx.rev():
511 if sctx.rev() < dctx.rev():
504 if validdest(repo, sctx, dctx):
512 if validdest(repo, sctx, dctx):
505 advdst((b, scid, dcid))
513 advdst((b, scid, dcid))
506 else:
514 else:
507 diverge((b, scid, dcid))
515 diverge((b, scid, dcid))
508 else:
516 else:
509 if validdest(repo, dctx, sctx):
517 if validdest(repo, dctx, sctx):
510 advsrc((b, scid, dcid))
518 advsrc((b, scid, dcid))
511 else:
519 else:
512 diverge((b, scid, dcid))
520 diverge((b, scid, dcid))
513 else:
521 else:
514 # it is too expensive to examine in detail, in this case
522 # it is too expensive to examine in detail, in this case
515 differ((b, scid, dcid))
523 differ((b, scid, dcid))
516
524
517 return results
525 return results
518
526
519 def _diverge(ui, b, path, localmarks, remotenode):
527 def _diverge(ui, b, path, localmarks, remotenode):
520 '''Return appropriate diverged bookmark for specified ``path``
528 '''Return appropriate diverged bookmark for specified ``path``
521
529
522 This returns None, if it is failed to assign any divergent
530 This returns None, if it is failed to assign any divergent
523 bookmark name.
531 bookmark name.
524
532
525 This reuses already existing one with "@number" suffix, if it
533 This reuses already existing one with "@number" suffix, if it
526 refers ``remotenode``.
534 refers ``remotenode``.
527 '''
535 '''
528 if b == '@':
536 if b == '@':
529 b = ''
537 b = ''
530 # try to use an @pathalias suffix
538 # try to use an @pathalias suffix
531 # if an @pathalias already exists, we overwrite (update) it
539 # if an @pathalias already exists, we overwrite (update) it
532 if path.startswith("file:"):
540 if path.startswith("file:"):
533 path = util.url(path).path
541 path = util.url(path).path
534 for p, u in ui.configitems("paths"):
542 for p, u in ui.configitems("paths"):
535 if u.startswith("file:"):
543 if u.startswith("file:"):
536 u = util.url(u).path
544 u = util.url(u).path
537 if path == u:
545 if path == u:
538 return '%s@%s' % (b, p)
546 return '%s@%s' % (b, p)
539
547
540 # assign a unique "@number" suffix newly
548 # assign a unique "@number" suffix newly
541 for x in range(1, 100):
549 for x in range(1, 100):
542 n = '%s@%d' % (b, x)
550 n = '%s@%d' % (b, x)
543 if n not in localmarks or localmarks[n] == remotenode:
551 if n not in localmarks or localmarks[n] == remotenode:
544 return n
552 return n
545
553
546 return None
554 return None
547
555
548 def unhexlifybookmarks(marks):
556 def unhexlifybookmarks(marks):
549 binremotemarks = {}
557 binremotemarks = {}
550 for name, node in marks.items():
558 for name, node in marks.items():
551 binremotemarks[name] = bin(node)
559 binremotemarks[name] = bin(node)
552 return binremotemarks
560 return binremotemarks
553
561
554 _binaryentry = struct.Struct('>20sH')
562 _binaryentry = struct.Struct('>20sH')
555
563
556 def binaryencode(bookmarks):
564 def binaryencode(bookmarks):
557 """encode a '(bookmark, node)' iterable into a binary stream
565 """encode a '(bookmark, node)' iterable into a binary stream
558
566
559 the binary format is:
567 the binary format is:
560
568
561 <node><bookmark-length><bookmark-name>
569 <node><bookmark-length><bookmark-name>
562
570
563 :node: is a 20 bytes binary node,
571 :node: is a 20 bytes binary node,
564 :bookmark-length: an unsigned short,
572 :bookmark-length: an unsigned short,
565 :bookmark-name: the name of the bookmark (of length <bookmark-length>)
573 :bookmark-name: the name of the bookmark (of length <bookmark-length>)
566
574
567 wdirid (all bits set) will be used as a special value for "missing"
575 wdirid (all bits set) will be used as a special value for "missing"
568 """
576 """
569 binarydata = []
577 binarydata = []
570 for book, node in bookmarks:
578 for book, node in bookmarks:
571 if not node: # None or ''
579 if not node: # None or ''
572 node = wdirid
580 node = wdirid
573 binarydata.append(_binaryentry.pack(node, len(book)))
581 binarydata.append(_binaryentry.pack(node, len(book)))
574 binarydata.append(book)
582 binarydata.append(book)
575 return ''.join(binarydata)
583 return ''.join(binarydata)
576
584
577 def binarydecode(stream):
585 def binarydecode(stream):
578 """decode a binary stream into an '(bookmark, node)' iterable
586 """decode a binary stream into an '(bookmark, node)' iterable
579
587
580 the binary format is:
588 the binary format is:
581
589
582 <node><bookmark-length><bookmark-name>
590 <node><bookmark-length><bookmark-name>
583
591
584 :node: is a 20 bytes binary node,
592 :node: is a 20 bytes binary node,
585 :bookmark-length: an unsigned short,
593 :bookmark-length: an unsigned short,
586 :bookmark-name: the name of the bookmark (of length <bookmark-length>))
594 :bookmark-name: the name of the bookmark (of length <bookmark-length>))
587
595
588 wdirid (all bits set) will be used as a special value for "missing"
596 wdirid (all bits set) will be used as a special value for "missing"
589 """
597 """
590 entrysize = _binaryentry.size
598 entrysize = _binaryentry.size
591 books = []
599 books = []
592 while True:
600 while True:
593 entry = stream.read(entrysize)
601 entry = stream.read(entrysize)
594 if len(entry) < entrysize:
602 if len(entry) < entrysize:
595 if entry:
603 if entry:
596 raise error.Abort(_('bad bookmark stream'))
604 raise error.Abort(_('bad bookmark stream'))
597 break
605 break
598 node, length = _binaryentry.unpack(entry)
606 node, length = _binaryentry.unpack(entry)
599 bookmark = stream.read(length)
607 bookmark = stream.read(length)
600 if len(bookmark) < length:
608 if len(bookmark) < length:
601 if entry:
609 if entry:
602 raise error.Abort(_('bad bookmark stream'))
610 raise error.Abort(_('bad bookmark stream'))
603 if node == wdirid:
611 if node == wdirid:
604 node = None
612 node = None
605 books.append((bookmark, node))
613 books.append((bookmark, node))
606 return books
614 return books
607
615
608 def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
616 def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
609 ui.debug("checking for updated bookmarks\n")
617 ui.debug("checking for updated bookmarks\n")
610 localmarks = repo._bookmarks
618 localmarks = repo._bookmarks
611 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same
619 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same
612 ) = comparebookmarks(repo, remotemarks, localmarks)
620 ) = comparebookmarks(repo, remotemarks, localmarks)
613
621
614 status = ui.status
622 status = ui.status
615 warn = ui.warn
623 warn = ui.warn
616 if ui.configbool('ui', 'quietbookmarkmove'):
624 if ui.configbool('ui', 'quietbookmarkmove'):
617 status = warn = ui.debug
625 status = warn = ui.debug
618
626
619 explicit = set(explicit)
627 explicit = set(explicit)
620 changed = []
628 changed = []
621 for b, scid, dcid in addsrc:
629 for b, scid, dcid in addsrc:
622 if scid in repo: # add remote bookmarks for changes we already have
630 if scid in repo: # add remote bookmarks for changes we already have
623 changed.append((b, scid, status,
631 changed.append((b, scid, status,
624 _("adding remote bookmark %s\n") % (b)))
632 _("adding remote bookmark %s\n") % (b)))
625 elif b in explicit:
633 elif b in explicit:
626 explicit.remove(b)
634 explicit.remove(b)
627 ui.warn(_("remote bookmark %s points to locally missing %s\n")
635 ui.warn(_("remote bookmark %s points to locally missing %s\n")
628 % (b, hex(scid)[:12]))
636 % (b, hex(scid)[:12]))
629
637
630 for b, scid, dcid in advsrc:
638 for b, scid, dcid in advsrc:
631 changed.append((b, scid, status,
639 changed.append((b, scid, status,
632 _("updating bookmark %s\n") % (b)))
640 _("updating bookmark %s\n") % (b)))
633 # remove normal movement from explicit set
641 # remove normal movement from explicit set
634 explicit.difference_update(d[0] for d in changed)
642 explicit.difference_update(d[0] for d in changed)
635
643
636 for b, scid, dcid in diverge:
644 for b, scid, dcid in diverge:
637 if b in explicit:
645 if b in explicit:
638 explicit.discard(b)
646 explicit.discard(b)
639 changed.append((b, scid, status,
647 changed.append((b, scid, status,
640 _("importing bookmark %s\n") % (b)))
648 _("importing bookmark %s\n") % (b)))
641 else:
649 else:
642 db = _diverge(ui, b, path, localmarks, scid)
650 db = _diverge(ui, b, path, localmarks, scid)
643 if db:
651 if db:
644 changed.append((db, scid, warn,
652 changed.append((db, scid, warn,
645 _("divergent bookmark %s stored as %s\n") %
653 _("divergent bookmark %s stored as %s\n") %
646 (b, db)))
654 (b, db)))
647 else:
655 else:
648 warn(_("warning: failed to assign numbered name "
656 warn(_("warning: failed to assign numbered name "
649 "to divergent bookmark %s\n") % (b))
657 "to divergent bookmark %s\n") % (b))
650 for b, scid, dcid in adddst + advdst:
658 for b, scid, dcid in adddst + advdst:
651 if b in explicit:
659 if b in explicit:
652 explicit.discard(b)
660 explicit.discard(b)
653 changed.append((b, scid, status,
661 changed.append((b, scid, status,
654 _("importing bookmark %s\n") % (b)))
662 _("importing bookmark %s\n") % (b)))
655 for b, scid, dcid in differ:
663 for b, scid, dcid in differ:
656 if b in explicit:
664 if b in explicit:
657 explicit.remove(b)
665 explicit.remove(b)
658 ui.warn(_("remote bookmark %s points to locally missing %s\n")
666 ui.warn(_("remote bookmark %s points to locally missing %s\n")
659 % (b, hex(scid)[:12]))
667 % (b, hex(scid)[:12]))
660
668
661 if changed:
669 if changed:
662 tr = trfunc()
670 tr = trfunc()
663 changes = []
671 changes = []
664 for b, node, writer, msg in sorted(changed):
672 for b, node, writer, msg in sorted(changed):
665 changes.append((b, node))
673 changes.append((b, node))
666 writer(msg)
674 writer(msg)
667 localmarks.applychanges(repo, tr, changes)
675 localmarks.applychanges(repo, tr, changes)
668
676
669 def incoming(ui, repo, peer):
677 def incoming(ui, repo, peer):
670 '''Show bookmarks incoming from other to repo
678 '''Show bookmarks incoming from other to repo
671 '''
679 '''
672 ui.status(_("searching for changed bookmarks\n"))
680 ui.status(_("searching for changed bookmarks\n"))
673
681
674 with peer.commandexecutor() as e:
682 with peer.commandexecutor() as e:
675 remotemarks = unhexlifybookmarks(e.callcommand('listkeys', {
683 remotemarks = unhexlifybookmarks(e.callcommand('listkeys', {
676 'namespace': 'bookmarks',
684 'namespace': 'bookmarks',
677 }).result())
685 }).result())
678
686
679 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
687 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
680 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
688 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
681
689
682 incomings = []
690 incomings = []
683 if ui.debugflag:
691 if ui.debugflag:
684 getid = lambda id: id
692 getid = lambda id: id
685 else:
693 else:
686 getid = lambda id: id[:12]
694 getid = lambda id: id[:12]
687 if ui.verbose:
695 if ui.verbose:
688 def add(b, id, st):
696 def add(b, id, st):
689 incomings.append(" %-25s %s %s\n" % (b, getid(id), st))
697 incomings.append(" %-25s %s %s\n" % (b, getid(id), st))
690 else:
698 else:
691 def add(b, id, st):
699 def add(b, id, st):
692 incomings.append(" %-25s %s\n" % (b, getid(id)))
700 incomings.append(" %-25s %s\n" % (b, getid(id)))
693 for b, scid, dcid in addsrc:
701 for b, scid, dcid in addsrc:
694 # i18n: "added" refers to a bookmark
702 # i18n: "added" refers to a bookmark
695 add(b, hex(scid), _('added'))
703 add(b, hex(scid), _('added'))
696 for b, scid, dcid in advsrc:
704 for b, scid, dcid in advsrc:
697 # i18n: "advanced" refers to a bookmark
705 # i18n: "advanced" refers to a bookmark
698 add(b, hex(scid), _('advanced'))
706 add(b, hex(scid), _('advanced'))
699 for b, scid, dcid in diverge:
707 for b, scid, dcid in diverge:
700 # i18n: "diverged" refers to a bookmark
708 # i18n: "diverged" refers to a bookmark
701 add(b, hex(scid), _('diverged'))
709 add(b, hex(scid), _('diverged'))
702 for b, scid, dcid in differ:
710 for b, scid, dcid in differ:
703 # i18n: "changed" refers to a bookmark
711 # i18n: "changed" refers to a bookmark
704 add(b, hex(scid), _('changed'))
712 add(b, hex(scid), _('changed'))
705
713
706 if not incomings:
714 if not incomings:
707 ui.status(_("no changed bookmarks found\n"))
715 ui.status(_("no changed bookmarks found\n"))
708 return 1
716 return 1
709
717
710 for s in sorted(incomings):
718 for s in sorted(incomings):
711 ui.write(s)
719 ui.write(s)
712
720
713 return 0
721 return 0
714
722
715 def outgoing(ui, repo, other):
723 def outgoing(ui, repo, other):
716 '''Show bookmarks outgoing from repo to other
724 '''Show bookmarks outgoing from repo to other
717 '''
725 '''
718 ui.status(_("searching for changed bookmarks\n"))
726 ui.status(_("searching for changed bookmarks\n"))
719
727
720 remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
728 remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
721 r = comparebookmarks(repo, repo._bookmarks, remotemarks)
729 r = comparebookmarks(repo, repo._bookmarks, remotemarks)
722 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
730 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
723
731
724 outgoings = []
732 outgoings = []
725 if ui.debugflag:
733 if ui.debugflag:
726 getid = lambda id: id
734 getid = lambda id: id
727 else:
735 else:
728 getid = lambda id: id[:12]
736 getid = lambda id: id[:12]
729 if ui.verbose:
737 if ui.verbose:
730 def add(b, id, st):
738 def add(b, id, st):
731 outgoings.append(" %-25s %s %s\n" % (b, getid(id), st))
739 outgoings.append(" %-25s %s %s\n" % (b, getid(id), st))
732 else:
740 else:
733 def add(b, id, st):
741 def add(b, id, st):
734 outgoings.append(" %-25s %s\n" % (b, getid(id)))
742 outgoings.append(" %-25s %s\n" % (b, getid(id)))
735 for b, scid, dcid in addsrc:
743 for b, scid, dcid in addsrc:
736 # i18n: "added refers to a bookmark
744 # i18n: "added refers to a bookmark
737 add(b, hex(scid), _('added'))
745 add(b, hex(scid), _('added'))
738 for b, scid, dcid in adddst:
746 for b, scid, dcid in adddst:
739 # i18n: "deleted" refers to a bookmark
747 # i18n: "deleted" refers to a bookmark
740 add(b, ' ' * 40, _('deleted'))
748 add(b, ' ' * 40, _('deleted'))
741 for b, scid, dcid in advsrc:
749 for b, scid, dcid in advsrc:
742 # i18n: "advanced" refers to a bookmark
750 # i18n: "advanced" refers to a bookmark
743 add(b, hex(scid), _('advanced'))
751 add(b, hex(scid), _('advanced'))
744 for b, scid, dcid in diverge:
752 for b, scid, dcid in diverge:
745 # i18n: "diverged" refers to a bookmark
753 # i18n: "diverged" refers to a bookmark
746 add(b, hex(scid), _('diverged'))
754 add(b, hex(scid), _('diverged'))
747 for b, scid, dcid in differ:
755 for b, scid, dcid in differ:
748 # i18n: "changed" refers to a bookmark
756 # i18n: "changed" refers to a bookmark
749 add(b, hex(scid), _('changed'))
757 add(b, hex(scid), _('changed'))
750
758
751 if not outgoings:
759 if not outgoings:
752 ui.status(_("no changed bookmarks found\n"))
760 ui.status(_("no changed bookmarks found\n"))
753 return 1
761 return 1
754
762
755 for s in sorted(outgoings):
763 for s in sorted(outgoings):
756 ui.write(s)
764 ui.write(s)
757
765
758 return 0
766 return 0
759
767
760 def summary(repo, peer):
768 def summary(repo, peer):
761 '''Compare bookmarks between repo and other for "hg summary" output
769 '''Compare bookmarks between repo and other for "hg summary" output
762
770
763 This returns "(# of incoming, # of outgoing)" tuple.
771 This returns "(# of incoming, # of outgoing)" tuple.
764 '''
772 '''
765 with peer.commandexecutor() as e:
773 with peer.commandexecutor() as e:
766 remotemarks = unhexlifybookmarks(e.callcommand('listkeys', {
774 remotemarks = unhexlifybookmarks(e.callcommand('listkeys', {
767 'namespace': 'bookmarks',
775 'namespace': 'bookmarks',
768 }).result())
776 }).result())
769
777
770 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
778 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
771 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
779 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
772 return (len(addsrc), len(adddst))
780 return (len(addsrc), len(adddst))
773
781
774 def validdest(repo, old, new):
782 def validdest(repo, old, new):
775 """Is the new bookmark destination a valid update from the old one"""
783 """Is the new bookmark destination a valid update from the old one"""
776 repo = repo.unfiltered()
784 repo = repo.unfiltered()
777 if old == new:
785 if old == new:
778 # Old == new -> nothing to update.
786 # Old == new -> nothing to update.
779 return False
787 return False
780 elif not old:
788 elif not old:
781 # old is nullrev, anything is valid.
789 # old is nullrev, anything is valid.
782 # (new != nullrev has been excluded by the previous check)
790 # (new != nullrev has been excluded by the previous check)
783 return True
791 return True
784 elif repo.obsstore:
792 elif repo.obsstore:
785 return new.node() in obsutil.foreground(repo, [old.node()])
793 return new.node() in obsutil.foreground(repo, [old.node()])
786 else:
794 else:
787 # still an independent clause as it is lazier (and therefore faster)
795 # still an independent clause as it is lazier (and therefore faster)
788 return old.descendant(new)
796 return old.descendant(new)
789
797
790 def checkformat(repo, mark):
798 def checkformat(repo, mark):
791 """return a valid version of a potential bookmark name
799 """return a valid version of a potential bookmark name
792
800
793 Raises an abort error if the bookmark name is not valid.
801 Raises an abort error if the bookmark name is not valid.
794 """
802 """
795 mark = mark.strip()
803 mark = mark.strip()
796 if not mark:
804 if not mark:
797 raise error.Abort(_("bookmark names cannot consist entirely of "
805 raise error.Abort(_("bookmark names cannot consist entirely of "
798 "whitespace"))
806 "whitespace"))
799 scmutil.checknewlabel(repo, mark, 'bookmark')
807 scmutil.checknewlabel(repo, mark, 'bookmark')
800 return mark
808 return mark
801
809
802 def delete(repo, tr, names):
810 def delete(repo, tr, names):
803 """remove a mark from the bookmark store
811 """remove a mark from the bookmark store
804
812
805 Raises an abort error if mark does not exist.
813 Raises an abort error if mark does not exist.
806 """
814 """
807 marks = repo._bookmarks
815 marks = repo._bookmarks
808 changes = []
816 changes = []
809 for mark in names:
817 for mark in names:
810 if mark not in marks:
818 if mark not in marks:
811 raise error.Abort(_("bookmark '%s' does not exist") % mark)
819 raise error.Abort(_("bookmark '%s' does not exist") % mark)
812 if mark == repo._activebookmark:
820 if mark == repo._activebookmark:
813 deactivate(repo)
821 deactivate(repo)
814 changes.append((mark, None))
822 changes.append((mark, None))
815 marks.applychanges(repo, tr, changes)
823 marks.applychanges(repo, tr, changes)
816
824
817 def rename(repo, tr, old, new, force=False, inactive=False):
825 def rename(repo, tr, old, new, force=False, inactive=False):
818 """rename a bookmark from old to new
826 """rename a bookmark from old to new
819
827
820 If force is specified, then the new name can overwrite an existing
828 If force is specified, then the new name can overwrite an existing
821 bookmark.
829 bookmark.
822
830
823 If inactive is specified, then do not activate the new bookmark.
831 If inactive is specified, then do not activate the new bookmark.
824
832
825 Raises an abort error if old is not in the bookmark store.
833 Raises an abort error if old is not in the bookmark store.
826 """
834 """
827 marks = repo._bookmarks
835 marks = repo._bookmarks
828 mark = checkformat(repo, new)
836 mark = checkformat(repo, new)
829 if old not in marks:
837 if old not in marks:
830 raise error.Abort(_("bookmark '%s' does not exist") % old)
838 raise error.Abort(_("bookmark '%s' does not exist") % old)
831 changes = []
839 changes = []
832 for bm in marks.checkconflict(mark, force):
840 for bm in marks.checkconflict(mark, force):
833 changes.append((bm, None))
841 changes.append((bm, None))
834 changes.extend([(mark, marks[old]), (old, None)])
842 changes.extend([(mark, marks[old]), (old, None)])
835 marks.applychanges(repo, tr, changes)
843 marks.applychanges(repo, tr, changes)
836 if repo._activebookmark == old and not inactive:
844 if repo._activebookmark == old and not inactive:
837 activate(repo, mark)
845 activate(repo, mark)
838
846
839 def addbookmarks(repo, tr, names, rev=None, force=False, inactive=False):
847 def addbookmarks(repo, tr, names, rev=None, force=False, inactive=False):
840 """add a list of bookmarks
848 """add a list of bookmarks
841
849
842 If force is specified, then the new name can overwrite an existing
850 If force is specified, then the new name can overwrite an existing
843 bookmark.
851 bookmark.
844
852
845 If inactive is specified, then do not activate any bookmark. Otherwise, the
853 If inactive is specified, then do not activate any bookmark. Otherwise, the
846 first bookmark is activated.
854 first bookmark is activated.
847
855
848 Raises an abort error if old is not in the bookmark store.
856 Raises an abort error if old is not in the bookmark store.
849 """
857 """
850 marks = repo._bookmarks
858 marks = repo._bookmarks
851 cur = repo['.'].node()
859 cur = repo['.'].node()
852 newact = None
860 newact = None
853 changes = []
861 changes = []
854 hiddenrev = None
862 hiddenrev = None
855
863
856 # unhide revs if any
864 # unhide revs if any
857 if rev:
865 if rev:
858 repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
866 repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
859
867
860 for mark in names:
868 for mark in names:
861 mark = checkformat(repo, mark)
869 mark = checkformat(repo, mark)
862 if newact is None:
870 if newact is None:
863 newact = mark
871 newact = mark
864 if inactive and mark == repo._activebookmark:
872 if inactive and mark == repo._activebookmark:
865 deactivate(repo)
873 deactivate(repo)
866 return
874 return
867 tgt = cur
875 tgt = cur
868 if rev:
876 if rev:
869 ctx = scmutil.revsingle(repo, rev)
877 ctx = scmutil.revsingle(repo, rev)
870 if ctx.hidden():
878 if ctx.hidden():
871 hiddenrev = ctx.hex()[:12]
879 hiddenrev = ctx.hex()[:12]
872 tgt = ctx.node()
880 tgt = ctx.node()
873 for bm in marks.checkconflict(mark, force, tgt):
881 for bm in marks.checkconflict(mark, force, tgt):
874 changes.append((bm, None))
882 changes.append((bm, None))
875 changes.append((mark, tgt))
883 changes.append((mark, tgt))
876
884
877 if hiddenrev:
885 if hiddenrev:
878 repo.ui.warn(_("bookmarking hidden changeset %s\n") % hiddenrev)
886 repo.ui.warn(_("bookmarking hidden changeset %s\n") % hiddenrev)
879
887
880 if ctx.obsolete():
888 if ctx.obsolete():
881 msg = obsutil._getfilteredreason(repo, "%s" % hiddenrev, ctx)
889 msg = obsutil._getfilteredreason(repo, "%s" % hiddenrev, ctx)
882 repo.ui.warn("(%s)\n" % msg)
890 repo.ui.warn("(%s)\n" % msg)
883
891
884 marks.applychanges(repo, tr, changes)
892 marks.applychanges(repo, tr, changes)
885 if not inactive and cur == marks[newact] and not rev:
893 if not inactive and cur == marks[newact] and not rev:
886 activate(repo, newact)
894 activate(repo, newact)
887 elif cur != tgt and newact == repo._activebookmark:
895 elif cur != tgt and newact == repo._activebookmark:
888 deactivate(repo)
896 deactivate(repo)
889
897
890 def _printbookmarks(ui, repo, bmarks, **opts):
898 def _printbookmarks(ui, repo, bmarks, **opts):
891 """private method to print bookmarks
899 """private method to print bookmarks
892
900
893 Provides a way for extensions to control how bookmarks are printed (e.g.
901 Provides a way for extensions to control how bookmarks are printed (e.g.
894 prepend or postpend names)
902 prepend or postpend names)
895 """
903 """
896 opts = pycompat.byteskwargs(opts)
904 opts = pycompat.byteskwargs(opts)
897 fm = ui.formatter('bookmarks', opts)
905 fm = ui.formatter('bookmarks', opts)
898 hexfn = fm.hexfunc
906 hexfn = fm.hexfunc
899 if len(bmarks) == 0 and fm.isplain():
907 if len(bmarks) == 0 and fm.isplain():
900 ui.status(_("no bookmarks set\n"))
908 ui.status(_("no bookmarks set\n"))
901 for bmark, (n, prefix, label) in sorted(bmarks.iteritems()):
909 for bmark, (n, prefix, label) in sorted(bmarks.iteritems()):
902 fm.startitem()
910 fm.startitem()
903 if not ui.quiet:
911 if not ui.quiet:
904 fm.plain(' %s ' % prefix, label=label)
912 fm.plain(' %s ' % prefix, label=label)
905 fm.write('bookmark', '%s', bmark, label=label)
913 fm.write('bookmark', '%s', bmark, label=label)
906 pad = " " * (25 - encoding.colwidth(bmark))
914 pad = " " * (25 - encoding.colwidth(bmark))
907 fm.condwrite(not ui.quiet, 'rev node', pad + ' %d:%s',
915 fm.condwrite(not ui.quiet, 'rev node', pad + ' %d:%s',
908 repo.changelog.rev(n), hexfn(n), label=label)
916 repo.changelog.rev(n), hexfn(n), label=label)
909 fm.data(active=(activebookmarklabel in label))
917 fm.data(active=(activebookmarklabel in label))
910 fm.plain('\n')
918 fm.plain('\n')
911 fm.end()
919 fm.end()
912
920
913 def printbookmarks(ui, repo, **opts):
921 def printbookmarks(ui, repo, **opts):
914 """print bookmarks to a formatter
922 """print bookmarks to a formatter
915
923
916 Provides a way for extensions to control how bookmarks are printed.
924 Provides a way for extensions to control how bookmarks are printed.
917 """
925 """
918 marks = repo._bookmarks
926 marks = repo._bookmarks
919 bmarks = {}
927 bmarks = {}
920 for bmark, n in sorted(marks.iteritems()):
928 for bmark, n in sorted(marks.iteritems()):
921 active = repo._activebookmark
929 active = repo._activebookmark
922 if bmark == active:
930 if bmark == active:
923 prefix, label = '*', activebookmarklabel
931 prefix, label = '*', activebookmarklabel
924 else:
932 else:
925 prefix, label = ' ', ''
933 prefix, label = ' ', ''
926
934
927 bmarks[bmark] = (n, prefix, label)
935 bmarks[bmark] = (n, prefix, label)
928 _printbookmarks(ui, repo, bmarks, **opts)
936 _printbookmarks(ui, repo, bmarks, **opts)
929
937
def preparehookargs(name, old, new):
    if new is None:
        new = ''
    if old is None:
        old = ''
    return {'bookmark': name,
            'node': hex(new),
            'oldnode': hex(old)}
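
# For illustration (editor's note, values assumed): for a bookmark "foo"
# newly set to node `n`, preparehookargs('foo', None, n) returns
# {'bookmark': 'foo', 'node': hex(n), 'oldnode': ''} -- the empty string
# standing in for "no previous node" -- which callers can pass to
# repo.hook() as keyword arguments.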
@@ -1,2378 +1,2374 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

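# Editor's sketch of typical usage (it mirrors declarations appearing later
# in this file): a property recomputed only when the named files change is
# declared as
#
#     @repofilecache('bookmarks', 'bookmarks.current')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
#
# with storecache used the same way for files under .hg/store.
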
def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

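# Usage sketch (hypothetical caller, editor's illustration): code that only
# wants to refresh an already-loaded cache, without forcing a load, can do
#
#     obj, cached = isfilecached(repo, 'changelog')
#     if cached:
#         ...  # update obj in place
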
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

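# Illustration (editor's note): methods that must operate on every revision,
# ignoring the current repoview filter, are declared as
#
#     @unfilteredmethod
#     def destroyed(self):
#         ...
#
# so the body always receives repo.unfiltered() as its first argument.
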
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

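# Usage sketch (editor's illustration of the executor protocol): commands are
# issued through a context manager and resolved futures, e.g.
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand('lookup', {'key': 'tip'})
#     node = f.result()
#
# For local peers the future is already resolved when callcommand() returns.
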
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

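# Registration sketch (editor's illustration; the requirement name is
# hypothetical): an extension advertising its own requirement would do
# something like
#
#     def featuresetup(ui, supported):
#         supported |= {'exp-my-feature'}
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
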
@interfaceutil.implementer(repository.completelocalrepository)
class localrepository(object):

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, path, create=False, intents=None):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        self.auditor = pathutil.pathauditor(
            self.root, callback=self._checknested)
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        self.nofsauditor = pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extensions to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

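    # For example (editor's note): repo.filtered('visible') hides hidden
    # (e.g. obsolete) changesets, while repo.filtered('served') additionally
    # hides secret ones; both return lightweight repoview wrappers around
    # this same repository.
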
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @repofilecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @repofilecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if changegroup.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        target = self
        if self.shared():
            from . import hg
            target = hg.sharedreposource(self)
        narrowspec.save(target, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

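    # Lookup examples (editor's sketch): repo[None] is the working-directory
    # context, repo[0] the first revision, repo['tip'] a symbolic name, and
    # repo[0:3] a list of changectx objects with filtered revisions skipped.
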
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

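    # Usage sketch (editor's illustration of the %-formatting contract):
    #
    #     repo.revs('%d::heads()', rev)      # %d escapes a revision number
    #     repo.revs('branch(%s)', name)      # %s escapes a string
    #
    # See revsetlang.formatspec for the full list of specifiers.
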
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

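    # Illustrative usage of revs()/set() (a sketch, not part of this module):
    # callers escape untrusted values through the %-formatting described
    # above instead of building the revset string by hand, e.g.:
    #
    #     for ctx in repo.set('ancestors(%d) and not public()', rev):
    #         ui.status('%s\n' % ctx)
    #
    # '%d' is escaped by revsetlang.formatspec; the names 'rev' and 'ui' are
    # hypothetical stand-ins for whatever the caller has in scope.
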
    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

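    # Illustrative usage (a sketch): expanding user aliases while overriding
    # one of them locally; 'mybase' is a hypothetical alias name:
    #
    #     revs = repo.anyrevs(['not public()', 'mybase'], user=True,
    #                         localalias={'mybase': 'ancestor(head())'})
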
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

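    # Illustrative usage (a sketch): an extension that registered a custom
    # 'myhook' hook could fire it like this; keyword arguments are exposed to
    # shell hooks as HG_* environment variables:
    #
    #     repo.hook('myhook', throw=False, node=hex(somenode))
    #
    # 'myhook' and 'somenode' are hypothetical names for this example only.
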
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

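    # Note: per the change this file comes from ("bookmarks: extract function
    # that looks up bookmark names by node"), bmstore.names(node) replaces the
    # former inline loop here and is expected to preserve the sorted-list
    # result that loop produced.
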
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

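    # Illustrative usage (a sketch): resolving a branch name while tolerating
    # its absence, as a namespace-style caller might:
    #
    #     node = repo.branchtip('stable', ignoremissing=True)
    #     if node is None:
    #         pass  # branch does not exist; caller decides what to do
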
    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

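    # Illustrative behavior (a sketch): known() answers membership queries in
    # bulk, treating filtered (e.g. hidden) revisions as unknown:
    #
    #     repo.known([presentnode, missingnode])  # -> [True, False]
    #
    # 'presentnode' and 'missingnode' are hypothetical binary node ids.
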
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

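    # Illustrative flags handling (a sketch): 'l' writes a symlink whose
    # target is ``data``, 'x' marks the file executable, and '' writes a
    # plain non-executable file, e.g. a hypothetical caller:
    #
    #     repo.wwrite('bin/run', scriptdata, 'x')  # executable file
    #     repo.wwrite('link', 'target/path', 'l')  # symlink to target/path
    #
    # 'scriptdata' and both paths are invented for this example.
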
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['revs'] = xrange(0, 0)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

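    # Illustrative usage (a sketch): callers take the store lock first, then
    # open a transaction and either close or release it; 'mytxn' is a
    # hypothetical description string:
    #
    #     with repo.lock():
    #         tr = repo.transaction('mytxn')
    #         try:
    #             pass  # write store data through tr
    #             tr.close()
    #         finally:
    #             tr.release()
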
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

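    # Illustrative usage (a sketch): maintenance code can force every known
    # cache warm; 'full=True' additionally walks the whole changelog to
    # populate the rev-branch cache. A lock is assumed here so the cache
    # writes are safe:
    #
    #     with repo.lock():
    #         repo.updatecaches(full=True)
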
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
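        # Python's for/else: the else branch below runs only when the loop
        # finishes without 'break', i.e. when no held lock was found above.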
        else: # no lock has been found.
            callback()

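    # A minimal usage sketch (the 'repo' name is illustrative): take the
    # locks in wlock -> lock order and release them in reverse, mirroring
    # what commit() below does:
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       # ... modify the store ...
    #   finally:
    #       lockmod.release(lock, wlock)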
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a deadlock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a deadlock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisitions would not cause a deadlock; they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

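    # _filecommit returns the filelog node to record in the manifest for the
    # file: either a reused parent node, the node of a newly added filelog
    # revision, or fparent1 when only flags changed during a merge.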
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (e.g., issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

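        # The default arguments bind the current values of ret, hookp1 and
        # hookp2 at definition time, so the closure stays valid after this
        # method returns.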
        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
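                # the linkrev for every new filelog and manifest revision is
                # the revision number the pending changelog entry will get:
                # the current length of the changelog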
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets: if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meantime. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

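    # branches() follows first parents from each requested node until it
    # reaches a merge or the root, producing one (tipmost, earliest,
    # parent1, parent2) tuple per linear run of history.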
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

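    # between() walks first-parent history from each 'top' towards 'bottom',
    # sampling the nodes at exponentially growing distances (1, 2, 4, 8,
    # ...); the legacy discovery protocol uses this sparse sample to narrow
    # down a common ancestor without transferring the whole chain.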
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

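    # pushkey namespaces (e.g. 'bookmarks' or 'phases') map string keys to
    # values; the update only applies when 'old' matches the current value,
    # giving pushkey compare-and-swap semantics.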
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

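    # The message is stashed in .hg/last-message.txt so it can be recovered
    # (the commit() code above points the user at it) if the transaction is
    # rolled back.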
    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
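# (aftertrans returns a plain closure over copied tuples rather than a bound
# method, so the transaction's callbacks do not keep the repository alive)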
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create, intents=None):
    return localrepository(ui, util.urllocalpath(path), create,
                           intents=intents)

def islocal(path):
    return True

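# With stock configuration this typically yields
# {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'};
# the exact set depends on the format.* and experimental.* settings read
# below.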
def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements