bookmarks: move property methods into localrepo
Matt Mackall
r13355:cce2e7b7 default
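This changeset moves the _bookmarks and _bookmarkcurrent propertycache methods out of the bookmarks extension's bookmark_repo subclass (the lines deleted in the first hunk below) into localrepository itself (the lines added in the second hunk), so every local repository exposes them whether or not the extension is loaded. For context, here is a minimal sketch of how a cache-on-first-access descriptor in the style of util.propertycache behaves; this is an illustrative reimplementation, not Mercurial's exact code:

    class propertycache(object):
        """Compute an attribute once, then cache it on the instance."""
        def __init__(self, func):
            self.func = func
            self.name = func.__name__
        def __get__(self, obj, objtype=None):
            value = self.func(obj)
            # Storing the value in the instance __dict__ shadows this
            # non-data descriptor, so later reads never reach __get__.
            obj.__dict__[self.name] = value
            return value

Because the cached value lives in the instance __dict__, dropping it is just delattr(repo, '_bookmarks'); that is exactly what the extension's invalidate() override does to force a fresh bookmarks.read() on the next access.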
@@ -1,431 +1,423 @@ hgext/bookmarks.py
1 1 # Mercurial extension to provide the 'hg bookmark' command
2 2 #
3 3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''track a line of development with movable markers
9 9
10 10 Bookmarks are local movable markers to changesets. Every bookmark
11 11 points to a changeset identified by its hash. If you commit a
12 12 changeset that is based on a changeset that has a bookmark on it, the
13 13 bookmark shifts to the new changeset.
14 14
15 15 It is possible to use bookmark names in every revision lookup (e.g.
16 16 :hg:`merge`, :hg:`update`).
17 17
18 18 By default, when several bookmarks point to the same changeset, they
19 19 will all move forward together. It is possible to obtain a more
20 20 git-like experience by adding the following configuration option to
21 21 your configuration file::
22 22
23 23 [bookmarks]
24 24 track.current = True
25 25
26 26 This will cause Mercurial to track the bookmark that you are currently
27 27 using, and only update it. This is similar to git's approach to
28 28 branching.
29 29 '''
30 30
31 31 from mercurial.i18n import _
32 32 from mercurial.node import nullid, nullrev, bin, hex, short
33 33 from mercurial import util, commands, repair, extensions, pushkey, hg, url
34 34 from mercurial import revset, encoding
35 35 from mercurial import bookmarks
36 36 import os
37 37
38 38 def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None):
39 39 '''track a line of development with movable markers
40 40
41 41 Bookmarks are pointers to certain commits that move when
42 42 committing. Bookmarks are local. They can be renamed, copied and
43 43 deleted. It is possible to use bookmark names in :hg:`merge` and
44 44 :hg:`update` to merge and update respectively to a given bookmark.
45 45
46 46 You can use :hg:`bookmark NAME` to set a bookmark on the working
47 47 directory's parent revision with the given name. If you specify
48 48 a revision using -r REV (where REV may be an existing bookmark),
49 49 the bookmark is assigned to that revision.
50 50
51 51 Bookmarks can be pushed and pulled between repositories (see :hg:`help
52 52 push` and :hg:`help pull`). This requires the bookmark extension to be
53 53 enabled for both the local and remote repositories.
54 54 '''
55 55 hexfn = ui.debugflag and hex or short
56 56 marks = repo._bookmarks
57 57 cur = repo.changectx('.').node()
58 58
59 59 if rename:
60 60 if rename not in marks:
61 61 raise util.Abort(_("a bookmark of this name does not exist"))
62 62 if mark in marks and not force:
63 63 raise util.Abort(_("a bookmark of the same name already exists"))
64 64 if mark is None:
65 65 raise util.Abort(_("new bookmark name required"))
66 66 marks[mark] = marks[rename]
67 67 del marks[rename]
68 68 if repo._bookmarkcurrent == rename:
69 69 bookmarks.setcurrent(repo, mark)
70 70 bookmarks.write(repo)
71 71 return
72 72
73 73 if delete:
74 74 if mark is None:
75 75 raise util.Abort(_("bookmark name required"))
76 76 if mark not in marks:
77 77 raise util.Abort(_("a bookmark of this name does not exist"))
78 78 if mark == repo._bookmarkcurrent:
79 79 bookmarks.setcurrent(repo, None)
80 80 del marks[mark]
81 81 bookmarks.write(repo)
82 82 return
83 83
84 84 if mark is not None:
85 85 if "\n" in mark:
86 86 raise util.Abort(_("bookmark name cannot contain newlines"))
87 87 mark = mark.strip()
88 88 if not mark:
89 89 raise util.Abort(_("bookmark names cannot consist entirely of "
90 90 "whitespace"))
91 91 if mark in marks and not force:
92 92 raise util.Abort(_("a bookmark of the same name already exists"))
93 93 if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
94 94 and not force):
95 95 raise util.Abort(
96 96 _("a bookmark cannot have the name of an existing branch"))
97 97 if rev:
98 98 marks[mark] = repo.lookup(rev)
99 99 else:
100 100 marks[mark] = repo.changectx('.').node()
101 101 bookmarks.setcurrent(repo, mark)
102 102 bookmarks.write(repo)
103 103 return
104 104
105 105 if mark is None:
106 106 if rev:
107 107 raise util.Abort(_("bookmark name required"))
108 108 if len(marks) == 0:
109 109 ui.status(_("no bookmarks set\n"))
110 110 else:
111 111 for bmark, n in marks.iteritems():
112 112 if ui.configbool('bookmarks', 'track.current'):
113 113 current = repo._bookmarkcurrent
114 114 if bmark == current and n == cur:
115 115 prefix, label = '*', 'bookmarks.current'
116 116 else:
117 117 prefix, label = ' ', ''
118 118 else:
119 119 if n == cur:
120 120 prefix, label = '*', 'bookmarks.current'
121 121 else:
122 122 prefix, label = ' ', ''
123 123
124 124 if ui.quiet:
125 125 ui.write("%s\n" % bmark, label=label)
126 126 else:
127 127 ui.write(" %s %-25s %d:%s\n" % (
128 128 prefix, bmark, repo.changelog.rev(n), hexfn(n)),
129 129 label=label)
130 130 return
131 131
132 132 def _revstostrip(changelog, node):
133 133 srev = changelog.rev(node)
134 134 tostrip = [srev]
135 135 saveheads = []
136 136 for r in xrange(srev, len(changelog)):
137 137 parents = changelog.parentrevs(r)
138 138 if parents[0] in tostrip or parents[1] in tostrip:
139 139 tostrip.append(r)
140 140 if parents[1] != nullrev:
141 141 for p in parents:
142 142 if p not in tostrip and p > srev:
143 143 saveheads.append(p)
144 144 return [r for r in tostrip if r not in saveheads]
145 145
146 146 def strip(oldstrip, ui, repo, node, backup="all"):
147 147 """Strip bookmarks if revisions are stripped using
148 148 the mercurial.strip method. This usually happens during
149 149 qpush and qpop"""
150 150 revisions = _revstostrip(repo.changelog, node)
151 151 marks = repo._bookmarks
152 152 update = []
153 153 for mark, n in marks.iteritems():
154 154 if repo.changelog.rev(n) in revisions:
155 155 update.append(mark)
156 156 oldstrip(ui, repo, node, backup)
157 157 if len(update) > 0:
158 158 for m in update:
159 159 marks[m] = repo.changectx('.').node()
160 160 bookmarks.write(repo)
161 161
162 162 def reposetup(ui, repo):
163 163 if not repo.local():
164 164 return
165 165
166 166 class bookmark_repo(repo.__class__):
167 @util.propertycache
168 def _bookmarks(self):
169 return bookmarks.read(self)
170
171 @util.propertycache
172 def _bookmarkcurrent(self):
173 return bookmarks.readcurrent(self)
174
175 167 def rollback(self, dryrun=False):
176 168 if os.path.exists(self.join('undo.bookmarks')):
177 169 if not dryrun:
178 170 util.rename(self.join('undo.bookmarks'), self.join('bookmarks'))
179 171 elif not os.path.exists(self.sjoin("undo")):
180 172 # avoid "no rollback information available" message
181 173 return 0
182 174 return super(bookmark_repo, self).rollback(dryrun)
183 175
184 176 def lookup(self, key):
185 177 if key in self._bookmarks:
186 178 key = self._bookmarks[key]
187 179 return super(bookmark_repo, self).lookup(key)
188 180
189 181 def commitctx(self, ctx, error=False):
190 182 """Add a revision to the repository and
191 183 move the bookmark"""
192 184 wlock = self.wlock() # do both commit and bookmark with lock held
193 185 try:
194 186 node = super(bookmark_repo, self).commitctx(ctx, error)
195 187 if node is None:
196 188 return None
197 189 parents = self.changelog.parents(node)
198 190 if parents[1] == nullid:
199 191 parents = (parents[0],)
200 192
201 193 bookmarks.update(self, parents, node)
202 194 return node
203 195 finally:
204 196 wlock.release()
205 197
206 198 def pull(self, remote, heads=None, force=False):
207 199 result = super(bookmark_repo, self).pull(remote, heads, force)
208 200
209 201 self.ui.debug("checking for updated bookmarks\n")
210 202 rb = remote.listkeys('bookmarks')
211 203 changed = False
212 204 for k in rb.keys():
213 205 if k in self._bookmarks:
214 206 nr, nl = rb[k], self._bookmarks[k]
215 207 if nr in self:
216 208 cr = self[nr]
217 209 cl = self[nl]
218 210 if cl.rev() >= cr.rev():
219 211 continue
220 212 if cr in cl.descendants():
221 213 self._bookmarks[k] = cr.node()
222 214 changed = True
223 215 self.ui.status(_("updating bookmark %s\n") % k)
224 216 else:
225 217 self.ui.warn(_("not updating divergent"
226 218 " bookmark %s\n") % k)
227 219 if changed:
228 220 bookmarks.write(repo)
229 221
230 222 return result
231 223
232 224 def push(self, remote, force=False, revs=None, newbranch=False):
233 225 result = super(bookmark_repo, self).push(remote, force, revs,
234 226 newbranch)
235 227
236 228 self.ui.debug("checking for updated bookmarks\n")
237 229 rb = remote.listkeys('bookmarks')
238 230 for k in rb.keys():
239 231 if k in self._bookmarks:
240 232 nr, nl = rb[k], hex(self._bookmarks[k])
241 233 if nr in self:
242 234 cr = self[nr]
243 235 cl = self[nl]
244 236 if cl in cr.descendants():
245 237 r = remote.pushkey('bookmarks', k, nr, nl)
246 238 if r:
247 239 self.ui.status(_("updating bookmark %s\n") % k)
248 240 else:
249 241 self.ui.warn(_('updating bookmark %s'
250 242 ' failed!\n') % k)
251 243
252 244 return result
253 245
254 246 def addchangegroup(self, *args, **kwargs):
255 247 result = super(bookmark_repo, self).addchangegroup(*args, **kwargs)
256 248 if result > 1:
257 249 # We have more heads than before
258 250 return result
259 251 node = self.changelog.tip()
260 252 parents = self.dirstate.parents()
261 253 bookmarks.update(self, parents, node)
262 254 return result
263 255
264 256 def _findtags(self):
265 257 """Merge bookmarks with normal tags"""
266 258 (tags, tagtypes) = super(bookmark_repo, self)._findtags()
267 259 tags.update(self._bookmarks)
268 260 return (tags, tagtypes)
269 261
270 262 if hasattr(repo, 'invalidate'):
271 263 def invalidate(self):
272 264 super(bookmark_repo, self).invalidate()
273 265 for attr in ('_bookmarks', '_bookmarkcurrent'):
274 266 if attr in self.__dict__:
275 267 delattr(self, attr)
276 268
277 269 repo.__class__ = bookmark_repo
278 270
279 271 def pull(oldpull, ui, repo, source="default", **opts):
280 272 # translate bookmark args to rev args for actual pull
281 273 if opts.get('bookmark'):
282 274 # this is an unpleasant hack as pull will do this internally
283 275 source, branches = hg.parseurl(ui.expandpath(source),
284 276 opts.get('branch'))
285 277 other = hg.repository(hg.remoteui(repo, opts), source)
286 278 rb = other.listkeys('bookmarks')
287 279
288 280 for b in opts['bookmark']:
289 281 if b not in rb:
290 282 raise util.Abort(_('remote bookmark %s not found!') % b)
291 283 opts.setdefault('rev', []).append(b)
292 284
293 285 result = oldpull(ui, repo, source, **opts)
294 286
295 287 # update specified bookmarks
296 288 if opts.get('bookmark'):
297 289 for b in opts['bookmark']:
298 290 # explicit pull overrides local bookmark if any
299 291 ui.status(_("importing bookmark %s\n") % b)
300 292 repo._bookmarks[b] = repo[rb[b]].node()
301 293 bookmarks.write(repo)
302 294
303 295 return result
304 296
305 297 def push(oldpush, ui, repo, dest=None, **opts):
306 298 dopush = True
307 299 if opts.get('bookmark'):
308 300 dopush = False
309 301 for b in opts['bookmark']:
310 302 if b in repo._bookmarks:
311 303 dopush = True
312 304 opts.setdefault('rev', []).append(b)
313 305
314 306 result = 0
315 307 if dopush:
316 308 result = oldpush(ui, repo, dest, **opts)
317 309
318 310 if opts.get('bookmark'):
319 311 # this is an unpleasant hack as push will do this internally
320 312 dest = ui.expandpath(dest or 'default-push', dest or 'default')
321 313 dest, branches = hg.parseurl(dest, opts.get('branch'))
322 314 other = hg.repository(hg.remoteui(repo, opts), dest)
323 315 rb = other.listkeys('bookmarks')
324 316 for b in opts['bookmark']:
325 317 # explicit push overrides remote bookmark if any
326 318 if b in repo._bookmarks:
327 319 ui.status(_("exporting bookmark %s\n") % b)
328 320 new = repo[b].hex()
329 321 elif b in rb:
330 322 ui.status(_("deleting remote bookmark %s\n") % b)
331 323 new = '' # delete
332 324 else:
333 325 ui.warn(_('bookmark %s does not exist on the local '
334 326 'or remote repository!\n') % b)
335 327 return 2
336 328 old = rb.get(b, '')
337 329 r = other.pushkey('bookmarks', b, old, new)
338 330 if not r:
339 331 ui.warn(_('updating bookmark %s failed!\n') % b)
340 332 if not result:
341 333 result = 2
342 334
343 335 return result
344 336
345 337 def incoming(oldincoming, ui, repo, source="default", **opts):
346 338 if opts.get('bookmarks'):
347 339 source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
348 340 other = hg.repository(hg.remoteui(repo, opts), source)
349 341 ui.status(_('comparing with %s\n') % url.hidepassword(source))
350 342 return bookmarks.diff(ui, repo, other)
351 343 else:
352 344 return oldincoming(ui, repo, source, **opts)
353 345
354 346 def outgoing(oldoutgoing, ui, repo, dest=None, **opts):
355 347 if opts.get('bookmarks'):
356 348 dest = ui.expandpath(dest or 'default-push', dest or 'default')
357 349 dest, branches = hg.parseurl(dest, opts.get('branch'))
358 350 other = hg.repository(hg.remoteui(repo, opts), dest)
359 351 ui.status(_('comparing with %s\n') % url.hidepassword(dest))
360 352 return bookmarks.diff(ui, other, repo)
361 353 else:
362 354 return oldoutgoing(ui, repo, dest, **opts)
363 355
364 356 def uisetup(ui):
365 357 extensions.wrapfunction(repair, "strip", strip)
366 358 if ui.configbool('bookmarks', 'track.current'):
367 359 extensions.wrapcommand(commands.table, 'update', updatecurbookmark)
368 360
369 361 entry = extensions.wrapcommand(commands.table, 'pull', pull)
370 362 entry[1].append(('B', 'bookmark', [],
371 363 _("bookmark to import"),
372 364 _('BOOKMARK')))
373 365 entry = extensions.wrapcommand(commands.table, 'push', push)
374 366 entry[1].append(('B', 'bookmark', [],
375 367 _("bookmark to export"),
376 368 _('BOOKMARK')))
377 369 entry = extensions.wrapcommand(commands.table, 'incoming', incoming)
378 370 entry[1].append(('B', 'bookmarks', False,
379 371 _("compare bookmark")))
380 372 entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing)
381 373 entry[1].append(('B', 'bookmarks', False,
382 374 _("compare bookmark")))
383 375
384 376 def updatecurbookmark(orig, ui, repo, *args, **opts):
385 377 '''Set the current bookmark
386 378
387 379 If the user updates to a bookmark we update the .hg/bookmarks.current
388 380 file.
389 381 '''
390 382 res = orig(ui, repo, *args, **opts)
391 383 rev = opts['rev']
392 384 if not rev and len(args) > 0:
393 385 rev = args[0]
394 386 bookmarks.setcurrent(repo, rev)
395 387 return res
396 388
397 389 def bmrevset(repo, subset, x):
398 390 """``bookmark([name])``
399 391 The named bookmark or all bookmarks.
400 392 """
401 393 # i18n: "bookmark" is a keyword
402 394 args = revset.getargs(x, 0, 1, _('bookmark takes one or no arguments'))
403 395 if args:
404 396 bm = revset.getstring(args[0],
405 397 # i18n: "bookmark" is a keyword
406 398 _('the argument to bookmark must be a string'))
407 399 bmrev = bookmarks.listbookmarks(repo).get(bm, None)
408 400 if bmrev:
409 401 bmrev = repo.changelog.rev(bin(bmrev))
410 402 return [r for r in subset if r == bmrev]
411 403 bms = set([repo.changelog.rev(bin(r))
412 404 for r in bookmarks.listbookmarks(repo).values()])
413 405 return [r for r in subset if r in bms]
414 406
415 407 def extsetup(ui):
416 408 revset.symbols['bookmark'] = bmrevset
417 409
418 410 cmdtable = {
419 411 "bookmarks":
420 412 (bookmark,
421 413 [('f', 'force', False, _('force')),
422 414 ('r', 'rev', '', _('revision'), _('REV')),
423 415 ('d', 'delete', False, _('delete a given bookmark')),
424 416 ('m', 'rename', '', _('rename a given bookmark'), _('NAME'))],
425 417 _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')),
426 418 }
427 419
428 420 colortable = {'bookmarks.current': 'green'}
429 421
430 422 # tell hggettext to extract docstrings from these functions:
431 423 i18nfunctions = [bmrevset]
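The pull and push wrappers above exchange bookmark positions through the pushkey protocol: listkeys('bookmarks') returns a map of bookmark names to hex nodes, and pushkey('bookmarks', name, old, new) is a compare-and-swap on the remote, where an empty new value deletes the bookmark (as the push wrapper uses when "deleting remote bookmark"). A condensed sketch of that client-side pattern, assuming 'other' is any peer object offering those two calls:

    def syncbookmark(repo, other, name):
        # Minimal sketch of the compare-and-swap exchange used by the
        # push() wrapper above; 'other' is assumed to expose
        # listkeys/pushkey like a Mercurial peer.
        remotemarks = other.listkeys('bookmarks')  # {name: hex node}
        old = remotemarks.get(name, '')
        if name in repo._bookmarks:
            new = repo[name].hex()                 # export local position
        else:
            new = ''                               # empty value deletes it remotely
        # pushkey returns a false value if 'old' no longer matches, i.e.
        # someone moved the bookmark since listkeys() ran.
        return other.pushkey('bookmarks', name, old, new)

This mirrors the loop in the push wrapper: on failure it warns "updating bookmark %s failed!" and arranges a nonzero exit code.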
@@ -1,1945 +1,1952 @@ mercurial/localrepo.py
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 import url as urlmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 propertycache = util.propertycache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 24 supportedformats = set(('revlogv1', 'parentdelta'))
25 25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 26 'dotencode'))
27 27
28 28 def __init__(self, baseui, path=None, create=0):
29 29 repo.repository.__init__(self)
30 30 self.root = os.path.realpath(util.expandpath(path))
31 31 self.path = os.path.join(self.root, ".hg")
32 32 self.origroot = path
33 33 self.auditor = util.path_auditor(self.root, self._checknested)
34 34 self.opener = util.opener(self.path)
35 35 self.wopener = util.opener(self.root)
36 36 self.baseui = baseui
37 37 self.ui = baseui.copy()
38 38
39 39 try:
40 40 self.ui.readconfig(self.join("hgrc"), self.root)
41 41 extensions.loadall(self.ui)
42 42 except IOError:
43 43 pass
44 44
45 45 if not os.path.isdir(self.path):
46 46 if create:
47 47 if not os.path.exists(path):
48 48 util.makedirs(path)
49 49 os.mkdir(self.path)
50 50 requirements = ["revlogv1"]
51 51 if self.ui.configbool('format', 'usestore', True):
52 52 os.mkdir(os.path.join(self.path, "store"))
53 53 requirements.append("store")
54 54 if self.ui.configbool('format', 'usefncache', True):
55 55 requirements.append("fncache")
56 56 if self.ui.configbool('format', 'dotencode', True):
57 57 requirements.append('dotencode')
58 58 # create an invalid changelog
59 59 self.opener("00changelog.i", "a").write(
60 60 '\0\0\0\2' # represents revlogv2
61 61 ' dummy changelog to prevent using the old repo layout'
62 62 )
63 63 if self.ui.configbool('format', 'parentdelta', False):
64 64 requirements.append("parentdelta")
65 65 else:
66 66 raise error.RepoError(_("repository %s not found") % path)
67 67 elif create:
68 68 raise error.RepoError(_("repository %s already exists") % path)
69 69 else:
70 70 # find requirements
71 71 requirements = set()
72 72 try:
73 73 requirements = set(self.opener("requires").read().splitlines())
74 74 except IOError, inst:
75 75 if inst.errno != errno.ENOENT:
76 76 raise
77 77 for r in requirements - self.supported:
78 78 raise error.RepoError(_("requirement '%s' not supported") % r)
79 79
80 80 self.sharedpath = self.path
81 81 try:
82 82 s = os.path.realpath(self.opener("sharedpath").read())
83 83 if not os.path.exists(s):
84 84 raise error.RepoError(
85 85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 86 self.sharedpath = s
87 87 except IOError, inst:
88 88 if inst.errno != errno.ENOENT:
89 89 raise
90 90
91 91 self.store = store.store(requirements, self.sharedpath, util.opener)
92 92 self.spath = self.store.path
93 93 self.sopener = self.store.opener
94 94 self.sjoin = self.store.join
95 95 self.opener.createmode = self.store.createmode
96 96 self._applyrequirements(requirements)
97 97 if create:
98 98 self._writerequirements()
99 99
100 100 # These two define the set of tags for this repository. _tags
101 101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 102 # 'local'. (Global tags are defined by .hgtags across all
103 103 # heads, and local tags are defined in .hg/localtags.) They
104 104 # constitute the in-memory cache of tags.
105 105 self._tags = None
106 106 self._tagtypes = None
107 107
108 108 self._branchcache = None
109 109 self._branchcachetip = None
110 110 self.nodetagscache = None
111 111 self.filterpats = {}
112 112 self._datafilters = {}
113 113 self._transref = self._lockref = self._wlockref = None
114 114
115 115 def _applyrequirements(self, requirements):
116 116 self.requirements = requirements
117 117 self.sopener.options = {}
118 118 if 'parentdelta' in requirements:
119 119 self.sopener.options['parentdelta'] = 1
120 120
121 121 def _writerequirements(self):
122 122 reqfile = self.opener("requires", "w")
123 123 for r in self.requirements:
124 124 reqfile.write("%s\n" % r)
125 125 reqfile.close()
126 126
127 127 def _checknested(self, path):
128 128 """Determine if path is a legal nested repository."""
129 129 if not path.startswith(self.root):
130 130 return False
131 131 subpath = path[len(self.root) + 1:]
132 132
133 133 # XXX: Checking against the current working copy is wrong in
134 134 # the sense that it can reject things like
135 135 #
136 136 # $ hg cat -r 10 sub/x.txt
137 137 #
138 138 # if sub/ is no longer a subrepository in the working copy
139 139 # parent revision.
140 140 #
141 141 # However, it can of course also allow things that would have
142 142 # been rejected before, such as the above cat command if sub/
143 143 # is a subrepository now, but was a normal directory before.
144 144 # The old path auditor would have rejected by mistake since it
145 145 # panics when it sees sub/.hg/.
146 146 #
147 147 # All in all, checking against the working copy seems sensible
148 148 # since we want to prevent access to nested repositories on
149 149 # the filesystem *now*.
150 150 ctx = self[None]
151 151 parts = util.splitpath(subpath)
152 152 while parts:
153 153 prefix = os.sep.join(parts)
154 154 if prefix in ctx.substate:
155 155 if prefix == subpath:
156 156 return True
157 157 else:
158 158 sub = ctx.sub(prefix)
159 159 return sub.checknested(subpath[len(prefix) + 1:])
160 160 else:
161 161 parts.pop()
162 162 return False
163 163
164 @util.propertycache
165 def _bookmarks(self):
166 return bookmarks.read(self)
167
168 @util.propertycache
169 def _bookmarkcurrent(self):
170 return bookmarks.readcurrent(self)
164 171
165 172 @propertycache
166 173 def changelog(self):
167 174 c = changelog.changelog(self.sopener)
168 175 if 'HG_PENDING' in os.environ:
169 176 p = os.environ['HG_PENDING']
170 177 if p.startswith(self.root):
171 178 c.readpending('00changelog.i.a')
172 179 self.sopener.options['defversion'] = c.version
173 180 return c
174 181
175 182 @propertycache
176 183 def manifest(self):
177 184 return manifest.manifest(self.sopener)
178 185
179 186 @propertycache
180 187 def dirstate(self):
181 188 warned = [0]
182 189 def validate(node):
183 190 try:
184 191 r = self.changelog.rev(node)
185 192 return node
186 193 except error.LookupError:
187 194 if not warned[0]:
188 195 warned[0] = True
189 196 self.ui.warn(_("warning: ignoring unknown"
190 197 " working parent %s!\n") % short(node))
191 198 return nullid
192 199
193 200 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
194 201
195 202 def __getitem__(self, changeid):
196 203 if changeid is None:
197 204 return context.workingctx(self)
198 205 return context.changectx(self, changeid)
199 206
200 207 def __contains__(self, changeid):
201 208 try:
202 209 return bool(self.lookup(changeid))
203 210 except error.RepoLookupError:
204 211 return False
205 212
206 213 def __nonzero__(self):
207 214 return True
208 215
209 216 def __len__(self):
210 217 return len(self.changelog)
211 218
212 219 def __iter__(self):
213 220 for i in xrange(len(self)):
214 221 yield i
215 222
216 223 def url(self):
217 224 return 'file:' + self.root
218 225
219 226 def hook(self, name, throw=False, **args):
220 227 return hook.hook(self.ui, self, name, throw, **args)
221 228
222 229 tag_disallowed = ':\r\n'
223 230
224 231 def _tag(self, names, node, message, local, user, date, extra={}):
225 232 if isinstance(names, str):
226 233 allchars = names
227 234 names = (names,)
228 235 else:
229 236 allchars = ''.join(names)
230 237 for c in self.tag_disallowed:
231 238 if c in allchars:
232 239 raise util.Abort(_('%r cannot be used in a tag name') % c)
233 240
234 241 branches = self.branchmap()
235 242 for name in names:
236 243 self.hook('pretag', throw=True, node=hex(node), tag=name,
237 244 local=local)
238 245 if name in branches:
239 246 self.ui.warn(_("warning: tag %s conflicts with existing"
240 247 " branch name\n") % name)
241 248
242 249 def writetags(fp, names, munge, prevtags):
243 250 fp.seek(0, 2)
244 251 if prevtags and prevtags[-1] != '\n':
245 252 fp.write('\n')
246 253 for name in names:
247 254 m = munge and munge(name) or name
248 255 if self._tagtypes and name in self._tagtypes:
249 256 old = self._tags.get(name, nullid)
250 257 fp.write('%s %s\n' % (hex(old), m))
251 258 fp.write('%s %s\n' % (hex(node), m))
252 259 fp.close()
253 260
254 261 prevtags = ''
255 262 if local:
256 263 try:
257 264 fp = self.opener('localtags', 'r+')
258 265 except IOError:
259 266 fp = self.opener('localtags', 'a')
260 267 else:
261 268 prevtags = fp.read()
262 269
263 270 # local tags are stored in the current charset
264 271 writetags(fp, names, None, prevtags)
265 272 for name in names:
266 273 self.hook('tag', node=hex(node), tag=name, local=local)
267 274 return
268 275
269 276 try:
270 277 fp = self.wfile('.hgtags', 'rb+')
271 278 except IOError:
272 279 fp = self.wfile('.hgtags', 'ab')
273 280 else:
274 281 prevtags = fp.read()
275 282
276 283 # committed tags are stored in UTF-8
277 284 writetags(fp, names, encoding.fromlocal, prevtags)
278 285
279 286 if '.hgtags' not in self.dirstate:
280 287 self[None].add(['.hgtags'])
281 288
282 289 m = matchmod.exact(self.root, '', ['.hgtags'])
283 290 tagnode = self.commit(message, user, date, extra=extra, match=m)
284 291
285 292 for name in names:
286 293 self.hook('tag', node=hex(node), tag=name, local=local)
287 294
288 295 return tagnode
289 296
290 297 def tag(self, names, node, message, local, user, date):
291 298 '''tag a revision with one or more symbolic names.
292 299
293 300 names is a list of strings or, when adding a single tag, names may be a
294 301 string.
295 302
296 303 if local is True, the tags are stored in a per-repository file.
297 304 otherwise, they are stored in the .hgtags file, and a new
298 305 changeset is committed with the change.
299 306
300 307 keyword arguments:
301 308
302 309 local: whether to store tags in non-version-controlled file
303 310 (default False)
304 311
305 312 message: commit message to use if committing
306 313
307 314 user: name of user to use if committing
308 315
309 316 date: date tuple to use if committing'''
310 317
311 318 if not local:
312 319 for x in self.status()[:5]:
313 320 if '.hgtags' in x:
314 321 raise util.Abort(_('working copy of .hgtags is changed '
315 322 '(please commit .hgtags manually)'))
316 323
317 324 self.tags() # instantiate the cache
318 325 self._tag(names, node, message, local, user, date)
319 326
320 327 def tags(self):
321 328 '''return a mapping of tag to node'''
322 329 if self._tags is None:
323 330 (self._tags, self._tagtypes) = self._findtags()
324 331
325 332 return self._tags
326 333
327 334 def _findtags(self):
328 335 '''Do the hard work of finding tags. Return a pair of dicts
329 336 (tags, tagtypes) where tags maps tag name to node, and tagtypes
330 337 maps tag name to a string like \'global\' or \'local\'.
331 338 Subclasses or extensions are free to add their own tags, but
332 339 should be aware that the returned dicts will be retained for the
333 340 duration of the localrepo object.'''
334 341
335 342 # XXX what tagtype should subclasses/extensions use? Currently
336 343 # mq and bookmarks add tags, but do not set the tagtype at all.
337 344 # Should each extension invent its own tag type? Should there
338 345 # be one tagtype for all such "virtual" tags? Or is the status
339 346 # quo fine?
340 347
341 348 alltags = {} # map tag name to (node, hist)
342 349 tagtypes = {}
343 350
344 351 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
345 352 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
346 353
347 354 # Build the return dicts. Have to re-encode tag names because
348 355 # the tags module always uses UTF-8 (in order not to lose info
349 356 # writing to the cache), but the rest of Mercurial wants them in
350 357 # local encoding.
351 358 tags = {}
352 359 for (name, (node, hist)) in alltags.iteritems():
353 360 if node != nullid:
354 361 tags[encoding.tolocal(name)] = node
355 362 tags['tip'] = self.changelog.tip()
356 363 tagtypes = dict([(encoding.tolocal(name), value)
357 364 for (name, value) in tagtypes.iteritems()])
358 365 return (tags, tagtypes)
359 366
360 367 def tagtype(self, tagname):
361 368 '''
362 369 return the type of the given tag. result can be:
363 370
364 371 'local' : a local tag
365 372 'global' : a global tag
366 373 None : tag does not exist
367 374 '''
368 375
369 376 self.tags()
370 377
371 378 return self._tagtypes.get(tagname)
372 379
373 380 def tagslist(self):
374 381 '''return a list of tags ordered by revision'''
375 382 l = []
376 383 for t, n in self.tags().iteritems():
377 384 try:
378 385 r = self.changelog.rev(n)
379 386 except:
380 387 r = -2 # sort to the beginning of the list if unknown
381 388 l.append((r, t, n))
382 389 return [(t, n) for r, t, n in sorted(l)]
383 390
384 391 def nodetags(self, node):
385 392 '''return the tags associated with a node'''
386 393 if not self.nodetagscache:
387 394 self.nodetagscache = {}
388 395 for t, n in self.tags().iteritems():
389 396 self.nodetagscache.setdefault(n, []).append(t)
390 397 for tags in self.nodetagscache.itervalues():
391 398 tags.sort()
392 399 return self.nodetagscache.get(node, [])
393 400
394 401 def _branchtags(self, partial, lrev):
395 402 # TODO: rename this function?
396 403 tiprev = len(self) - 1
397 404 if lrev != tiprev:
398 405 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
399 406 self._updatebranchcache(partial, ctxgen)
400 407 self._writebranchcache(partial, self.changelog.tip(), tiprev)
401 408
402 409 return partial
403 410
404 411 def updatebranchcache(self):
405 412 tip = self.changelog.tip()
406 413 if self._branchcache is not None and self._branchcachetip == tip:
407 414 return self._branchcache
408 415
409 416 oldtip = self._branchcachetip
410 417 self._branchcachetip = tip
411 418 if oldtip is None or oldtip not in self.changelog.nodemap:
412 419 partial, last, lrev = self._readbranchcache()
413 420 else:
414 421 lrev = self.changelog.rev(oldtip)
415 422 partial = self._branchcache
416 423
417 424 self._branchtags(partial, lrev)
418 425 # this private cache holds all heads (not just tips)
419 426 self._branchcache = partial
420 427
421 428 def branchmap(self):
422 429 '''returns a dictionary {branch: [branchheads]}'''
423 430 self.updatebranchcache()
424 431 return self._branchcache
425 432
426 433 def branchtags(self):
427 434 '''return a dict where branch names map to the tipmost head of
428 435 the branch, open heads come before closed'''
429 436 bt = {}
430 437 for bn, heads in self.branchmap().iteritems():
431 438 tip = heads[-1]
432 439 for h in reversed(heads):
433 440 if 'close' not in self.changelog.read(h)[5]:
434 441 tip = h
435 442 break
436 443 bt[bn] = tip
437 444 return bt
438 445
439 446 def _readbranchcache(self):
440 447 partial = {}
441 448 try:
442 449 f = self.opener("cache/branchheads")
443 450 lines = f.read().split('\n')
444 451 f.close()
445 452 except (IOError, OSError):
446 453 return {}, nullid, nullrev
447 454
448 455 try:
449 456 last, lrev = lines.pop(0).split(" ", 1)
450 457 last, lrev = bin(last), int(lrev)
451 458 if lrev >= len(self) or self[lrev].node() != last:
452 459 # invalidate the cache
453 460 raise ValueError('invalidating branch cache (tip differs)')
454 461 for l in lines:
455 462 if not l:
456 463 continue
457 464 node, label = l.split(" ", 1)
458 465 label = encoding.tolocal(label.strip())
459 466 partial.setdefault(label, []).append(bin(node))
460 467 except KeyboardInterrupt:
461 468 raise
462 469 except Exception, inst:
463 470 if self.ui.debugflag:
464 471 self.ui.warn(str(inst), '\n')
465 472 partial, last, lrev = {}, nullid, nullrev
466 473 return partial, last, lrev
467 474
468 475 def _writebranchcache(self, branches, tip, tiprev):
469 476 try:
470 477 f = self.opener("cache/branchheads", "w", atomictemp=True)
471 478 f.write("%s %s\n" % (hex(tip), tiprev))
472 479 for label, nodes in branches.iteritems():
473 480 for node in nodes:
474 481 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
475 482 f.rename()
476 483 except (IOError, OSError):
477 484 pass
478 485
479 486 def _updatebranchcache(self, partial, ctxgen):
480 487 # collect new branch entries
481 488 newbranches = {}
482 489 for c in ctxgen:
483 490 newbranches.setdefault(c.branch(), []).append(c.node())
484 491 # if older branchheads are reachable from new ones, they aren't
485 492 # really branchheads. Note checking parents is insufficient:
486 493 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
487 494 for branch, newnodes in newbranches.iteritems():
488 495 bheads = partial.setdefault(branch, [])
489 496 bheads.extend(newnodes)
490 497 if len(bheads) <= 1:
491 498 continue
492 499 # starting from tip means fewer passes over reachable
493 500 while newnodes:
494 501 latest = newnodes.pop()
495 502 if latest not in bheads:
496 503 continue
497 504 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
498 505 reachable = self.changelog.reachable(latest, minbhrev)
499 506 reachable.remove(latest)
500 507 bheads = [b for b in bheads if b not in reachable]
501 508 partial[branch] = bheads
502 509
503 510 def lookup(self, key):
504 511 if isinstance(key, int):
505 512 return self.changelog.node(key)
506 513 elif key == '.':
507 514 return self.dirstate.parents()[0]
508 515 elif key == 'null':
509 516 return nullid
510 517 elif key == 'tip':
511 518 return self.changelog.tip()
512 519 n = self.changelog._match(key)
513 520 if n:
514 521 return n
515 522 if key in self.tags():
516 523 return self.tags()[key]
517 524 if key in self.branchtags():
518 525 return self.branchtags()[key]
519 526 n = self.changelog._partialmatch(key)
520 527 if n:
521 528 return n
522 529
523 530 # can't find key, check if it might have come from damaged dirstate
524 531 if key in self.dirstate.parents():
525 532 raise error.Abort(_("working directory has unknown parent '%s'!")
526 533 % short(key))
527 534 try:
528 535 if len(key) == 20:
529 536 key = hex(key)
530 537 except:
531 538 pass
532 539 raise error.RepoLookupError(_("unknown revision '%s'") % key)
533 540
534 541 def lookupbranch(self, key, remote=None):
535 542 repo = remote or self
536 543 if key in repo.branchmap():
537 544 return key
538 545
539 546 repo = (remote and remote.local()) and remote or self
540 547 return repo[key].branch()
541 548
542 549 def local(self):
543 550 return True
544 551
545 552 def join(self, f):
546 553 return os.path.join(self.path, f)
547 554
548 555 def wjoin(self, f):
549 556 return os.path.join(self.root, f)
550 557
551 558 def file(self, f):
552 559 if f[0] == '/':
553 560 f = f[1:]
554 561 return filelog.filelog(self.sopener, f)
555 562
556 563 def changectx(self, changeid):
557 564 return self[changeid]
558 565
559 566 def parents(self, changeid=None):
560 567 '''get list of changectxs for parents of changeid'''
561 568 return self[changeid].parents()
562 569
563 570 def filectx(self, path, changeid=None, fileid=None):
564 571 """changeid can be a changeset revision, node, or tag.
565 572 fileid can be a file revision or node."""
566 573 return context.filectx(self, path, changeid, fileid)
567 574
568 575 def getcwd(self):
569 576 return self.dirstate.getcwd()
570 577
571 578 def pathto(self, f, cwd=None):
572 579 return self.dirstate.pathto(f, cwd)
573 580
574 581 def wfile(self, f, mode='r'):
575 582 return self.wopener(f, mode)
576 583
577 584 def _link(self, f):
578 585 return os.path.islink(self.wjoin(f))
579 586
580 587 def _loadfilter(self, filter):
581 588 if filter not in self.filterpats:
582 589 l = []
583 590 for pat, cmd in self.ui.configitems(filter):
584 591 if cmd == '!':
585 592 continue
586 593 mf = matchmod.match(self.root, '', [pat])
587 594 fn = None
588 595 params = cmd
589 596 for name, filterfn in self._datafilters.iteritems():
590 597 if cmd.startswith(name):
591 598 fn = filterfn
592 599 params = cmd[len(name):].lstrip()
593 600 break
594 601 if not fn:
595 602 fn = lambda s, c, **kwargs: util.filter(s, c)
596 603 # Wrap old filters not supporting keyword arguments
597 604 if not inspect.getargspec(fn)[2]:
598 605 oldfn = fn
599 606 fn = lambda s, c, **kwargs: oldfn(s, c)
600 607 l.append((mf, fn, params))
601 608 self.filterpats[filter] = l
602 609 return self.filterpats[filter]
603 610
604 611 def _filter(self, filterpats, filename, data):
605 612 for mf, fn, cmd in filterpats:
606 613 if mf(filename):
607 614 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
608 615 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
609 616 break
610 617
611 618 return data
612 619
613 620 @propertycache
614 621 def _encodefilterpats(self):
615 622 return self._loadfilter('encode')
616 623
617 624 @propertycache
618 625 def _decodefilterpats(self):
619 626 return self._loadfilter('decode')
620 627
621 628 def adddatafilter(self, name, filter):
622 629 self._datafilters[name] = filter
623 630
624 631 def wread(self, filename):
625 632 if self._link(filename):
626 633 data = os.readlink(self.wjoin(filename))
627 634 else:
628 635 data = self.wopener(filename, 'r').read()
629 636 return self._filter(self._encodefilterpats, filename, data)
630 637
631 638 def wwrite(self, filename, data, flags):
632 639 data = self._filter(self._decodefilterpats, filename, data)
633 640 if 'l' in flags:
634 641 self.wopener.symlink(data, filename)
635 642 else:
636 643 self.wopener(filename, 'w').write(data)
637 644 if 'x' in flags:
638 645 util.set_flags(self.wjoin(filename), False, True)
639 646
640 647 def wwritedata(self, filename, data):
641 648 return self._filter(self._decodefilterpats, filename, data)
642 649
643 650 def transaction(self, desc):
644 651 tr = self._transref and self._transref() or None
645 652 if tr and tr.running():
646 653 return tr.nest()
647 654
648 655 # abort here if the journal already exists
649 656 if os.path.exists(self.sjoin("journal")):
650 657 raise error.RepoError(
651 658 _("abandoned transaction found - run hg recover"))
652 659
653 660 # save dirstate for rollback
654 661 try:
655 662 ds = self.opener("dirstate").read()
656 663 except IOError:
657 664 ds = ""
658 665 self.opener("journal.dirstate", "w").write(ds)
659 666 self.opener("journal.branch", "w").write(
660 667 encoding.fromlocal(self.dirstate.branch()))
661 668 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
662 669
663 670 renames = [(self.sjoin("journal"), self.sjoin("undo")),
664 671 (self.join("journal.dirstate"), self.join("undo.dirstate")),
665 672 (self.join("journal.branch"), self.join("undo.branch")),
666 673 (self.join("journal.desc"), self.join("undo.desc"))]
667 674 tr = transaction.transaction(self.ui.warn, self.sopener,
668 675 self.sjoin("journal"),
669 676 aftertrans(renames),
670 677 self.store.createmode)
671 678 self._transref = weakref.ref(tr)
672 679 return tr
673 680
674 681 def recover(self):
675 682 lock = self.lock()
676 683 try:
677 684 if os.path.exists(self.sjoin("journal")):
678 685 self.ui.status(_("rolling back interrupted transaction\n"))
679 686 transaction.rollback(self.sopener, self.sjoin("journal"),
680 687 self.ui.warn)
681 688 self.invalidate()
682 689 return True
683 690 else:
684 691 self.ui.warn(_("no interrupted transaction available\n"))
685 692 return False
686 693 finally:
687 694 lock.release()
688 695
689 696 def rollback(self, dryrun=False):
690 697 wlock = lock = None
691 698 try:
692 699 wlock = self.wlock()
693 700 lock = self.lock()
694 701 if os.path.exists(self.sjoin("undo")):
695 702 try:
696 703 args = self.opener("undo.desc", "r").read().splitlines()
697 704 if len(args) >= 3 and self.ui.verbose:
698 705 desc = _("rolling back to revision %s"
699 706 " (undo %s: %s)\n") % (
700 707 int(args[0]) - 1, args[1], args[2])
701 708 elif len(args) >= 2:
702 709 desc = _("rolling back to revision %s (undo %s)\n") % (
703 710 int(args[0]) - 1, args[1])
704 711 except IOError:
705 712 desc = _("rolling back unknown transaction\n")
706 713 self.ui.status(desc)
707 714 if dryrun:
708 715 return
709 716 transaction.rollback(self.sopener, self.sjoin("undo"),
710 717 self.ui.warn)
711 718 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
712 719 try:
713 720 branch = self.opener("undo.branch").read()
714 721 self.dirstate.setbranch(branch)
715 722 except IOError:
716 723 self.ui.warn(_("Named branch could not be reset, "
717 724 "current branch still is: %s\n")
718 725 % self.dirstate.branch())
719 726 self.invalidate()
720 727 self.dirstate.invalidate()
721 728 self.destroyed()
722 729 else:
723 730 self.ui.warn(_("no rollback information available\n"))
724 731 return 1
725 732 finally:
726 733 release(lock, wlock)
727 734
728 735 def invalidatecaches(self):
729 736 self._tags = None
730 737 self._tagtypes = None
731 738 self.nodetagscache = None
732 739 self._branchcache = None # in UTF-8
733 740 self._branchcachetip = None
734 741
735 742 def invalidate(self):
736 743 for a in ("changelog", "manifest"):
737 744 if a in self.__dict__:
738 745 delattr(self, a)
739 746 self.invalidatecaches()
740 747
741 748 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
742 749 try:
743 750 l = lock.lock(lockname, 0, releasefn, desc=desc)
744 751 except error.LockHeld, inst:
745 752 if not wait:
746 753 raise
747 754 self.ui.warn(_("waiting for lock on %s held by %r\n") %
748 755 (desc, inst.locker))
749 756 # default to 600 seconds timeout
750 757 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
751 758 releasefn, desc=desc)
752 759 if acquirefn:
753 760 acquirefn()
754 761 return l
755 762
756 763 def lock(self, wait=True):
757 764 '''Lock the repository store (.hg/store) and return a weak reference
758 765 to the lock. Use this before modifying the store (e.g. committing or
759 766 stripping). If you are opening a transaction, get a lock as well.)'''
760 767 l = self._lockref and self._lockref()
761 768 if l is not None and l.held:
762 769 l.lock()
763 770 return l
764 771
765 772 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
766 773 _('repository %s') % self.origroot)
767 774 self._lockref = weakref.ref(l)
768 775 return l
769 776
770 777 def wlock(self, wait=True):
771 778 '''Lock the non-store parts of the repository (everything under
772 779 .hg except .hg/store) and return a weak reference to the lock.
773 780 Use this before modifying files in .hg.'''
774 781 l = self._wlockref and self._wlockref()
775 782 if l is not None and l.held:
776 783 l.lock()
777 784 return l
778 785
779 786 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
780 787 self.dirstate.invalidate, _('working directory of %s') %
781 788 self.origroot)
782 789 self._wlockref = weakref.ref(l)
783 790 return l
784 791
785 792 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
786 793 """
787 794 commit an individual file as part of a larger transaction
788 795 """
789 796
790 797 fname = fctx.path()
791 798 text = fctx.data()
792 799 flog = self.file(fname)
793 800 fparent1 = manifest1.get(fname, nullid)
794 801 fparent2 = fparent2o = manifest2.get(fname, nullid)
795 802
796 803 meta = {}
797 804 copy = fctx.renamed()
798 805 if copy and copy[0] != fname:
799 806 # Mark the new revision of this file as a copy of another
800 807 # file. This copy data will effectively act as a parent
801 808 # of this new revision. If this is a merge, the first
802 809 # parent will be the nullid (meaning "look up the copy data")
803 810 # and the second one will be the other parent. For example:
804 811 #
805 812 # 0 --- 1 --- 3 rev1 changes file foo
806 813 # \ / rev2 renames foo to bar and changes it
807 814 # \- 2 -/ rev3 should have bar with all changes and
808 815 # should record that bar descends from
809 816 # bar in rev2 and foo in rev1
810 817 #
811 818 # this allows this merge to succeed:
812 819 #
813 820 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
814 821 # \ / merging rev3 and rev4 should use bar@rev2
815 822 # \- 2 --- 4 as the merge base
816 823 #
817 824
818 825 cfname = copy[0]
819 826 crev = manifest1.get(cfname)
820 827 newfparent = fparent2
821 828
822 829 if manifest2: # branch merge
823 830 if fparent2 == nullid or crev is None: # copied on remote side
824 831 if cfname in manifest2:
825 832 crev = manifest2[cfname]
826 833 newfparent = fparent1
827 834
828 835 # find source in nearest ancestor if we've lost track
829 836 if not crev:
830 837 self.ui.debug(" %s: searching for copy revision for %s\n" %
831 838 (fname, cfname))
832 839 for ancestor in self[None].ancestors():
833 840 if cfname in ancestor:
834 841 crev = ancestor[cfname].filenode()
835 842 break
836 843
837 844 if crev:
838 845 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
839 846 meta["copy"] = cfname
840 847 meta["copyrev"] = hex(crev)
841 848 fparent1, fparent2 = nullid, newfparent
842 849 else:
843 850 self.ui.warn(_("warning: can't find ancestor for '%s' "
844 851 "copied from '%s'!\n") % (fname, cfname))
845 852
846 853 elif fparent2 != nullid:
847 854 # is one parent an ancestor of the other?
848 855 fparentancestor = flog.ancestor(fparent1, fparent2)
849 856 if fparentancestor == fparent1:
850 857 fparent1, fparent2 = fparent2, nullid
851 858 elif fparentancestor == fparent2:
852 859 fparent2 = nullid
853 860
854 861 # is the file changed?
855 862 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
856 863 changelist.append(fname)
857 864 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
858 865
859 866 # are just the flags changed during merge?
860 867 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
861 868 changelist.append(fname)
862 869
863 870 return fparent1
864 871
865 872 def commit(self, text="", user=None, date=None, match=None, force=False,
866 873 editor=False, extra={}):
867 874 """Add a new revision to current repository.
868 875
869 876 Revision information is gathered from the working directory,
870 877 match can be used to filter the committed files. If editor is
871 878 supplied, it is called to get a commit message.
872 879 """
873 880
874 881 def fail(f, msg):
875 882 raise util.Abort('%s: %s' % (f, msg))
876 883
877 884 if not match:
878 885 match = matchmod.always(self.root, '')
879 886
880 887 if not force:
881 888 vdirs = []
882 889 match.dir = vdirs.append
883 890 match.bad = fail
884 891
885 892 wlock = self.wlock()
886 893 try:
887 894 wctx = self[None]
888 895 merge = len(wctx.parents()) > 1
889 896
890 897 if (not force and merge and match and
891 898 (match.files() or match.anypats())):
892 899 raise util.Abort(_('cannot partially commit a merge '
893 900 '(do not specify files or patterns)'))
894 901
895 902 changes = self.status(match=match, clean=force)
896 903 if force:
897 904 changes[0].extend(changes[6]) # mq may commit unchanged files
898 905
899 906 # check subrepos
900 907 subs = []
901 908 removedsubs = set()
902 909 for p in wctx.parents():
903 910 removedsubs.update(s for s in p.substate if match(s))
904 911 for s in wctx.substate:
905 912 removedsubs.discard(s)
906 913 if match(s) and wctx.sub(s).dirty():
907 914 subs.append(s)
908 915 if (subs or removedsubs):
909 916 if (not match('.hgsub') and
910 917 '.hgsub' in (wctx.modified() + wctx.added())):
911 918 raise util.Abort(_("can't commit subrepos without .hgsub"))
912 919 if '.hgsubstate' not in changes[0]:
913 920 changes[0].insert(0, '.hgsubstate')
914 921
915 922 # make sure all explicit patterns are matched
916 923 if not force and match.files():
917 924 matched = set(changes[0] + changes[1] + changes[2])
918 925
919 926 for f in match.files():
920 927 if f == '.' or f in matched or f in wctx.substate:
921 928 continue
922 929 if f in changes[3]: # missing
923 930 fail(f, _('file not found!'))
924 931 if f in vdirs: # visited directory
925 932 d = f + '/'
926 933 for mf in matched:
927 934 if mf.startswith(d):
928 935 break
929 936 else:
930 937 fail(f, _("no match under directory!"))
931 938 elif f not in self.dirstate:
932 939 fail(f, _("file not tracked!"))
933 940
934 941 if (not force and not extra.get("close") and not merge
935 942 and not (changes[0] or changes[1] or changes[2])
936 943 and wctx.branch() == wctx.p1().branch()):
937 944 return None
938 945
939 946 ms = mergemod.mergestate(self)
940 947 for f in changes[0]:
941 948 if f in ms and ms[f] == 'u':
942 949 raise util.Abort(_("unresolved merge conflicts "
943 950 "(see hg resolve)"))
944 951
945 952 cctx = context.workingctx(self, text, user, date, extra, changes)
946 953 if editor:
947 954 cctx._text = editor(self, cctx, subs)
948 955 edited = (text != cctx._text)
949 956
950 957 # commit subs
951 958 if subs or removedsubs:
952 959 state = wctx.substate.copy()
953 960 for s in sorted(subs):
954 961 sub = wctx.sub(s)
955 962 self.ui.status(_('committing subrepository %s\n') %
956 963 subrepo.subrelpath(sub))
957 964 sr = sub.commit(cctx._text, user, date)
958 965 state[s] = (state[s][0], sr)
959 966 subrepo.writestate(self, state)
960 967
961 968 # Save commit message in case this transaction gets rolled back
962 969 # (e.g. by a pretxncommit hook). Leave the content alone on
963 970 # the assumption that the user will use the same editor again.
964 971 msgfile = self.opener('last-message.txt', 'wb')
965 972 msgfile.write(cctx._text)
966 973 msgfile.close()
967 974
968 975 p1, p2 = self.dirstate.parents()
969 976 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
970 977 try:
971 978 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
972 979 ret = self.commitctx(cctx, True)
973 980 except:
974 981 if edited:
975 982 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
976 983 self.ui.write(
977 984 _('note: commit message saved in %s\n') % msgfn)
978 985 raise
979 986
980 987 # update dirstate and mergestate
981 988 for f in changes[0] + changes[1]:
982 989 self.dirstate.normal(f)
983 990 for f in changes[2]:
984 991 self.dirstate.forget(f)
985 992 self.dirstate.setparents(ret)
986 993 ms.reset()
987 994 finally:
988 995 wlock.release()
989 996
990 997 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
991 998 return ret
992 999
993 1000 def commitctx(self, ctx, error=False):
994 1001 """Add a new revision to current repository.
995 1002 Revision information is passed via the context argument.
996 1003 """
997 1004
998 1005 tr = lock = None
999 1006 removed = list(ctx.removed())
1000 1007 p1, p2 = ctx.p1(), ctx.p2()
1001 1008 m1 = p1.manifest().copy()
1002 1009 m2 = p2.manifest()
1003 1010 user = ctx.user()
1004 1011
1005 1012 lock = self.lock()
1006 1013 try:
1007 1014 tr = self.transaction("commit")
1008 1015 trp = weakref.proxy(tr)
1009 1016
1010 1017 # check in files
1011 1018 new = {}
1012 1019 changed = []
1013 1020 linkrev = len(self)
1014 1021 for f in sorted(ctx.modified() + ctx.added()):
1015 1022 self.ui.note(f + "\n")
1016 1023 try:
1017 1024 fctx = ctx[f]
1018 1025 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1019 1026 changed)
1020 1027 m1.set(f, fctx.flags())
1021 1028 except OSError, inst:
1022 1029 self.ui.warn(_("trouble committing %s!\n") % f)
1023 1030 raise
1024 1031 except IOError, inst:
1025 1032 errcode = getattr(inst, 'errno', errno.ENOENT)
1026 1033 if error or errcode and errcode != errno.ENOENT:
1027 1034 self.ui.warn(_("trouble committing %s!\n") % f)
1028 1035 raise
1029 1036 else:
1030 1037 removed.append(f)
1031 1038
1032 1039 # update manifest
1033 1040 m1.update(new)
1034 1041 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1035 1042 drop = [f for f in removed if f in m1]
1036 1043 for f in drop:
1037 1044 del m1[f]
1038 1045 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1039 1046 p2.manifestnode(), (new, drop))
1040 1047
1041 1048 # update changelog
1042 1049 self.changelog.delayupdate()
1043 1050 n = self.changelog.add(mn, changed + removed, ctx.description(),
1044 1051 trp, p1.node(), p2.node(),
1045 1052 user, ctx.date(), ctx.extra().copy())
1046 1053 p = lambda: self.changelog.writepending() and self.root or ""
1047 1054 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1048 1055 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1049 1056 parent2=xp2, pending=p)
1050 1057 self.changelog.finalize(trp)
1051 1058 tr.close()
1052 1059
1053 1060 if self._branchcache:
1054 1061 self.updatebranchcache()
1055 1062 return n
1056 1063 finally:
1057 1064 if tr:
1058 1065 tr.release()
1059 1066 lock.release()
1060 1067
1061 1068 def destroyed(self):
1062 1069 '''Inform the repository that nodes have been destroyed.
1063 1070 Intended for use by strip and rollback, so there's a common
1064 1071 place for anything that has to be done after destroying history.'''
1065 1072 # XXX it might be nice if we could take the list of destroyed
1066 1073 # nodes, but I don't see an easy way for rollback() to do that
1067 1074
1068 1075 # Ensure the persistent tag cache is updated. Doing it now
1069 1076 # means that the tag cache only has to worry about destroyed
1070 1077 # heads immediately after a strip/rollback. That in turn
1071 1078 # guarantees that "cachetip == currenttip" (comparing both rev
1072 1079 # and node) always means no nodes have been added or destroyed.
1073 1080
1074 1081 # XXX this is suboptimal when qrefresh'ing: we strip the current
1075 1082 # head, refresh the tag cache, then immediately add a new head.
1076 1083 # But I think doing it this way is necessary for the "instant
1077 1084 # tag cache retrieval" case to work.
1078 1085 self.invalidatecaches()
1079 1086
1080 1087 def walk(self, match, node=None):
1081 1088 '''
1082 1089 walk recursively through the directory tree or a given
1083 1090 changeset, finding all files matched by the match
1084 1091 function
1085 1092 '''
1086 1093 return self[node].walk(match)
1087 1094
1088 1095 def status(self, node1='.', node2=None, match=None,
1089 1096 ignored=False, clean=False, unknown=False,
1090 1097 listsubrepos=False):
1091 1098 """return status of files between two nodes or node and working directory
1092 1099
1093 1100 If node1 is None, use the first dirstate parent instead.
1094 1101 If node2 is None, compare node1 with working directory.
1095 1102 """
1096 1103
1097 1104 def mfmatches(ctx):
1098 1105 mf = ctx.manifest().copy()
1099 1106 for fn in mf.keys():
1100 1107 if not match(fn):
1101 1108 del mf[fn]
1102 1109 return mf
1103 1110
1104 1111 if isinstance(node1, context.changectx):
1105 1112 ctx1 = node1
1106 1113 else:
1107 1114 ctx1 = self[node1]
1108 1115 if isinstance(node2, context.changectx):
1109 1116 ctx2 = node2
1110 1117 else:
1111 1118 ctx2 = self[node2]
1112 1119
1113 1120 working = ctx2.rev() is None
1114 1121 parentworking = working and ctx1 == self['.']
1115 1122 match = match or matchmod.always(self.root, self.getcwd())
1116 1123 listignored, listclean, listunknown = ignored, clean, unknown
1117 1124
1118 1125 # load earliest manifest first for caching reasons
1119 1126 if not working and ctx2.rev() < ctx1.rev():
1120 1127 ctx2.manifest()
1121 1128
1122 1129 if not parentworking:
1123 1130 def bad(f, msg):
1124 1131 if f not in ctx1:
1125 1132 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1126 1133 match.bad = bad
1127 1134
1128 1135 if working: # we need to scan the working dir
1129 1136 subrepos = []
1130 1137 if '.hgsub' in self.dirstate:
1131 1138 subrepos = ctx1.substate.keys()
1132 1139 s = self.dirstate.status(match, subrepos, listignored,
1133 1140 listclean, listunknown)
1134 1141 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1135 1142
1136 1143 # check for any possibly clean files
1137 1144 if parentworking and cmp:
1138 1145 fixup = []
1139 1146 # do a full compare of any files that might have changed
1140 1147 for f in sorted(cmp):
1141 1148 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1142 1149 or ctx1[f].cmp(ctx2[f])):
1143 1150 modified.append(f)
1144 1151 else:
1145 1152 fixup.append(f)
1146 1153
1147 1154 # update dirstate for files that are actually clean
1148 1155 if fixup:
1149 1156 if listclean:
1150 1157 clean += fixup
1151 1158
1152 1159 try:
1153 1160 # updating the dirstate is optional
1154 1161 # so we don't wait on the lock
1155 1162 wlock = self.wlock(False)
1156 1163 try:
1157 1164 for f in fixup:
1158 1165 self.dirstate.normal(f)
1159 1166 finally:
1160 1167 wlock.release()
1161 1168 except error.LockError:
1162 1169 pass
1163 1170
1164 1171 if not parentworking:
1165 1172 mf1 = mfmatches(ctx1)
1166 1173 if working:
1167 1174 # we are comparing working dir against non-parent
1168 1175 # generate a pseudo-manifest for the working dir
1169 1176 mf2 = mfmatches(self['.'])
1170 1177 for f in cmp + modified + added:
1171 1178 mf2[f] = None
1172 1179 mf2.set(f, ctx2.flags(f))
1173 1180 for f in removed:
1174 1181 if f in mf2:
1175 1182 del mf2[f]
1176 1183 else:
1177 1184 # we are comparing two revisions
1178 1185 deleted, unknown, ignored = [], [], []
1179 1186 mf2 = mfmatches(ctx2)
1180 1187
1181 1188 modified, added, clean = [], [], []
1182 1189 for fn in mf2:
1183 1190 if fn in mf1:
1184 1191 if (mf1.flags(fn) != mf2.flags(fn) or
1185 1192 (mf1[fn] != mf2[fn] and
1186 1193 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1187 1194 modified.append(fn)
1188 1195 elif listclean:
1189 1196 clean.append(fn)
1190 1197 del mf1[fn]
1191 1198 else:
1192 1199 added.append(fn)
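            # anything still in mf1 was matched in ctx1 but absent from
            # mf2, so it must have been removed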
1193 1200 removed = mf1.keys()
1194 1201
1195 1202 r = modified, added, removed, deleted, unknown, ignored, clean
1196 1203
1197 1204 if listsubrepos:
1198 1205 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1199 1206 if working:
1200 1207 rev2 = None
1201 1208 else:
1202 1209 rev2 = ctx2.substate[subpath][1]
1203 1210 try:
1204 1211 submatch = matchmod.narrowmatcher(subpath, match)
1205 1212 s = sub.status(rev2, match=submatch, ignored=listignored,
1206 1213 clean=listclean, unknown=listunknown,
1207 1214 listsubrepos=True)
1208 1215 for rfiles, sfiles in zip(r, s):
1209 1216 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1210 1217 except error.LookupError:
1211 1218 self.ui.status(_("skipping missing subrepository: %s\n")
1212 1219 % subpath)
1213 1220
1214 1221 for l in r: l.sort()
1215 1222 return r
1216 1223
1217 1224 def heads(self, start=None):
1218 1225 heads = self.changelog.heads(start)
1219 1226 # sort the output in rev descending order
1220 1227 return sorted(heads, key=self.changelog.rev, reverse=True)
1221 1228
1222 1229 def branchheads(self, branch=None, start=None, closed=False):
1223 1230 '''return a (possibly filtered) list of heads for the given branch
1224 1231
1225 1232 Heads are returned in topological order, from newest to oldest.
1226 1233 If branch is None, use the dirstate branch.
1227 1234 If start is not None, return only heads reachable from start.
1228 1235 If closed is True, return heads that are marked as closed as well.
1229 1236 '''
1230 1237 if branch is None:
1231 1238 branch = self[None].branch()
1232 1239 branches = self.branchmap()
1233 1240 if branch not in branches:
1234 1241 return []
1235 1242 # the cache returns heads ordered lowest to highest
1236 1243 bheads = list(reversed(branches[branch]))
1237 1244 if start is not None:
1238 1245 # filter out the heads that cannot be reached from startrev
1239 1246 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1240 1247 bheads = [h for h in bheads if h in fbheads]
1241 1248 if not closed:
1242 1249 bheads = [h for h in bheads if
1243 1250 ('close' not in self.changelog.read(h)[5])]
1244 1251 return bheads
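    # Illustrative calls (assuming a 'default' branch with both open and
    # closed heads):
    #
    #   repo.branchheads('default')               # open heads only
    #   repo.branchheads('default', closed=True)  # closed heads included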
1245 1252
1246 1253 def branches(self, nodes):
1247 1254 if not nodes:
1248 1255 nodes = [self.changelog.tip()]
1249 1256 b = []
1250 1257 for n in nodes:
1251 1258 t = n
1252 1259 while True:
1253 1260 p = self.changelog.parents(n)
1254 1261 if p[1] != nullid or p[0] == nullid:
1255 1262 b.append((t, n, p[0], p[1]))
1256 1263 break
1257 1264 n = p[0]
1258 1265 return b
1259 1266
1260 1267 def between(self, pairs):
1261 1268 r = []
1262 1269
1263 1270 for top, bottom in pairs:
1264 1271 n, l, i = top, [], 0
1265 1272 f = 1
1266 1273
1267 1274 while n != bottom and n != nullid:
1268 1275 p = self.changelog.parents(n)[0]
1269 1276 if i == f:
1270 1277 l.append(n)
1271 1278 f = f * 2
1272 1279 n = p
1273 1280 i += 1
1274 1281
1275 1282 r.append(l)
1276 1283
1277 1284 return r
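    # Sketch of the sampling (hypothetical linear history, node(i) being
    # the node of rev i):
    #
    #   repo.between([(node(8), node(0))])
    #   # -> [[node(7), node(6), node(4)]]
    #
    # i.e. the nodes at distances 1, 2 and 4 from top; bottom itself is
    # never included. These exponentially spaced samples let the discovery
    # protocol binary-search for the common ancestor.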
1278 1285
1279 1286 def pull(self, remote, heads=None, force=False):
1280 1287 lock = self.lock()
1281 1288 try:
1282 1289 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1283 1290 force=force)
1284 1291 common, fetch, rheads = tmp
1285 1292 if not fetch:
1286 1293 self.ui.status(_("no changes found\n"))
1287 1294 return 0
1288 1295
1289 1296 if heads is None and fetch == [nullid]:
1290 1297 self.ui.status(_("requesting all changes\n"))
1291 1298 elif heads is None and remote.capable('changegroupsubset'):
1292 1299 # issue1320, avoid a race if remote changed after discovery
1293 1300 heads = rheads
1294 1301
1295 1302 if heads is None:
1296 1303 cg = remote.changegroup(fetch, 'pull')
1297 1304 else:
1298 1305 if not remote.capable('changegroupsubset'):
1299 1306 raise util.Abort(_("partial pull cannot be done because "
1300 1307 "other repository doesn't support "
1301 1308 "changegroupsubset."))
1302 1309 cg = remote.changegroupsubset(fetch, heads, 'pull')
1303 1310 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1304 1311 finally:
1305 1312 lock.release()
1306 1313
1307 1314 def checkpush(self, force, revs):
1308 1315 """Extensions can override this function if additional checks have
1309 1316 to be performed before pushing, or call it if they override the
1310 1317 push command.
1311 1318 """
1312 1319 pass
1313 1320
1314 1321 def push(self, remote, force=False, revs=None, newbranch=False):
1315 1322 '''Push outgoing changesets (limited by revs) from the current
1316 1323 repository to remote. Return an integer:
1317 1324 - 0 means HTTP error *or* nothing to push
1318 1325 - 1 means we pushed and remote head count is unchanged *or*
1319 1326 we have outgoing changesets but refused to push
1320 1327 - other values as described by addchangegroup()
1321 1328 '''
1322 1329 # there are two ways to push to a remote repo:
1323 1330 #
1324 1331 # addchangegroup assumes local user can lock remote
1325 1332 # repo (local filesystem, old ssh servers).
1326 1333 #
1327 1334 # unbundle assumes local user cannot lock remote repo (new ssh
1328 1335 # servers, http servers).
1329 1336
1330 1337 self.checkpush(force, revs)
1331 1338 lock = None
1332 1339 unbundle = remote.capable('unbundle')
1333 1340 if not unbundle:
1334 1341 lock = remote.lock()
1335 1342 try:
1336 1343 ret = discovery.prepush(self, remote, force, revs, newbranch)
1337 1344 if ret[0] is None:
1338 1345 # and here we return 0 for "nothing to push" or 1 for
1339 1346 # "something to push but I refuse"
1340 1347 return ret[1]
1341 1348
1342 1349 cg, remote_heads = ret
1343 1350 if unbundle:
1344 1351 # The local repo finds the heads on the server and works out which
1345 1352 # revs it must push. Once the revs are transferred, if the server
1346 1353 # finds it has different heads (someone else won the commit/push
1347 1354 # race), it aborts.
1348 1355 if force:
1349 1356 remote_heads = ['force']
1350 1357 # ssh: return remote's addchangegroup()
1351 1358 # http: return remote's addchangegroup() or 0 for error
1352 1359 return remote.unbundle(cg, remote_heads, 'push')
1353 1360 else:
1354 1361 # we return an integer indicating remote head count change
1355 1362 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1356 1363 finally:
1357 1364 if lock is not None:
1358 1365 lock.release()
1359 1366
1360 1367 def changegroupinfo(self, nodes, source):
1361 1368 if self.ui.verbose or source == 'bundle':
1362 1369 self.ui.status(_("%d changesets found\n") % len(nodes))
1363 1370 if self.ui.debugflag:
1364 1371 self.ui.debug("list of changesets:\n")
1365 1372 for node in nodes:
1366 1373 self.ui.debug("%s\n" % hex(node))
1367 1374
1368 1375 def changegroupsubset(self, bases, heads, source, extranodes=None):
1369 1376 """Compute a changegroup consisting of all the nodes that are
1370 1377 descendants of any of the bases and ancestors of any of the heads.
1371 1378 Return a chunkbuffer object whose read() method will return
1372 1379 successive changegroup chunks.
1373 1380
1374 1381 It is fairly complex as determining which filenodes and which
1375 1382 manifest nodes need to be included for the changeset to be complete
1376 1383 is non-trivial.
1377 1384
1378 1385 Another wrinkle is doing the reverse, figuring out which changeset in
1379 1386 the changegroup a particular filenode or manifestnode belongs to.
1380 1387
1381 1388 The caller can specify some nodes that must be included in the
1382 1389 changegroup using the extranodes argument. It should be a dict
1383 1390 where the keys are the filenames (or 1 for the manifest), and the
1384 1391 values are lists of (node, linknode) tuples, where node is a wanted
1385 1392 node and linknode is the changelog node that should be transmitted as
1386 1393 the linkrev.
1387 1394 """
1388 1395
1389 1396 # Set up some initial variables
1390 1397 # Make it easy to refer to self.changelog
1391 1398 cl = self.changelog
1392 1399 # Compute the list of changesets in this changegroup.
1393 1400 # Some bases may turn out to be superfluous, and some heads may be
1394 1401 # too. nodesbetween will return the minimal set of bases and heads
1395 1402 # necessary to re-create the changegroup.
1396 1403 if not bases:
1397 1404 bases = [nullid]
1398 1405 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1399 1406
1400 1407 if extranodes is None:
1401 1408 # can we go through the fast path ?
1402 1409 heads.sort()
1403 1410 allheads = self.heads()
1404 1411 allheads.sort()
1405 1412 if heads == allheads:
1406 1413 return self._changegroup(msng_cl_lst, source)
1407 1414
1408 1415 # slow path
1409 1416 self.hook('preoutgoing', throw=True, source=source)
1410 1417
1411 1418 self.changegroupinfo(msng_cl_lst, source)
1412 1419
1413 1420 # We assume that all ancestors of bases are known
1414 1421 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1415 1422
1416 1423 # Make it easy to refer to self.manifest
1417 1424 mnfst = self.manifest
1418 1425 # We don't know which manifests are missing yet
1419 1426 msng_mnfst_set = {}
1420 1427 # Nor do we know which filenodes are missing.
1421 1428 msng_filenode_set = {}
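        # Both "sets" are really dicts: msng_mnfst_set maps a missing
        # manifest node to its owning changenode, and msng_filenode_set
        # maps filename -> {filenode: changenode}.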
1422 1429
1423 1430 # A changeset always belongs to itself, so the changenode lookup
1424 1431 # function for a changenode is identity.
1425 1432 def identity(x):
1426 1433 return x
1427 1434
1428 1435 # A function-generating function that sets up the initial environment
1429 1436 # for the inner function.
1430 1437 def filenode_collector(changedfiles):
1431 1438 # This gathers information from each manifestnode included in the
1432 1439 # changegroup about which filenodes the manifest node references
1433 1440 # so we can include those in the changegroup too.
1434 1441 #
1435 1442 # It also remembers which changenode each filenode belongs to. It
1436 1443 # does this by assuming that a filenode belongs to the changenode
1437 1444 # that the first manifest referencing it belongs to.
1438 1445 def collect_msng_filenodes(mnfstnode):
1439 1446 r = mnfst.rev(mnfstnode)
1440 1447 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1441 1448 # If the previous rev is one of the parents,
1442 1449 # we only need to see a diff.
1443 1450 deltamf = mnfst.readdelta(mnfstnode)
1444 1451 # For each line in the delta
1445 1452 for f, fnode in deltamf.iteritems():
1446 1453 # And if the file is in the list of files we care
1447 1454 # about.
1448 1455 if f in changedfiles:
1449 1456 # Get the changenode this manifest belongs to
1450 1457 clnode = msng_mnfst_set[mnfstnode]
1451 1458 # Create the set of filenodes for the file if
1452 1459 # there isn't one already.
1453 1460 ndset = msng_filenode_set.setdefault(f, {})
1454 1461 # And set the filenode's changelog node to the
1455 1462 # manifest's if it hasn't been set already.
1456 1463 ndset.setdefault(fnode, clnode)
1457 1464 else:
1458 1465 # Otherwise we need a full manifest.
1459 1466 m = mnfst.read(mnfstnode)
1460 1467 # For every file we care about.
1461 1468 for f in changedfiles:
1462 1469 fnode = m.get(f, None)
1463 1470 # If it's in the manifest
1464 1471 if fnode is not None:
1465 1472 # See comments above.
1466 1473 clnode = msng_mnfst_set[mnfstnode]
1467 1474 ndset = msng_filenode_set.setdefault(f, {})
1468 1475 ndset.setdefault(fnode, clnode)
1469 1476 return collect_msng_filenodes
1470 1477
1471 1478 # If we determine that a particular file or manifest node must be a
1472 1479 # node that the recipient of the changegroup will already have, we can
1473 1480 # also assume the recipient will have all the parents. This function
1474 1481 # prunes them from the set of missing nodes.
1475 1482 def prune(revlog, missingnodes):
1476 1483 hasset = set()
1477 1484 # If a 'missing' filenode thinks it belongs to a changenode we
1478 1485 # assume the recipient must have, then the recipient must have
1479 1486 # that filenode.
1480 1487 for n in missingnodes:
1481 1488 clrev = revlog.linkrev(revlog.rev(n))
1482 1489 if clrev in commonrevs:
1483 1490 hasset.add(n)
1484 1491 for n in hasset:
1485 1492 missingnodes.pop(n, None)
1486 1493 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1487 1494 missingnodes.pop(revlog.node(r), None)
1488 1495
1489 1496 # Add the nodes that were explicitly requested.
1490 1497 def add_extra_nodes(name, nodes):
1491 1498 if not extranodes or name not in extranodes:
1492 1499 return
1493 1500
1494 1501 for node, linknode in extranodes[name]:
1495 1502 if node not in nodes:
1496 1503 nodes[node] = linknode
1497 1504
1498 1505 # Now that we have all these utility functions to help out and
1499 1506 # logically divide up the task, generate the group.
1500 1507 def gengroup():
1501 1508 # The set of changed files starts empty.
1502 1509 changedfiles = set()
1503 1510 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1504 1511
1505 1512 # Create a changenode group generator that will call our functions
1506 1513 # back to lookup the owning changenode and collect information.
1507 1514 group = cl.group(msng_cl_lst, identity, collect)
1508 1515 for cnt, chnk in enumerate(group):
1509 1516 yield chnk
1510 1517 # revlog.group yields three entries per node, so
1511 1518 # dividing by 3 gives an approximation of how many
1512 1519 # nodes have been processed.
1513 1520 self.ui.progress(_('bundling'), cnt / 3,
1514 1521 unit=_('changesets'))
1515 1522 changecount = cnt / 3
1516 1523 self.ui.progress(_('bundling'), None)
1517 1524
1518 1525 prune(mnfst, msng_mnfst_set)
1519 1526 add_extra_nodes(1, msng_mnfst_set)
1520 1527 msng_mnfst_lst = msng_mnfst_set.keys()
1521 1528 # Sort the manifestnodes by revision number.
1522 1529 msng_mnfst_lst.sort(key=mnfst.rev)
1523 1530 # Create a generator for the manifestnodes that calls our lookup
1524 1531 # and data collection functions back.
1525 1532 group = mnfst.group(msng_mnfst_lst,
1526 1533 lambda mnode: msng_mnfst_set[mnode],
1527 1534 filenode_collector(changedfiles))
1528 1535 efiles = {}
1529 1536 for cnt, chnk in enumerate(group):
1530 1537 if cnt % 3 == 1:
1531 1538 mnode = chnk[:20]
1532 1539 efiles.update(mnfst.readdelta(mnode))
1533 1540 yield chnk
1534 1541 # see above comment for why we divide by 3
1535 1542 self.ui.progress(_('bundling'), cnt / 3,
1536 1543 unit=_('manifests'), total=changecount)
1537 1544 self.ui.progress(_('bundling'), None)
1538 1545 efiles = len(efiles)
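            # efiles is rebound here from the dict of files seen to a bare
            # count; only the total matters for the file progress bar below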
1539 1546
1540 1547 # These are no longer needed, dereference and toss the memory for
1541 1548 # them.
1542 1549 msng_mnfst_lst = None
1543 1550 msng_mnfst_set.clear()
1544 1551
1545 1552 if extranodes:
1546 1553 for fname in extranodes:
1547 1554 if isinstance(fname, int):
1548 1555 continue
1549 1556 msng_filenode_set.setdefault(fname, {})
1550 1557 changedfiles.add(fname)
1551 1558 # Go through all our files in order sorted by name.
1552 1559 for idx, fname in enumerate(sorted(changedfiles)):
1553 1560 filerevlog = self.file(fname)
1554 1561 if not len(filerevlog):
1555 1562 raise util.Abort(_("empty or missing revlog for %s") % fname)
1556 1563 # Toss out the filenodes that the recipient isn't really
1557 1564 # missing.
1558 1565 missingfnodes = msng_filenode_set.pop(fname, {})
1559 1566 prune(filerevlog, missingfnodes)
1560 1567 add_extra_nodes(fname, missingfnodes)
1561 1568 # If any filenodes are left, generate the group for them,
1562 1569 # otherwise don't bother.
1563 1570 if missingfnodes:
1564 1571 yield changegroup.chunkheader(len(fname))
1565 1572 yield fname
1566 1573 # Sort the filenodes by their revision # (topological order)
1567 1574 nodeiter = list(missingfnodes)
1568 1575 nodeiter.sort(key=filerevlog.rev)
1569 1576 # Create a group generator and only pass in a changenode
1570 1577 # lookup function as we need to collect no information
1571 1578 # from filenodes.
1572 1579 group = filerevlog.group(nodeiter,
1573 1580 lambda fnode: missingfnodes[fnode])
1574 1581 for chnk in group:
1575 1582 # even though we print the same progress on
1576 1583 # most loop iterations, put the progress call
1577 1584 # here so that time estimates (if any) can be updated
1578 1585 self.ui.progress(
1579 1586 _('bundling'), idx, item=fname,
1580 1587 unit=_('files'), total=efiles)
1581 1588 yield chnk
1582 1589 # Signal that no more groups are left.
1583 1590 yield changegroup.closechunk()
1584 1591 self.ui.progress(_('bundling'), None)
1585 1592
1586 1593 if msng_cl_lst:
1587 1594 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1588 1595
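        # 'UN' marks the changegroup stream as uncompressed; readers use
        # this header field to pick a decompressor (cf. 'HG10UN' bundles).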
1589 1596 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1590 1597
1591 1598 def changegroup(self, basenodes, source):
1592 1599 # to avoid a race we use changegroupsubset() (issue1320)
1593 1600 return self.changegroupsubset(basenodes, self.heads(), source)
1594 1601
1595 1602 def _changegroup(self, nodes, source):
1596 1603 """Compute the changegroup of all nodes that we have that a recipient
1597 1604 doesn't. Return a chunkbuffer object whose read() method will return
1598 1605 successive changegroup chunks.
1599 1606
1600 1607 This is much easier than the previous function as we can assume that
1601 1608 the recipient has any changenode we aren't sending them.
1602 1609
1603 1610 nodes is the set of nodes to send"""
1604 1611
1605 1612 self.hook('preoutgoing', throw=True, source=source)
1606 1613
1607 1614 cl = self.changelog
1608 1615 revset = set([cl.rev(n) for n in nodes])
1609 1616 self.changegroupinfo(nodes, source)
1610 1617
1611 1618 def identity(x):
1612 1619 return x
1613 1620
1614 1621 def gennodelst(log):
1615 1622 for r in log:
1616 1623 if log.linkrev(r) in revset:
1617 1624 yield log.node(r)
1618 1625
1619 1626 def lookuplinkrev_func(revlog):
1620 1627 def lookuplinkrev(n):
1621 1628 return cl.node(revlog.linkrev(revlog.rev(n)))
1622 1629 return lookuplinkrev
1623 1630
1624 1631 def gengroup():
1625 1632 '''yield a sequence of changegroup chunks (strings)'''
1626 1633 # construct a list of all changed files
1627 1634 changedfiles = set()
1628 1635 mmfs = {}
1629 1636 collect = changegroup.collector(cl, mmfs, changedfiles)
1630 1637
1631 1638 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1632 1639 # revlog.group yields three entries per node, so
1633 1640 # dividing by 3 gives an approximation of how many
1634 1641 # nodes have been processed.
1635 1642 self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
1636 1643 yield chnk
1637 1644 changecount = cnt / 3
1638 1645 self.ui.progress(_('bundling'), None)
1639 1646
1640 1647 mnfst = self.manifest
1641 1648 nodeiter = gennodelst(mnfst)
1642 1649 efiles = {}
1643 1650 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1644 1651 lookuplinkrev_func(mnfst))):
1645 1652 if cnt % 3 == 1:
1646 1653 mnode = chnk[:20]
1647 1654 efiles.update(mnfst.readdelta(mnode))
1648 1655 # see above comment for why we divide by 3
1649 1656 self.ui.progress(_('bundling'), cnt / 3,
1650 1657 unit=_('manifests'), total=changecount)
1651 1658 yield chnk
1652 1659 efiles = len(efiles)
1653 1660 self.ui.progress(_('bundling'), None)
1654 1661
1655 1662 for idx, fname in enumerate(sorted(changedfiles)):
1656 1663 filerevlog = self.file(fname)
1657 1664 if not len(filerevlog):
1658 1665 raise util.Abort(_("empty or missing revlog for %s") % fname)
1659 1666 nodeiter = gennodelst(filerevlog)
1660 1667 nodeiter = list(nodeiter)
1661 1668 if nodeiter:
1662 1669 yield changegroup.chunkheader(len(fname))
1663 1670 yield fname
1664 1671 lookup = lookuplinkrev_func(filerevlog)
1665 1672 for chnk in filerevlog.group(nodeiter, lookup):
1666 1673 self.ui.progress(
1667 1674 _('bundling'), idx, item=fname,
1668 1675 total=efiles, unit=_('files'))
1669 1676 yield chnk
1670 1677 self.ui.progress(_('bundling'), None)
1671 1678
1672 1679 yield changegroup.closechunk()
1673 1680
1674 1681 if nodes:
1675 1682 self.hook('outgoing', node=hex(nodes[0]), source=source)
1676 1683
1677 1684 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1678 1685
1679 1686 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1680 1687 """Add the changegroup returned by source.read() to this repo.
1681 1688 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1682 1689 the URL of the repo where this changegroup is coming from.
1683 1690 If lock is not None, the function takes ownership of the lock
1684 1691 and releases it after the changegroup is added.
1685 1692
1686 1693 Return an integer summarizing the change to this repo:
1687 1694 - nothing changed or no source: 0
1688 1695 - more heads than before: 1+added heads (2..n)
1689 1696 - fewer heads than before: -1-removed heads (-2..-n)
1690 1697 - number of heads stays the same: 1
1691 1698 """
1692 1699 def csmap(x):
1693 1700 self.ui.debug("add changeset %s\n" % short(x))
1694 1701 return len(cl)
1695 1702
1696 1703 def revmap(x):
1697 1704 return cl.rev(x)
1698 1705
1699 1706 if not source:
1700 1707 return 0
1701 1708
1702 1709 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1703 1710
1704 1711 changesets = files = revisions = 0
1705 1712 efiles = set()
1706 1713
1707 1714 # write changelog data to temp files so concurrent readers will not see
1708 1715 # an inconsistent view
1709 1716 cl = self.changelog
1710 1717 cl.delayupdate()
1711 1718 oldheads = len(cl.heads())
1712 1719
1713 1720 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1714 1721 try:
1715 1722 trp = weakref.proxy(tr)
1716 1723 # pull off the changeset group
1717 1724 self.ui.status(_("adding changesets\n"))
1718 1725 clstart = len(cl)
1719 1726 class prog(object):
1720 1727 step = _('changesets')
1721 1728 count = 1
1722 1729 ui = self.ui
1723 1730 total = None
1724 1731 def __call__(self):
1725 1732 self.ui.progress(self.step, self.count, unit=_('chunks'),
1726 1733 total=self.total)
1727 1734 self.count += 1
1728 1735 pr = prog()
1729 1736 source.callback = pr
1730 1737
1731 1738 if (cl.addgroup(source, csmap, trp) is None
1732 1739 and not emptyok):
1733 1740 raise util.Abort(_("received changelog group is empty"))
1734 1741 clend = len(cl)
1735 1742 changesets = clend - clstart
1736 1743 for c in xrange(clstart, clend):
1737 1744 efiles.update(self[c].files())
1738 1745 efiles = len(efiles)
1739 1746 self.ui.progress(_('changesets'), None)
1740 1747
1741 1748 # pull off the manifest group
1742 1749 self.ui.status(_("adding manifests\n"))
1743 1750 pr.step = _('manifests')
1744 1751 pr.count = 1
1745 1752 pr.total = changesets # manifests <= changesets
1746 1753 # no need to check for empty manifest group here:
1747 1754 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1748 1755 # no new manifest will be created and the manifest group will
1749 1756 # be empty during the pull
1750 1757 self.manifest.addgroup(source, revmap, trp)
1751 1758 self.ui.progress(_('manifests'), None)
1752 1759
1753 1760 needfiles = {}
1754 1761 if self.ui.configbool('server', 'validate', default=False):
1755 1762 # validate incoming csets have their manifests
1756 1763 for cset in xrange(clstart, clend):
1757 1764 mfest = self.changelog.read(self.changelog.node(cset))[0]
1758 1765 mfest = self.manifest.readdelta(mfest)
1759 1766 # store file nodes we must see
1760 1767 for f, n in mfest.iteritems():
1761 1768 needfiles.setdefault(f, set()).add(n)
1762 1769
1763 1770 # process the files
1764 1771 self.ui.status(_("adding file changes\n"))
1765 1772 pr.step = 'files'
1766 1773 pr.count = 1
1767 1774 pr.total = efiles
1768 1775 source.callback = None
1769 1776
1770 1777 while True:
1771 1778 f = source.chunk()
1772 1779 if not f:
1773 1780 break
1774 1781 self.ui.debug("adding %s revisions\n" % f)
1775 1782 pr()
1776 1783 fl = self.file(f)
1777 1784 o = len(fl)
1778 1785 if fl.addgroup(source, revmap, trp) is None:
1779 1786 raise util.Abort(_("received file revlog group is empty"))
1780 1787 revisions += len(fl) - o
1781 1788 files += 1
1782 1789 if f in needfiles:
1783 1790 needs = needfiles[f]
1784 1791 for new in xrange(o, len(fl)):
1785 1792 n = fl.node(new)
1786 1793 if n in needs:
1787 1794 needs.remove(n)
1788 1795 if not needs:
1789 1796 del needfiles[f]
1790 1797 self.ui.progress(_('files'), None)
1791 1798
1792 1799 for f, needs in needfiles.iteritems():
1793 1800 fl = self.file(f)
1794 1801 for n in needs:
1795 1802 try:
1796 1803 fl.rev(n)
1797 1804 except error.LookupError:
1798 1805 raise util.Abort(
1799 1806 _('missing file data for %s:%s - run hg verify') %
1800 1807 (f, hex(n)))
1801 1808
1802 1809 newheads = len(cl.heads())
1803 1810 heads = ""
1804 1811 if oldheads and newheads != oldheads:
1805 1812 heads = _(" (%+d heads)") % (newheads - oldheads)
1806 1813
1807 1814 self.ui.status(_("added %d changesets"
1808 1815 " with %d changes to %d files%s\n")
1809 1816 % (changesets, revisions, files, heads))
1810 1817
1811 1818 if changesets > 0:
1812 1819 p = lambda: cl.writepending() and self.root or ""
1813 1820 self.hook('pretxnchangegroup', throw=True,
1814 1821 node=hex(cl.node(clstart)), source=srctype,
1815 1822 url=url, pending=p)
1816 1823
1817 1824 # make changelog see real files again
1818 1825 cl.finalize(trp)
1819 1826
1820 1827 tr.close()
1821 1828 finally:
1822 1829 tr.release()
1823 1830 if lock:
1824 1831 lock.release()
1825 1832
1826 1833 if changesets > 0:
1827 1834 # forcefully update the on-disk branch cache
1828 1835 self.ui.debug("updating the branch cache\n")
1829 1836 self.updatebranchcache()
1830 1837 self.hook("changegroup", node=hex(cl.node(clstart)),
1831 1838 source=srctype, url=url)
1832 1839
1833 1840 for i in xrange(clstart, clend):
1834 1841 self.hook("incoming", node=hex(cl.node(i)),
1835 1842 source=srctype, url=url)
1836 1843
1837 1844 # never return 0 here:
1838 1845 if newheads < oldheads:
1839 1846 return newheads - oldheads - 1
1840 1847 else:
1841 1848 return newheads - oldheads + 1
1842 1849
1843 1850
1844 1851 def stream_in(self, remote, requirements):
1845 1852 fp = remote.stream_out()
1846 1853 l = fp.readline()
1847 1854 try:
1848 1855 resp = int(l)
1849 1856 except ValueError:
1850 1857 raise error.ResponseError(
1851 1858 _('Unexpected response from remote server:'), l)
1852 1859 if resp == 1:
1853 1860 raise util.Abort(_('operation forbidden by server'))
1854 1861 elif resp == 2:
1855 1862 raise util.Abort(_('locking the remote repository failed'))
1856 1863 elif resp != 0:
1857 1864 raise util.Abort(_('the server sent an unknown error code'))
1858 1865 self.ui.status(_('streaming all changes\n'))
1859 1866 l = fp.readline()
1860 1867 try:
1861 1868 total_files, total_bytes = map(int, l.split(' ', 1))
1862 1869 except (ValueError, TypeError):
1863 1870 raise error.ResponseError(
1864 1871 _('Unexpected response from remote server:'), l)
1865 1872 self.ui.status(_('%d files to transfer, %s of data\n') %
1866 1873 (total_files, util.bytecount(total_bytes)))
1867 1874 start = time.time()
1868 1875 for i in xrange(total_files):
1869 1876 # XXX doesn't support '\n' or '\r' in filenames
1870 1877 l = fp.readline()
1871 1878 try:
1872 1879 name, size = l.split('\0', 1)
1873 1880 size = int(size)
1874 1881 except (ValueError, TypeError):
1875 1882 raise error.ResponseError(
1876 1883 _('Unexpected response from remote server:'), l)
1877 1884 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1878 1885 # for backwards compat, name was partially encoded
1879 1886 ofp = self.sopener(store.decodedir(name), 'w')
1880 1887 for chunk in util.filechunkiter(fp, limit=size):
1881 1888 ofp.write(chunk)
1882 1889 ofp.close()
1883 1890 elapsed = time.time() - start
1884 1891 if elapsed <= 0:
1885 1892 elapsed = 0.001
1886 1893 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1887 1894 (util.bytecount(total_bytes), elapsed,
1888 1895 util.bytecount(total_bytes / elapsed)))
1889 1896
1890 1897 # new requirements = old non-format requirements + new format-related
1891 1898 # requirements from the streamed-in repository
1892 1899 requirements.update(set(self.requirements) - self.supportedformats)
1893 1900 self._applyrequirements(requirements)
1894 1901 self._writerequirements()
1895 1902
1896 1903 self.invalidate()
1897 1904 return len(self.heads()) + 1
1898 1905
1899 1906 def clone(self, remote, heads=[], stream=False):
1900 1907 '''clone remote repository.
1901 1908
1902 1909 keyword arguments:
1903 1910 heads: list of revs to clone (forces use of pull)
1904 1911 stream: use streaming clone if possible'''
1905 1912
1906 1913 # now, all clients that can request uncompressed clones can
1907 1914 # read repo formats supported by all servers that can serve
1908 1915 # them.
1909 1916
1910 1917 # if revlog format changes, client will have to check version
1911 1918 # and format flags on "stream" capability, and use
1912 1919 # uncompressed only if compatible.
1913 1920
1914 1921 if stream and not heads:
1915 1922 # 'stream' means remote revlog format is revlogv1 only
1916 1923 if remote.capable('stream'):
1917 1924 return self.stream_in(remote, set(('revlogv1',)))
1918 1925 # otherwise, 'streamreqs' contains the remote revlog format
1919 1926 streamreqs = remote.capable('streamreqs')
1920 1927 if streamreqs:
1921 1928 streamreqs = set(streamreqs.split(','))
1922 1929 # if we support it, stream in and adjust our requirements
1923 1930 if not streamreqs - self.supportedformats:
1924 1931 return self.stream_in(remote, streamreqs)
1925 1932 return self.pull(remote, heads)
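    # Illustrative calls (remote being any repository object exposing the
    # capabilities checked above):
    #
    #   repo.clone(remote, stream=True)    # streaming clone when supported
    #   repo.clone(remote, heads=[node])   # explicit heads force a pull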
1926 1933
1927 1934 def pushkey(self, namespace, key, old, new):
1928 1935 return pushkey.push(self, namespace, key, old, new)
1929 1936
1930 1937 def listkeys(self, namespace):
1931 1938 return pushkey.list(self, namespace)
1932 1939
1933 1940 # used to avoid circular references so destructors work
1934 1941 def aftertrans(files):
1935 1942 renamefiles = [tuple(t) for t in files]
1936 1943 def a():
1937 1944 for src, dest in renamefiles:
1938 1945 util.rename(src, dest)
1939 1946 return a
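# The returned closure is handed to the transaction machinery as a
# post-close callback (see transaction() earlier in this module), so the
# journal-to-undo renames happen without the callback holding a reference
# back to the repository.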
1940 1947
1941 1948 def instance(ui, path, create):
1942 1949 return localrepository(ui, util.drop_scheme('file', path), create)
1943 1950
1944 1951 def islocal(path):
1945 1952 return True