branchmap: make update responsible to update the cache key...
Pierre-Yves David
r18130:1b05ffce default
@@ -1,155 +1,173 @@
1 1 # branchmap.py - logic to compute, maintain, and store the branchmap for a local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev
9 9 import encoding
10 10
11 11 def read(repo):
12 12 partial = branchcache()
13 13 try:
14 14 f = repo.opener("cache/branchheads")
15 15 lines = f.read().split('\n')
16 16 f.close()
17 17 except (IOError, OSError):
18 18 return branchcache()
19 19
20 20 try:
21 21 last, lrev = lines.pop(0).split(" ", 1)
22 22 last, lrev = bin(last), int(lrev)
23 23 if lrev >= len(repo) or repo[lrev].node() != last:
24 24 # invalidate the cache
25 25 raise ValueError('invalidating branch cache (tip differs)')
26 26 for l in lines:
27 27 if not l:
28 28 continue
29 29 node, label = l.split(" ", 1)
30 30 label = encoding.tolocal(label.strip())
31 31 if node not in repo:
32 32 raise ValueError('invalidating branch cache because node '+
33 33 '%s does not exist' % node)
34 34 partial.setdefault(label, []).append(bin(node))
35 35 partial.tipnode = last
36 36 partial.tiprev = lrev
37 37 except KeyboardInterrupt:
38 38 raise
39 39 except Exception, inst:
40 40 if repo.ui.debugflag:
41 41 repo.ui.warn(str(inst), '\n')
42 42 partial = branchcache()
43 43 return partial
44 44
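For context, `read` above and `branchcache.write` below agree on a small text format for `.hg/cache/branchheads`: the first line is the cache key, a hex tip node plus its revision number, and each following line maps one branch head to its label. A minimal standalone sketch of a parser for that format (the sample contents are hypothetical, and the node hashes are shortened for readability):

```python
# Standalone parser for the cache/branchheads format used by read() and
# write(). Real files use 40-char hex nodes; shortened here for clarity.
SAMPLE = ("00ff00ff 18130\n"      # cache key: tip node + tip rev
          "00ff00ff default\n"    # one "<node> <branch>" line per head
          "11ee11ee stable\n")

def parse_branchheads(data):
    lines = data.split('\n')
    tipnode, tiprev = lines.pop(0).split(' ', 1)
    heads = {}
    for line in lines:
        if not line:
            continue
        node, label = line.split(' ', 1)
        heads.setdefault(label.strip(), []).append(node)
    return tipnode, int(tiprev), heads

print(parse_branchheads(SAMPLE))
# ('00ff00ff', 18130, {'default': ['00ff00ff'], 'stable': ['11ee11ee']})
```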
45 45 def update(repo, partial, ctxgen):
46 46 """Given a branchhead cache, partial, that may have extra nodes or be
47 47 missing heads, and a generator of nodes that are at least a superset of
48 48 heads missing, this function updates partial to be correct.
49 49 """
50 50 cl = repo.changelog
51 51 # collect new branch entries
52 52 newbranches = {}
53 53 for c in ctxgen:
54 54 newbranches.setdefault(c.branch(), []).append(c.node())
55 55 # if older branchheads are reachable from new ones, they aren't
56 56 # really branchheads. Note checking parents is insufficient:
57 57 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
58 58 for branch, newnodes in newbranches.iteritems():
59 59 bheads = partial.setdefault(branch, [])
60 60 # Remove candidate heads that no longer are in the repo (e.g., as
61 61 # the result of a strip that just happened). Avoid using 'node in
62 62 # self' here because that dives down into branchcache code somewhat
63 63 # recursively.
64 64 bheadrevs = [cl.rev(node) for node in bheads
65 65 if cl.hasnode(node)]
66 66 newheadrevs = [cl.rev(node) for node in newnodes
67 67 if cl.hasnode(node)]
68 68 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
69 69 # Remove duplicates - nodes that are in newheadrevs and are already
70 70 # in bheadrevs. This can happen if you strip a node whose parent
71 71 # was already a head (because they're on different branches).
72 72 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
73 73
74 74 # Starting from tip means fewer passes over reachable. If we know
75 75 # the new candidates are not ancestors of existing heads, we don't
76 76 # have to examine ancestors of existing heads
77 77 if ctxisnew:
78 78 iterrevs = sorted(newheadrevs)
79 79 else:
80 80 iterrevs = list(bheadrevs)
81 81
82 82 # This loop prunes out two kinds of heads - heads that are
83 83 # superseded by a head in newheadrevs, and newheadrevs that are not
84 84 # heads because an existing head is their descendant.
85 85 while iterrevs:
86 86 latest = iterrevs.pop()
87 87 if latest not in bheadrevs:
88 88 continue
89 89 ancestors = set(cl.ancestors([latest],
90 90 bheadrevs[0]))
91 91 if ancestors:
92 92 bheadrevs = [b for b in bheadrevs if b not in ancestors]
93 93 partial[branch] = [cl.node(rev) for rev in bheadrevs]
94 tiprev = max(bheadrevs)
95 if tiprev > partial.tiprev:
96 partial.tipnode = cl.node(tiprev)
97 partial.tiprev = tiprev
98
94 99
95 100 # There may be branches that cease to exist when the last commit in the
96 101 # branch was stripped. This code filters them out. Note that the
97 102 # branch that ceased to exist may not be in newbranches because
98 103 # newbranches is the set of candidate heads, which, when you strip the
99 104 # last commit in a branch, will be on the parent branch.
105 droppednodes = []
100 106 for branch in partial.keys():
101 107 nodes = [head for head in partial[branch]
102 108 if cl.hasnode(head)]
103 109 if not nodes:
110 droppednodes.extend(partial[branch])
104 111 del partial[branch]
112 try:
113 node = cl.node(partial.tiprev)
114 except IndexError:
115 node = None
116 if ((partial.tipnode != node)
117 or (partial.tipnode in droppednodes)):
118 # cache key is not valid anymore
119 partial.tipnode = nullid
120 partial.tiprev = nullrev
121 for heads in partial.values():
122 tiprev = max(cl.rev(node) for node in heads)
123 if tiprev > partial.tiprev:
124 partial.tipnode = cl.node(tiprev)
125 partial.tiprev = tiprev
126
105 127
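The pruning loop above is the subtle part of `update()`: a revision survives as a branch head only if no other candidate head descends from it. A self-contained toy model of that idea, using plain parent pointers instead of Mercurial's changelog (all names here are illustrative, not Mercurial API):

```python
# Toy model of the head-pruning logic in update(): a rev is a branch
# head only if no other candidate head is among its descendants.
parents = {0: [], 1: [0], 2: [1], 3: [1]}  # hypothetical mini-DAG

def ancestors(rev):
    seen, stack = set(), list(parents[rev])
    while stack:
        r = stack.pop()
        if r not in seen:
            seen.add(r)
            stack.extend(parents[r])
    return seen

def prune_heads(candidates):
    heads = sorted(set(candidates))
    for rev in sorted(candidates, reverse=True):
        if rev in heads:
            # drop every candidate that this rev descends from
            heads = [h for h in heads if h not in ancestors(rev)]
    return heads

print(prune_heads([1, 2, 3]))  # -> [2, 3]; rev 1 is superseded
```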
106 128 def updatecache(repo):
107 129 repo = repo.unfiltered() # Until we get a smarter cache management
108 130 cl = repo.changelog
109 131 tip = cl.tip()
110 132 partial = repo._branchcache
111 133 if partial is not None and partial.tipnode == tip:
112 134 return
113 135
114 136 if partial is None or partial.tipnode not in cl.nodemap:
115 137 partial = read(repo)
116 138
117 139 catip = repo._cacheabletip()
118 140 # if partial.tiprev == catip: cache is already up to date
119 141 # if partial.tiprev > catip: we have uncachable elements in `partial` and
120 142 # can't write it on disk
121 143 if partial.tiprev < catip:
122 144 ctxgen = (repo[r] for r in cl.revs(partial.tiprev + 1, catip))
123 145 update(repo, partial, ctxgen)
124 partial.tipnode = cl.node(catip)
125 partial.tiprev = catip
126 146 partial.write(repo)
127 147 # If the cacheable tip is lower than the actual tip, we need to update the
128 148 # cache up to tip. This update (from cacheable to actual tip) is not
129 149 # written to disk since it's not cacheable.
130 150 tiprev = len(repo) - 1
131 151 if partial.tiprev < tiprev:
132 152 ctxgen = (repo[r] for r in cl.revs(partial.tiprev + 1, tiprev))
133 153 update(repo, partial, ctxgen)
134 partial.tipnode = cl.node(tiprev)
135 partial.tiprev = tiprev
136 154 repo._branchcache = partial
137 155
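The change in this revision shows up right here: `updatecache` no longer assigns `partial.tipnode`/`partial.tiprev` after each `update()` call, because `update()` now maintains the cache key itself. The resulting two-phase flow, as a hedged summary sketch (`revs_between` is a stand-in for the `repo[r] for r in cl.revs(...)` generator above):

```python
# Hedged sketch of the two-phase flow in updatecache() above.
def updatecache_sketch(repo, partial):
    catip = repo._cacheabletip()       # highest rev safe to persist
    if partial.tiprev < catip:
        update(repo, partial, revs_between(repo, partial.tiprev + 1, catip))
        partial.write(repo)            # update() already refreshed the key
    tiprev = len(repo) - 1
    if partial.tiprev < tiprev:        # uncacheable tail (e.g. mq patches)
        update(repo, partial, revs_between(repo, partial.tiprev + 1, tiprev))
        # deliberately not written: this tail is expected to change
    repo._branchcache = partial
```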
138 156 class branchcache(dict):
139 157 """A dict like object that hold branches heads cache"""
140 158
141 159 def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev):
142 160 super(branchcache, self).__init__(entries)
143 161 self.tipnode = tipnode
144 162 self.tiprev = tiprev
145 163
146 164 def write(self, repo):
147 165 try:
148 166 f = repo.opener("cache/branchheads", "w", atomictemp=True)
149 167 f.write("%s %s\n" % (hex(self.tipnode), self.tiprev))
150 168 for label, nodes in self.iteritems():
151 169 for node in nodes:
152 170 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
153 171 f.close()
154 172 except (IOError, OSError):
155 173 pass
@@ -1,2587 +1,2585 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class repofilecache(filecache):
23 23 """All filecache usage on repo are done for logic that should be unfiltered
24 24 """
25 25
26 26 def __get__(self, repo, type=None):
27 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 28 def __set__(self, repo, value):
29 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 30 def __delete__(self, repo):
31 31 return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
33 33 class storecache(repofilecache):
34 34 """filecache for files in the store"""
35 35 def join(self, obj, fname):
36 36 return obj.sjoin(fname)
37 37
38 38 class unfilteredpropertycache(propertycache):
39 39 """propertycache that apply to unfiltered repo only"""
40 40
41 41 def __get__(self, repo, type=None):
42 42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43 43
44 44 class filteredpropertycache(propertycache):
45 45 """propertycache that must take filtering in account"""
46 46
47 47 def cachevalue(self, obj, value):
48 48 object.__setattr__(obj, self.name, value)
49 49
50 50
51 51 def hasunfilteredcache(repo, name):
52 52 """check if an repo and a unfilteredproperty cached value for <name>"""
53 53 return name in vars(repo.unfiltered())
54 54
55 55 def unfilteredmethod(orig):
56 56 """decorate method that always need to be run on unfiltered version"""
57 57 def wrapper(repo, *args, **kwargs):
58 58 return orig(repo.unfiltered(), *args, **kwargs)
59 59 return wrapper
60 60
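The three cache helpers above are all Python descriptors whose job is to redirect attribute access to `repo.unfiltered()`. A minimal standalone model of a caching descriptor with that redirection (illustrative names only, not Mercurial's API):

```python
# Minimal model of a property cache that always operates on the
# unfiltered repo, in the spirit of unfilteredpropertycache above.
class cachedprop(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, objtype=None):
        target = obj.unfiltered()          # redirect to the unfiltered view
        if self.name not in vars(target):  # compute once, then cache
            setattr(target, self.name, self.func(target))
        return vars(target)[self.name]

class Repo(object):
    def unfiltered(self):
        return self

    @cachedprop
    def expensive(self):
        print("computing...")
        return 42

r = Repo()
print(r.expensive)  # computing... 42
print(r.expensive)  # 42 (cached on the instance)
```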
61 61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
62 62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63 63
64 64 class localpeer(peer.peerrepository):
65 65 '''peer for a local repo; reflects only the most recent API'''
66 66
67 67 def __init__(self, repo, caps=MODERNCAPS):
68 68 peer.peerrepository.__init__(self)
69 69 self._repo = repo
70 70 self.ui = repo.ui
71 71 self._caps = repo._restrictcapabilities(caps)
72 72 self.requirements = repo.requirements
73 73 self.supportedformats = repo.supportedformats
74 74
75 75 def close(self):
76 76 self._repo.close()
77 77
78 78 def _capabilities(self):
79 79 return self._caps
80 80
81 81 def local(self):
82 82 return self._repo
83 83
84 84 def canpush(self):
85 85 return True
86 86
87 87 def url(self):
88 88 return self._repo.url()
89 89
90 90 def lookup(self, key):
91 91 return self._repo.lookup(key)
92 92
93 93 def branchmap(self):
94 94 return discovery.visiblebranchmap(self._repo)
95 95
96 96 def heads(self):
97 97 return discovery.visibleheads(self._repo)
98 98
99 99 def known(self, nodes):
100 100 return self._repo.known(nodes)
101 101
102 102 def getbundle(self, source, heads=None, common=None):
103 103 return self._repo.getbundle(source, heads=heads, common=common)
104 104
105 105 # TODO We might want to move the next two calls into legacypeer and add
106 106 # unbundle instead.
107 107
108 108 def lock(self):
109 109 return self._repo.lock()
110 110
111 111 def addchangegroup(self, cg, source, url):
112 112 return self._repo.addchangegroup(cg, source, url)
113 113
114 114 def pushkey(self, namespace, key, old, new):
115 115 return self._repo.pushkey(namespace, key, old, new)
116 116
117 117 def listkeys(self, namespace):
118 118 return self._repo.listkeys(namespace)
119 119
120 120 def debugwireargs(self, one, two, three=None, four=None, five=None):
121 121 '''used to test argument passing over the wire'''
122 122 return "%s %s %s %s %s" % (one, two, three, four, five)
123 123
124 124 class locallegacypeer(localpeer):
125 125 '''peer extension which implements legacy methods too; used for tests with
126 126 restricted capabilities'''
127 127
128 128 def __init__(self, repo):
129 129 localpeer.__init__(self, repo, caps=LEGACYCAPS)
130 130
131 131 def branches(self, nodes):
132 132 return self._repo.branches(nodes)
133 133
134 134 def between(self, pairs):
135 135 return self._repo.between(pairs)
136 136
137 137 def changegroup(self, basenodes, source):
138 138 return self._repo.changegroup(basenodes, source)
139 139
140 140 def changegroupsubset(self, bases, heads, source):
141 141 return self._repo.changegroupsubset(bases, heads, source)
142 142
143 143 class localrepository(object):
144 144
145 145 supportedformats = set(('revlogv1', 'generaldelta'))
146 146 supported = supportedformats | set(('store', 'fncache', 'shared',
147 147 'dotencode'))
148 148 openerreqs = set(('revlogv1', 'generaldelta'))
149 149 requirements = ['revlogv1']
150 150
151 151 def _baserequirements(self, create):
152 152 return self.requirements[:]
153 153
154 154 def __init__(self, baseui, path=None, create=False):
155 155 self.wvfs = scmutil.vfs(path, expand=True)
156 156 self.wopener = self.wvfs
157 157 self.root = self.wvfs.base
158 158 self.path = self.wvfs.join(".hg")
159 159 self.origroot = path
160 160 self.auditor = scmutil.pathauditor(self.root, self._checknested)
161 161 self.vfs = scmutil.vfs(self.path)
162 162 self.opener = self.vfs
163 163 self.baseui = baseui
164 164 self.ui = baseui.copy()
165 165 # A list of callbacks to shape the phase if no data were found.
166 166 # Callbacks are in the form: func(repo, roots) --> processed root.
167 167 # This list is to be filled by extensions during repo setup.
168 168 self._phasedefaults = []
169 169 try:
170 170 self.ui.readconfig(self.join("hgrc"), self.root)
171 171 extensions.loadall(self.ui)
172 172 except IOError:
173 173 pass
174 174
175 175 if not self.vfs.isdir():
176 176 if create:
177 177 if not self.wvfs.exists():
178 178 self.wvfs.makedirs()
179 179 self.vfs.makedir(notindexed=True)
180 180 requirements = self._baserequirements(create)
181 181 if self.ui.configbool('format', 'usestore', True):
182 182 self.vfs.mkdir("store")
183 183 requirements.append("store")
184 184 if self.ui.configbool('format', 'usefncache', True):
185 185 requirements.append("fncache")
186 186 if self.ui.configbool('format', 'dotencode', True):
187 187 requirements.append('dotencode')
188 188 # create an invalid changelog
189 189 self.vfs.append(
190 190 "00changelog.i",
191 191 '\0\0\0\2' # represents revlogv2
192 192 ' dummy changelog to prevent using the old repo layout'
193 193 )
194 194 if self.ui.configbool('format', 'generaldelta', False):
195 195 requirements.append("generaldelta")
196 196 requirements = set(requirements)
197 197 else:
198 198 raise error.RepoError(_("repository %s not found") % path)
199 199 elif create:
200 200 raise error.RepoError(_("repository %s already exists") % path)
201 201 else:
202 202 try:
203 203 requirements = scmutil.readrequires(self.vfs, self.supported)
204 204 except IOError, inst:
205 205 if inst.errno != errno.ENOENT:
206 206 raise
207 207 requirements = set()
208 208
209 209 self.sharedpath = self.path
210 210 try:
211 211 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
212 212 if not os.path.exists(s):
213 213 raise error.RepoError(
214 214 _('.hg/sharedpath points to nonexistent directory %s') % s)
215 215 self.sharedpath = s
216 216 except IOError, inst:
217 217 if inst.errno != errno.ENOENT:
218 218 raise
219 219
220 220 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
221 221 self.spath = self.store.path
222 222 self.svfs = self.store.vfs
223 223 self.sopener = self.svfs
224 224 self.sjoin = self.store.join
225 225 self.vfs.createmode = self.store.createmode
226 226 self._applyrequirements(requirements)
227 227 if create:
228 228 self._writerequirements()
229 229
230 230
231 231 self._branchcache = None
232 232 self.filterpats = {}
233 233 self._datafilters = {}
234 234 self._transref = self._lockref = self._wlockref = None
235 235
236 236 # A cache for various files under .hg/ that tracks file changes,
237 237 # (used by the filecache decorator)
238 238 #
239 239 # Maps a property name to its util.filecacheentry
240 240 self._filecache = {}
241 241
242 242 # holds sets of revisions to be filtered;
243 243 # should be cleared when something might have changed the filter value:
244 244 # - new changesets,
245 245 # - phase change,
246 246 # - new obsolescence marker,
247 247 # - working directory parent change,
248 248 # - bookmark changes
249 249 self.filteredrevcache = {}
250 250
251 251 def close(self):
252 252 pass
253 253
254 254 def _restrictcapabilities(self, caps):
255 255 return caps
256 256
257 257 def _applyrequirements(self, requirements):
258 258 self.requirements = requirements
259 259 self.sopener.options = dict((r, 1) for r in requirements
260 260 if r in self.openerreqs)
261 261
262 262 def _writerequirements(self):
263 263 reqfile = self.opener("requires", "w")
264 264 for r in self.requirements:
265 265 reqfile.write("%s\n" % r)
266 266 reqfile.close()
267 267
268 268 def _checknested(self, path):
269 269 """Determine if path is a legal nested repository."""
270 270 if not path.startswith(self.root):
271 271 return False
272 272 subpath = path[len(self.root) + 1:]
273 273 normsubpath = util.pconvert(subpath)
274 274
275 275 # XXX: Checking against the current working copy is wrong in
276 276 # the sense that it can reject things like
277 277 #
278 278 # $ hg cat -r 10 sub/x.txt
279 279 #
280 280 # if sub/ is no longer a subrepository in the working copy
281 281 # parent revision.
282 282 #
283 283 # However, it can of course also allow things that would have
284 284 # been rejected before, such as the above cat command if sub/
285 285 # is a subrepository now, but was a normal directory before.
286 286 # The old path auditor would have rejected by mistake since it
287 287 # panics when it sees sub/.hg/.
288 288 #
289 289 # All in all, checking against the working copy seems sensible
290 290 # since we want to prevent access to nested repositories on
291 291 # the filesystem *now*.
292 292 ctx = self[None]
293 293 parts = util.splitpath(subpath)
294 294 while parts:
295 295 prefix = '/'.join(parts)
296 296 if prefix in ctx.substate:
297 297 if prefix == normsubpath:
298 298 return True
299 299 else:
300 300 sub = ctx.sub(prefix)
301 301 return sub.checknested(subpath[len(prefix) + 1:])
302 302 else:
303 303 parts.pop()
304 304 return False
305 305
306 306 def peer(self):
307 307 return localpeer(self) # not cached to avoid reference cycle
308 308
309 309 def unfiltered(self):
310 310 """Return unfiltered version of the repository
311 311
312 312 Intended to be overwritten by filtered repos."""
313 313 return self
314 314
315 315 def filtered(self, name):
316 316 """Return a filtered version of a repository"""
317 317 # build a new class with the mixin and the current class
318 318 # (possibly a subclass of the repo)
319 319 class proxycls(repoview.repoview, self.unfiltered().__class__):
320 320 pass
321 321 return proxycls(self, name)
322 322
323 323 @repofilecache('bookmarks')
324 324 def _bookmarks(self):
325 325 return bookmarks.bmstore(self)
326 326
327 327 @repofilecache('bookmarks.current')
328 328 def _bookmarkcurrent(self):
329 329 return bookmarks.readcurrent(self)
330 330
331 331 def bookmarkheads(self, bookmark):
332 332 name = bookmark.split('@', 1)[0]
333 333 heads = []
334 334 for mark, n in self._bookmarks.iteritems():
335 335 if mark.split('@', 1)[0] == name:
336 336 heads.append(n)
337 337 return heads
338 338
339 339 @storecache('phaseroots')
340 340 def _phasecache(self):
341 341 return phases.phasecache(self, self._phasedefaults)
342 342
343 343 @storecache('obsstore')
344 344 def obsstore(self):
345 345 store = obsolete.obsstore(self.sopener)
346 346 if store and not obsolete._enabled:
347 347 # message is rare enough to not be translated
348 348 msg = 'obsolete feature not enabled but %i markers found!\n'
349 349 self.ui.warn(msg % len(list(store)))
350 350 return store
351 351
352 352 @unfilteredpropertycache
353 353 def hiddenrevs(self):
354 354 """hiddenrevs: revs that should be hidden by command and tools
355 355
356 356 This set is carried on the repo to ease initialization and lazy
357 357 loading; it'll probably move back to changelog for efficiency and
358 358 consistency reasons.
359 359
360 360 Note that hiddenrevs will need invalidation when
361 361 - a new changeset is added (possibly unstable above extinct)
362 362 - a new obsolete marker is added (possibly a new extinct changeset)
363 363
364 364 hidden changesets cannot have non-hidden descendants
365 365 """
366 366 hidden = set()
367 367 if self.obsstore:
368 368 ### hide extinct changesets that are not accessible by any means
369 369 hiddenquery = 'extinct() - ::(. + bookmark())'
370 370 hidden.update(self.revs(hiddenquery))
371 371 return hidden
372 372
373 373 @storecache('00changelog.i')
374 374 def changelog(self):
375 375 c = changelog.changelog(self.sopener)
376 376 if 'HG_PENDING' in os.environ:
377 377 p = os.environ['HG_PENDING']
378 378 if p.startswith(self.root):
379 379 c.readpending('00changelog.i.a')
380 380 return c
381 381
382 382 @storecache('00manifest.i')
383 383 def manifest(self):
384 384 return manifest.manifest(self.sopener)
385 385
386 386 @repofilecache('dirstate')
387 387 def dirstate(self):
388 388 warned = [0]
389 389 def validate(node):
390 390 try:
391 391 self.changelog.rev(node)
392 392 return node
393 393 except error.LookupError:
394 394 if not warned[0]:
395 395 warned[0] = True
396 396 self.ui.warn(_("warning: ignoring unknown"
397 397 " working parent %s!\n") % short(node))
398 398 return nullid
399 399
400 400 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
401 401
402 402 def __getitem__(self, changeid):
403 403 if changeid is None:
404 404 return context.workingctx(self)
405 405 return context.changectx(self, changeid)
406 406
407 407 def __contains__(self, changeid):
408 408 try:
409 409 return bool(self.lookup(changeid))
410 410 except error.RepoLookupError:
411 411 return False
412 412
413 413 def __nonzero__(self):
414 414 return True
415 415
416 416 def __len__(self):
417 417 return len(self.changelog)
418 418
419 419 def __iter__(self):
420 420 return iter(self.changelog)
421 421
422 422 def revs(self, expr, *args):
423 423 '''Return a list of revisions matching the given revset'''
424 424 expr = revset.formatspec(expr, *args)
425 425 m = revset.match(None, expr)
426 426 return [r for r in m(self, list(self))]
427 427
428 428 def set(self, expr, *args):
429 429 '''
430 430 Yield a context for each matching revision, after doing arg
431 431 replacement via revset.formatspec
432 432 '''
433 433 for r in self.revs(expr, *args):
434 434 yield self[r]
435 435
436 436 def url(self):
437 437 return 'file:' + self.root
438 438
439 439 def hook(self, name, throw=False, **args):
440 440 return hook.hook(self.ui, self, name, throw, **args)
441 441
442 442 @unfilteredmethod
443 443 def _tag(self, names, node, message, local, user, date, extra={}):
444 444 if isinstance(names, str):
445 445 names = (names,)
446 446
447 447 branches = self.branchmap()
448 448 for name in names:
449 449 self.hook('pretag', throw=True, node=hex(node), tag=name,
450 450 local=local)
451 451 if name in branches:
452 452 self.ui.warn(_("warning: tag %s conflicts with existing"
453 453 " branch name\n") % name)
454 454
455 455 def writetags(fp, names, munge, prevtags):
456 456 fp.seek(0, 2)
457 457 if prevtags and prevtags[-1] != '\n':
458 458 fp.write('\n')
459 459 for name in names:
460 460 m = munge and munge(name) or name
461 461 if (self._tagscache.tagtypes and
462 462 name in self._tagscache.tagtypes):
463 463 old = self.tags().get(name, nullid)
464 464 fp.write('%s %s\n' % (hex(old), m))
465 465 fp.write('%s %s\n' % (hex(node), m))
466 466 fp.close()
467 467
468 468 prevtags = ''
469 469 if local:
470 470 try:
471 471 fp = self.opener('localtags', 'r+')
472 472 except IOError:
473 473 fp = self.opener('localtags', 'a')
474 474 else:
475 475 prevtags = fp.read()
476 476
477 477 # local tags are stored in the current charset
478 478 writetags(fp, names, None, prevtags)
479 479 for name in names:
480 480 self.hook('tag', node=hex(node), tag=name, local=local)
481 481 return
482 482
483 483 try:
484 484 fp = self.wfile('.hgtags', 'rb+')
485 485 except IOError, e:
486 486 if e.errno != errno.ENOENT:
487 487 raise
488 488 fp = self.wfile('.hgtags', 'ab')
489 489 else:
490 490 prevtags = fp.read()
491 491
492 492 # committed tags are stored in UTF-8
493 493 writetags(fp, names, encoding.fromlocal, prevtags)
494 494
495 495 fp.close()
496 496
497 497 self.invalidatecaches()
498 498
499 499 if '.hgtags' not in self.dirstate:
500 500 self[None].add(['.hgtags'])
501 501
502 502 m = matchmod.exact(self.root, '', ['.hgtags'])
503 503 tagnode = self.commit(message, user, date, extra=extra, match=m)
504 504
505 505 for name in names:
506 506 self.hook('tag', node=hex(node), tag=name, local=local)
507 507
508 508 return tagnode
509 509
510 510 def tag(self, names, node, message, local, user, date):
511 511 '''tag a revision with one or more symbolic names.
512 512
513 513 names is a list of strings or, when adding a single tag, names may be a
514 514 string.
515 515
516 516 if local is True, the tags are stored in a per-repository file.
517 517 otherwise, they are stored in the .hgtags file, and a new
518 518 changeset is committed with the change.
519 519
520 520 keyword arguments:
521 521
522 522 local: whether to store tags in non-version-controlled file
523 523 (default False)
524 524
525 525 message: commit message to use if committing
526 526
527 527 user: name of user to use if committing
528 528
529 529 date: date tuple to use if committing'''
530 530
531 531 if not local:
532 532 for x in self.status()[:5]:
533 533 if '.hgtags' in x:
534 534 raise util.Abort(_('working copy of .hgtags is changed '
535 535 '(please commit .hgtags manually)'))
536 536
537 537 self.tags() # instantiate the cache
538 538 self._tag(names, node, message, local, user, date)
539 539
540 540 @filteredpropertycache
541 541 def _tagscache(self):
542 542 '''Returns a tagscache object that contains various tags related
543 543 caches.'''
544 544
545 545 # This simplifies its cache management by having one decorated
546 546 # function (this one) and the rest simply fetch things from it.
547 547 class tagscache(object):
548 548 def __init__(self):
549 549 # These two define the set of tags for this repository. tags
550 550 # maps tag name to node; tagtypes maps tag name to 'global' or
551 551 # 'local'. (Global tags are defined by .hgtags across all
552 552 # heads, and local tags are defined in .hg/localtags.)
553 553 # They constitute the in-memory cache of tags.
554 554 self.tags = self.tagtypes = None
555 555
556 556 self.nodetagscache = self.tagslist = None
557 557
558 558 cache = tagscache()
559 559 cache.tags, cache.tagtypes = self._findtags()
560 560
561 561 return cache
562 562
563 563 def tags(self):
564 564 '''return a mapping of tag to node'''
565 565 t = {}
566 566 if self.changelog.filteredrevs:
567 567 tags, tt = self._findtags()
568 568 else:
569 569 tags = self._tagscache.tags
570 570 for k, v in tags.iteritems():
571 571 try:
572 572 # ignore tags to unknown nodes
573 573 self.changelog.rev(v)
574 574 t[k] = v
575 575 except (error.LookupError, ValueError):
576 576 pass
577 577 return t
578 578
579 579 def _findtags(self):
580 580 '''Do the hard work of finding tags. Return a pair of dicts
581 581 (tags, tagtypes) where tags maps tag name to node, and tagtypes
582 582 maps tag name to a string like \'global\' or \'local\'.
583 583 Subclasses or extensions are free to add their own tags, but
584 584 should be aware that the returned dicts will be retained for the
585 585 duration of the localrepo object.'''
586 586
587 587 # XXX what tagtype should subclasses/extensions use? Currently
588 588 # mq and bookmarks add tags, but do not set the tagtype at all.
589 589 # Should each extension invent its own tag type? Should there
590 590 # be one tagtype for all such "virtual" tags? Or is the status
591 591 # quo fine?
592 592
593 593 alltags = {} # map tag name to (node, hist)
594 594 tagtypes = {}
595 595
596 596 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
597 597 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
598 598
599 599 # Build the return dicts. Have to re-encode tag names because
600 600 # the tags module always uses UTF-8 (in order not to lose info
601 601 # writing to the cache), but the rest of Mercurial wants them in
602 602 # local encoding.
603 603 tags = {}
604 604 for (name, (node, hist)) in alltags.iteritems():
605 605 if node != nullid:
606 606 tags[encoding.tolocal(name)] = node
607 607 tags['tip'] = self.changelog.tip()
608 608 tagtypes = dict([(encoding.tolocal(name), value)
609 609 for (name, value) in tagtypes.iteritems()])
610 610 return (tags, tagtypes)
611 611
612 612 def tagtype(self, tagname):
613 613 '''
614 614 return the type of the given tag. result can be:
615 615
616 616 'local' : a local tag
617 617 'global' : a global tag
618 618 None : tag does not exist
619 619 '''
620 620
621 621 return self._tagscache.tagtypes.get(tagname)
622 622
623 623 def tagslist(self):
624 624 '''return a list of tags ordered by revision'''
625 625 if not self._tagscache.tagslist:
626 626 l = []
627 627 for t, n in self.tags().iteritems():
628 628 r = self.changelog.rev(n)
629 629 l.append((r, t, n))
630 630 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
631 631
632 632 return self._tagscache.tagslist
633 633
634 634 def nodetags(self, node):
635 635 '''return the tags associated with a node'''
636 636 if not self._tagscache.nodetagscache:
637 637 nodetagscache = {}
638 638 for t, n in self._tagscache.tags.iteritems():
639 639 nodetagscache.setdefault(n, []).append(t)
640 640 for tags in nodetagscache.itervalues():
641 641 tags.sort()
642 642 self._tagscache.nodetagscache = nodetagscache
643 643 return self._tagscache.nodetagscache.get(node, [])
644 644
645 645 def nodebookmarks(self, node):
646 646 marks = []
647 647 for bookmark, n in self._bookmarks.iteritems():
648 648 if n == node:
649 649 marks.append(bookmark)
650 650 return sorted(marks)
651 651
652 652 def _cacheabletip(self):
653 653 """tip-most revision stable enought to used in persistent cache
654 654
655 655 This function is overwritten by MQ to ensure we do not write cache for
656 656 a part of the history that will likely change.
657 657
658 658 Efficient handling of filtered revisions in branchcache should offer a
659 659 better alternative. But we are using this approach until it is ready.
660 660 """
661 661 cl = self.changelog
662 662 return cl.rev(cl.tip())
663 663
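As the docstring says, MQ overrides `_cacheabletip` so the branch cache is never persisted for history that a patch refresh may rewrite. A hypothetical override in that spirit (a sketch only; the `self.mq.applied` state is assumed, and the real hook lives in hgext/mq.py):

```python
# Hypothetical mq-style override of _cacheabletip(): never persist the
# branch cache at or above the first applied patch, since qrefresh can
# rewrite those revisions. self.mq.applied is assumed extension state.
def _cacheabletip(self):
    cl = self.changelog
    tip = cl.rev(cl.tip())
    if self.mq.applied:
        firstpatchrev = cl.rev(self.mq.applied[0].node)  # assumed node attr
        return min(tip, firstpatchrev - 1)
    return tip
```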
664 664 def branchmap(self):
665 665 '''returns a dictionary {branch: [branchheads]}'''
666 666 if self.changelog.filteredrevs:
667 667 # some changesets are excluded; we can't use the cache
668 668 bmap = branchmap.branchcache()
669 669 branchmap.update(self, bmap, (self[r] for r in self))
670 670 return bmap
671 671 else:
672 672 branchmap.updatecache(self)
673 673 return self._branchcache
674 674
675 675
676 676 def _branchtip(self, heads):
677 677 '''return the tipmost branch head in heads'''
678 678 tip = heads[-1]
679 679 for h in reversed(heads):
680 680 if not self[h].closesbranch():
681 681 tip = h
682 682 break
683 683 return tip
684 684
685 685 def branchtip(self, branch):
686 686 '''return the tip node for a given branch'''
687 687 if branch not in self.branchmap():
688 688 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
689 689 return self._branchtip(self.branchmap()[branch])
690 690
691 691 def branchtags(self):
692 692 '''return a dict where branch names map to the tipmost head of
693 693 the branch, open heads come before closed'''
694 694 bt = {}
695 695 for bn, heads in self.branchmap().iteritems():
696 696 bt[bn] = self._branchtip(heads)
697 697 return bt
698 698
699 699 def lookup(self, key):
700 700 return self[key].node()
701 701
702 702 def lookupbranch(self, key, remote=None):
703 703 repo = remote or self
704 704 if key in repo.branchmap():
705 705 return key
706 706
707 707 repo = (remote and remote.local()) and remote or self
708 708 return repo[key].branch()
709 709
710 710 def known(self, nodes):
711 711 nm = self.changelog.nodemap
712 712 pc = self._phasecache
713 713 result = []
714 714 for n in nodes:
715 715 r = nm.get(n)
716 716 resp = not (r is None or pc.phase(self, r) >= phases.secret)
717 717 result.append(resp)
718 718 return result
719 719
720 720 def local(self):
721 721 return self
722 722
723 723 def cancopy(self):
724 724 return self.local() # so statichttprepo's override of local() works
725 725
726 726 def join(self, f):
727 727 return os.path.join(self.path, f)
728 728
729 729 def wjoin(self, f):
730 730 return os.path.join(self.root, f)
731 731
732 732 def file(self, f):
733 733 if f[0] == '/':
734 734 f = f[1:]
735 735 return filelog.filelog(self.sopener, f)
736 736
737 737 def changectx(self, changeid):
738 738 return self[changeid]
739 739
740 740 def parents(self, changeid=None):
741 741 '''get list of changectxs for parents of changeid'''
742 742 return self[changeid].parents()
743 743
744 744 def setparents(self, p1, p2=nullid):
745 745 copies = self.dirstate.setparents(p1, p2)
746 746 if copies:
747 747 # Adjust copy records, the dirstate cannot do it, it
748 748 # requires access to parents manifests. Preserve them
749 749 # only for entries added to first parent.
750 750 pctx = self[p1]
751 751 for f in copies:
752 752 if f not in pctx and copies[f] in pctx:
753 753 self.dirstate.copy(copies[f], f)
754 754
755 755 def filectx(self, path, changeid=None, fileid=None):
756 756 """changeid can be a changeset revision, node, or tag.
757 757 fileid can be a file revision or node."""
758 758 return context.filectx(self, path, changeid, fileid)
759 759
760 760 def getcwd(self):
761 761 return self.dirstate.getcwd()
762 762
763 763 def pathto(self, f, cwd=None):
764 764 return self.dirstate.pathto(f, cwd)
765 765
766 766 def wfile(self, f, mode='r'):
767 767 return self.wopener(f, mode)
768 768
769 769 def _link(self, f):
770 770 return os.path.islink(self.wjoin(f))
771 771
772 772 def _loadfilter(self, filter):
773 773 if filter not in self.filterpats:
774 774 l = []
775 775 for pat, cmd in self.ui.configitems(filter):
776 776 if cmd == '!':
777 777 continue
778 778 mf = matchmod.match(self.root, '', [pat])
779 779 fn = None
780 780 params = cmd
781 781 for name, filterfn in self._datafilters.iteritems():
782 782 if cmd.startswith(name):
783 783 fn = filterfn
784 784 params = cmd[len(name):].lstrip()
785 785 break
786 786 if not fn:
787 787 fn = lambda s, c, **kwargs: util.filter(s, c)
788 788 # Wrap old filters not supporting keyword arguments
789 789 if not inspect.getargspec(fn)[2]:
790 790 oldfn = fn
791 791 fn = lambda s, c, **kwargs: oldfn(s, c)
792 792 l.append((mf, fn, params))
793 793 self.filterpats[filter] = l
794 794 return self.filterpats[filter]
795 795
796 796 def _filter(self, filterpats, filename, data):
797 797 for mf, fn, cmd in filterpats:
798 798 if mf(filename):
799 799 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
800 800 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
801 801 break
802 802
803 803 return data
804 804
805 805 @unfilteredpropertycache
806 806 def _encodefilterpats(self):
807 807 return self._loadfilter('encode')
808 808
809 809 @unfilteredpropertycache
810 810 def _decodefilterpats(self):
811 811 return self._loadfilter('decode')
812 812
813 813 def adddatafilter(self, name, filter):
814 814 self._datafilters[name] = filter
815 815
816 816 def wread(self, filename):
817 817 if self._link(filename):
818 818 data = os.readlink(self.wjoin(filename))
819 819 else:
820 820 data = self.wopener.read(filename)
821 821 return self._filter(self._encodefilterpats, filename, data)
822 822
823 823 def wwrite(self, filename, data, flags):
824 824 data = self._filter(self._decodefilterpats, filename, data)
825 825 if 'l' in flags:
826 826 self.wopener.symlink(data, filename)
827 827 else:
828 828 self.wopener.write(filename, data)
829 829 if 'x' in flags:
830 830 util.setflags(self.wjoin(filename), False, True)
831 831
832 832 def wwritedata(self, filename, data):
833 833 return self._filter(self._decodefilterpats, filename, data)
834 834
835 835 def transaction(self, desc):
836 836 tr = self._transref and self._transref() or None
837 837 if tr and tr.running():
838 838 return tr.nest()
839 839
840 840 # abort here if the journal already exists
841 841 if os.path.exists(self.sjoin("journal")):
842 842 raise error.RepoError(
843 843 _("abandoned transaction found - run hg recover"))
844 844
845 845 self._writejournal(desc)
846 846 renames = [(x, undoname(x)) for x in self._journalfiles()]
847 847
848 848 tr = transaction.transaction(self.ui.warn, self.sopener,
849 849 self.sjoin("journal"),
850 850 aftertrans(renames),
851 851 self.store.createmode)
852 852 self._transref = weakref.ref(tr)
853 853 return tr
854 854
855 855 def _journalfiles(self):
856 856 return (self.sjoin('journal'), self.join('journal.dirstate'),
857 857 self.join('journal.branch'), self.join('journal.desc'),
858 858 self.join('journal.bookmarks'),
859 859 self.sjoin('journal.phaseroots'))
860 860
861 861 def undofiles(self):
862 862 return [undoname(x) for x in self._journalfiles()]
863 863
864 864 def _writejournal(self, desc):
865 865 self.opener.write("journal.dirstate",
866 866 self.opener.tryread("dirstate"))
867 867 self.opener.write("journal.branch",
868 868 encoding.fromlocal(self.dirstate.branch()))
869 869 self.opener.write("journal.desc",
870 870 "%d\n%s\n" % (len(self), desc))
871 871 self.opener.write("journal.bookmarks",
872 872 self.opener.tryread("bookmarks"))
873 873 self.sopener.write("journal.phaseroots",
874 874 self.sopener.tryread("phaseroots"))
875 875
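`transaction()` first writes the journal files listed above, then hands back a transaction object; callers close it on success and release it unconditionally, as `commitctx` further down does. The usage pattern, with a hypothetical `do_store_writes` helper standing in for actual store writes:

```python
# Usage pattern for repo.transaction(), mirroring commitctx() below:
# close the transaction on success, release it in all cases.
lock = repo.lock()
try:
    tr = repo.transaction("my-operation")   # hypothetical description
    try:
        do_store_writes(repo, tr)           # hypothetical helper
        tr.close()    # success: the journal becomes the undo files
    finally:
        tr.release()  # if not closed, pending writes are rolled back
finally:
    lock.release()
```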
876 876 def recover(self):
877 877 lock = self.lock()
878 878 try:
879 879 if os.path.exists(self.sjoin("journal")):
880 880 self.ui.status(_("rolling back interrupted transaction\n"))
881 881 transaction.rollback(self.sopener, self.sjoin("journal"),
882 882 self.ui.warn)
883 883 self.invalidate()
884 884 return True
885 885 else:
886 886 self.ui.warn(_("no interrupted transaction available\n"))
887 887 return False
888 888 finally:
889 889 lock.release()
890 890
891 891 def rollback(self, dryrun=False, force=False):
892 892 wlock = lock = None
893 893 try:
894 894 wlock = self.wlock()
895 895 lock = self.lock()
896 896 if os.path.exists(self.sjoin("undo")):
897 897 return self._rollback(dryrun, force)
898 898 else:
899 899 self.ui.warn(_("no rollback information available\n"))
900 900 return 1
901 901 finally:
902 902 release(lock, wlock)
903 903
904 904 @unfilteredmethod # Until we get smarter cache management
905 905 def _rollback(self, dryrun, force):
906 906 ui = self.ui
907 907 try:
908 908 args = self.opener.read('undo.desc').splitlines()
909 909 (oldlen, desc, detail) = (int(args[0]), args[1], None)
910 910 if len(args) >= 3:
911 911 detail = args[2]
912 912 oldtip = oldlen - 1
913 913
914 914 if detail and ui.verbose:
915 915 msg = (_('repository tip rolled back to revision %s'
916 916 ' (undo %s: %s)\n')
917 917 % (oldtip, desc, detail))
918 918 else:
919 919 msg = (_('repository tip rolled back to revision %s'
920 920 ' (undo %s)\n')
921 921 % (oldtip, desc))
922 922 except IOError:
923 923 msg = _('rolling back unknown transaction\n')
924 924 desc = None
925 925
926 926 if not force and self['.'] != self['tip'] and desc == 'commit':
927 927 raise util.Abort(
928 928 _('rollback of last commit while not checked out '
929 929 'may lose data'), hint=_('use -f to force'))
930 930
931 931 ui.status(msg)
932 932 if dryrun:
933 933 return 0
934 934
935 935 parents = self.dirstate.parents()
936 936 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
937 937 if os.path.exists(self.join('undo.bookmarks')):
938 938 util.rename(self.join('undo.bookmarks'),
939 939 self.join('bookmarks'))
940 940 if os.path.exists(self.sjoin('undo.phaseroots')):
941 941 util.rename(self.sjoin('undo.phaseroots'),
942 942 self.sjoin('phaseroots'))
943 943 self.invalidate()
944 944
945 945 # Discard all cache entries to force reloading everything.
946 946 self._filecache.clear()
947 947
948 948 parentgone = (parents[0] not in self.changelog.nodemap or
949 949 parents[1] not in self.changelog.nodemap)
950 950 if parentgone:
951 951 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
952 952 try:
953 953 branch = self.opener.read('undo.branch')
954 954 self.dirstate.setbranch(encoding.tolocal(branch))
955 955 except IOError:
956 956 ui.warn(_('named branch could not be reset: '
957 957 'current branch is still \'%s\'\n')
958 958 % self.dirstate.branch())
959 959
960 960 self.dirstate.invalidate()
961 961 parents = tuple([p.rev() for p in self.parents()])
962 962 if len(parents) > 1:
963 963 ui.status(_('working directory now based on '
964 964 'revisions %d and %d\n') % parents)
965 965 else:
966 966 ui.status(_('working directory now based on '
967 967 'revision %d\n') % parents)
968 968 # TODO: if we know which new heads may result from this rollback, pass
969 969 # them to destroy(), which will prevent the branchhead cache from being
970 970 # invalidated.
971 971 self.destroyed()
972 972 return 0
973 973
974 974 def invalidatecaches(self):
975 975
976 976 if '_tagscache' in vars(self):
977 977 # can't use delattr on proxy
978 978 del self.__dict__['_tagscache']
979 979
980 980 self.unfiltered()._branchcache = None # in UTF-8
981 981 self.invalidatevolatilesets()
982 982
983 983 def invalidatevolatilesets(self):
984 984 self.filteredrevcache.clear()
985 985 obsolete.clearobscaches(self)
986 986 if 'hiddenrevs' in vars(self):
987 987 del self.hiddenrevs
988 988
989 989 def invalidatedirstate(self):
990 990 '''Invalidates the dirstate, causing the next call to dirstate
991 991 to check if it was modified since the last time it was read,
992 992 rereading it if it has.
993 993
994 994 This is different from dirstate.invalidate() in that it doesn't always
995 995 reread the dirstate. Use dirstate.invalidate() if you want to
996 996 explicitly read the dirstate again (i.e. restoring it to a previous
997 997 known good state).'''
998 998 if hasunfilteredcache(self, 'dirstate'):
999 999 for k in self.dirstate._filecache:
1000 1000 try:
1001 1001 delattr(self.dirstate, k)
1002 1002 except AttributeError:
1003 1003 pass
1004 1004 delattr(self.unfiltered(), 'dirstate')
1005 1005
1006 1006 def invalidate(self):
1007 1007 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
1008 1008 for k in self._filecache:
1009 1009 # dirstate is invalidated separately in invalidatedirstate()
1010 1010 if k == 'dirstate':
1011 1011 continue
1012 1012
1013 1013 try:
1014 1014 delattr(unfiltered, k)
1015 1015 except AttributeError:
1016 1016 pass
1017 1017 self.invalidatecaches()
1018 1018
1019 1019 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1020 1020 try:
1021 1021 l = lock.lock(lockname, 0, releasefn, desc=desc)
1022 1022 except error.LockHeld, inst:
1023 1023 if not wait:
1024 1024 raise
1025 1025 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1026 1026 (desc, inst.locker))
1027 1027 # default to 600 seconds timeout
1028 1028 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1029 1029 releasefn, desc=desc)
1030 1030 if acquirefn:
1031 1031 acquirefn()
1032 1032 return l
1033 1033
1034 1034 def _afterlock(self, callback):
1035 1035 """add a callback to the current repository lock.
1036 1036
1037 1037 The callback will be executed on lock release."""
1038 1038 l = self._lockref and self._lockref()
1039 1039 if l:
1040 1040 l.postrelease.append(callback)
1041 1041 else:
1042 1042 callback()
1043 1043
1044 1044 def lock(self, wait=True):
1045 1045 '''Lock the repository store (.hg/store) and return a weak reference
1046 1046 to the lock. Use this before modifying the store (e.g. committing or
1047 1047 stripping). If you are opening a transaction, get a lock as well.'''
1048 1048 l = self._lockref and self._lockref()
1049 1049 if l is not None and l.held:
1050 1050 l.lock()
1051 1051 return l
1052 1052
1053 1053 def unlock():
1054 1054 self.store.write()
1055 1055 if hasunfilteredcache(self, '_phasecache'):
1056 1056 self._phasecache.write()
1057 1057 for k, ce in self._filecache.items():
1058 1058 if k == 'dirstate':
1059 1059 continue
1060 1060 ce.refresh()
1061 1061
1062 1062 l = self._lock(self.sjoin("lock"), wait, unlock,
1063 1063 self.invalidate, _('repository %s') % self.origroot)
1064 1064 self._lockref = weakref.ref(l)
1065 1065 return l
1066 1066
1067 1067 def wlock(self, wait=True):
1068 1068 '''Lock the non-store parts of the repository (everything under
1069 1069 .hg except .hg/store) and return a weak reference to the lock.
1070 1070 Use this before modifying files in .hg.'''
1071 1071 l = self._wlockref and self._wlockref()
1072 1072 if l is not None and l.held:
1073 1073 l.lock()
1074 1074 return l
1075 1075
1076 1076 def unlock():
1077 1077 self.dirstate.write()
1078 1078 ce = self._filecache.get('dirstate')
1079 1079 if ce:
1080 1080 ce.refresh()
1081 1081
1082 1082 l = self._lock(self.join("wlock"), wait, unlock,
1083 1083 self.invalidatedirstate, _('working directory of %s') %
1084 1084 self.origroot)
1085 1085 self._wlockref = weakref.ref(l)
1086 1086 return l
1087 1087
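Note the lock ordering used throughout this file (compare `rollback` above): the working-directory lock is taken before the store lock, which avoids deadlocks between concurrent operations. A sketch of that pattern (`mutate_store_and_workingdir` is a hypothetical operation):

```python
# Lock-ordering pattern used in this file: acquire wlock (working
# directory) before lock (store), and release both via lock.release.
wlock = lock = None
try:
    wlock = repo.wlock()
    lock = repo.lock()
    mutate_store_and_workingdir(repo)   # hypothetical operation
finally:
    release(lock, wlock)                # from mercurial.lock; skips None
```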
1088 1088 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1089 1089 """
1090 1090 commit an individual file as part of a larger transaction
1091 1091 """
1092 1092
1093 1093 fname = fctx.path()
1094 1094 text = fctx.data()
1095 1095 flog = self.file(fname)
1096 1096 fparent1 = manifest1.get(fname, nullid)
1097 1097 fparent2 = fparent2o = manifest2.get(fname, nullid)
1098 1098
1099 1099 meta = {}
1100 1100 copy = fctx.renamed()
1101 1101 if copy and copy[0] != fname:
1102 1102 # Mark the new revision of this file as a copy of another
1103 1103 # file. This copy data will effectively act as a parent
1104 1104 # of this new revision. If this is a merge, the first
1105 1105 # parent will be the nullid (meaning "look up the copy data")
1106 1106 # and the second one will be the other parent. For example:
1107 1107 #
1108 1108 # 0 --- 1 --- 3 rev1 changes file foo
1109 1109 # \ / rev2 renames foo to bar and changes it
1110 1110 # \- 2 -/ rev3 should have bar with all changes and
1111 1111 # should record that bar descends from
1112 1112 # bar in rev2 and foo in rev1
1113 1113 #
1114 1114 # this allows this merge to succeed:
1115 1115 #
1116 1116 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1117 1117 # \ / merging rev3 and rev4 should use bar@rev2
1118 1118 # \- 2 --- 4 as the merge base
1119 1119 #
1120 1120
1121 1121 cfname = copy[0]
1122 1122 crev = manifest1.get(cfname)
1123 1123 newfparent = fparent2
1124 1124
1125 1125 if manifest2: # branch merge
1126 1126 if fparent2 == nullid or crev is None: # copied on remote side
1127 1127 if cfname in manifest2:
1128 1128 crev = manifest2[cfname]
1129 1129 newfparent = fparent1
1130 1130
1131 1131 # find source in nearest ancestor if we've lost track
1132 1132 if not crev:
1133 1133 self.ui.debug(" %s: searching for copy revision for %s\n" %
1134 1134 (fname, cfname))
1135 1135 for ancestor in self[None].ancestors():
1136 1136 if cfname in ancestor:
1137 1137 crev = ancestor[cfname].filenode()
1138 1138 break
1139 1139
1140 1140 if crev:
1141 1141 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1142 1142 meta["copy"] = cfname
1143 1143 meta["copyrev"] = hex(crev)
1144 1144 fparent1, fparent2 = nullid, newfparent
1145 1145 else:
1146 1146 self.ui.warn(_("warning: can't find ancestor for '%s' "
1147 1147 "copied from '%s'!\n") % (fname, cfname))
1148 1148
1149 1149 elif fparent2 != nullid:
1150 1150 # is one parent an ancestor of the other?
1151 1151 fparentancestor = flog.ancestor(fparent1, fparent2)
1152 1152 if fparentancestor == fparent1:
1153 1153 fparent1, fparent2 = fparent2, nullid
1154 1154 elif fparentancestor == fparent2:
1155 1155 fparent2 = nullid
1156 1156
1157 1157 # is the file changed?
1158 1158 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1159 1159 changelist.append(fname)
1160 1160 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1161 1161
1162 1162 # are just the flags changed during merge?
1163 1163 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1164 1164 changelist.append(fname)
1165 1165
1166 1166 return fparent1
1167 1167
1168 1168 @unfilteredmethod
1169 1169 def commit(self, text="", user=None, date=None, match=None, force=False,
1170 1170 editor=False, extra={}):
1171 1171 """Add a new revision to current repository.
1172 1172
1173 1173 Revision information is gathered from the working directory,
1174 1174 match can be used to filter the committed files. If editor is
1175 1175 supplied, it is called to get a commit message.
1176 1176 """
1177 1177
1178 1178 def fail(f, msg):
1179 1179 raise util.Abort('%s: %s' % (f, msg))
1180 1180
1181 1181 if not match:
1182 1182 match = matchmod.always(self.root, '')
1183 1183
1184 1184 if not force:
1185 1185 vdirs = []
1186 1186 match.dir = vdirs.append
1187 1187 match.bad = fail
1188 1188
1189 1189 wlock = self.wlock()
1190 1190 try:
1191 1191 wctx = self[None]
1192 1192 merge = len(wctx.parents()) > 1
1193 1193
1194 1194 if (not force and merge and match and
1195 1195 (match.files() or match.anypats())):
1196 1196 raise util.Abort(_('cannot partially commit a merge '
1197 1197 '(do not specify files or patterns)'))
1198 1198
1199 1199 changes = self.status(match=match, clean=force)
1200 1200 if force:
1201 1201 changes[0].extend(changes[6]) # mq may commit unchanged files
1202 1202
1203 1203 # check subrepos
1204 1204 subs = []
1205 1205 commitsubs = set()
1206 1206 newstate = wctx.substate.copy()
1207 1207 # only manage subrepos and .hgsubstate if .hgsub is present
1208 1208 if '.hgsub' in wctx:
1209 1209 # we'll decide whether to track this ourselves, thanks
1210 1210 if '.hgsubstate' in changes[0]:
1211 1211 changes[0].remove('.hgsubstate')
1212 1212 if '.hgsubstate' in changes[2]:
1213 1213 changes[2].remove('.hgsubstate')
1214 1214
1215 1215 # compare current state to last committed state
1216 1216 # build new substate based on last committed state
1217 1217 oldstate = wctx.p1().substate
1218 1218 for s in sorted(newstate.keys()):
1219 1219 if not match(s):
1220 1220 # ignore working copy, use old state if present
1221 1221 if s in oldstate:
1222 1222 newstate[s] = oldstate[s]
1223 1223 continue
1224 1224 if not force:
1225 1225 raise util.Abort(
1226 1226 _("commit with new subrepo %s excluded") % s)
1227 1227 if wctx.sub(s).dirty(True):
1228 1228 if not self.ui.configbool('ui', 'commitsubrepos'):
1229 1229 raise util.Abort(
1230 1230 _("uncommitted changes in subrepo %s") % s,
1231 1231 hint=_("use --subrepos for recursive commit"))
1232 1232 subs.append(s)
1233 1233 commitsubs.add(s)
1234 1234 else:
1235 1235 bs = wctx.sub(s).basestate()
1236 1236 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1237 1237 if oldstate.get(s, (None, None, None))[1] != bs:
1238 1238 subs.append(s)
1239 1239
1240 1240 # check for removed subrepos
1241 1241 for p in wctx.parents():
1242 1242 r = [s for s in p.substate if s not in newstate]
1243 1243 subs += [s for s in r if match(s)]
1244 1244 if subs:
1245 1245 if (not match('.hgsub') and
1246 1246 '.hgsub' in (wctx.modified() + wctx.added())):
1247 1247 raise util.Abort(
1248 1248 _("can't commit subrepos without .hgsub"))
1249 1249 changes[0].insert(0, '.hgsubstate')
1250 1250
1251 1251 elif '.hgsub' in changes[2]:
1252 1252 # clean up .hgsubstate when .hgsub is removed
1253 1253 if ('.hgsubstate' in wctx and
1254 1254 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1255 1255 changes[2].insert(0, '.hgsubstate')
1256 1256
1257 1257 # make sure all explicit patterns are matched
1258 1258 if not force and match.files():
1259 1259 matched = set(changes[0] + changes[1] + changes[2])
1260 1260
1261 1261 for f in match.files():
1262 1262 f = self.dirstate.normalize(f)
1263 1263 if f == '.' or f in matched or f in wctx.substate:
1264 1264 continue
1265 1265 if f in changes[3]: # missing
1266 1266 fail(f, _('file not found!'))
1267 1267 if f in vdirs: # visited directory
1268 1268 d = f + '/'
1269 1269 for mf in matched:
1270 1270 if mf.startswith(d):
1271 1271 break
1272 1272 else:
1273 1273 fail(f, _("no match under directory!"))
1274 1274 elif f not in self.dirstate:
1275 1275 fail(f, _("file not tracked!"))
1276 1276
1277 1277 if (not force and not extra.get("close") and not merge
1278 1278 and not (changes[0] or changes[1] or changes[2])
1279 1279 and wctx.branch() == wctx.p1().branch()):
1280 1280 return None
1281 1281
1282 1282 if merge and changes[3]:
1283 1283 raise util.Abort(_("cannot commit merge with missing files"))
1284 1284
1285 1285 ms = mergemod.mergestate(self)
1286 1286 for f in changes[0]:
1287 1287 if f in ms and ms[f] == 'u':
1288 1288 raise util.Abort(_("unresolved merge conflicts "
1289 1289 "(see hg help resolve)"))
1290 1290
1291 1291 cctx = context.workingctx(self, text, user, date, extra, changes)
1292 1292 if editor:
1293 1293 cctx._text = editor(self, cctx, subs)
1294 1294 edited = (text != cctx._text)
1295 1295
1296 1296 # commit subs and write new state
1297 1297 if subs:
1298 1298 for s in sorted(commitsubs):
1299 1299 sub = wctx.sub(s)
1300 1300 self.ui.status(_('committing subrepository %s\n') %
1301 1301 subrepo.subrelpath(sub))
1302 1302 sr = sub.commit(cctx._text, user, date)
1303 1303 newstate[s] = (newstate[s][0], sr)
1304 1304 subrepo.writestate(self, newstate)
1305 1305
1306 1306 # Save commit message in case this transaction gets rolled back
1307 1307 # (e.g. by a pretxncommit hook). Leave the content alone on
1308 1308 # the assumption that the user will use the same editor again.
1309 1309 msgfn = self.savecommitmessage(cctx._text)
1310 1310
1311 1311 p1, p2 = self.dirstate.parents()
1312 1312 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1313 1313 try:
1314 1314 self.hook("precommit", throw=True, parent1=hookp1,
1315 1315 parent2=hookp2)
1316 1316 ret = self.commitctx(cctx, True)
1317 1317 except: # re-raises
1318 1318 if edited:
1319 1319 self.ui.write(
1320 1320 _('note: commit message saved in %s\n') % msgfn)
1321 1321 raise
1322 1322
1323 1323 # update bookmarks, dirstate and mergestate
1324 1324 bookmarks.update(self, [p1, p2], ret)
1325 1325 for f in changes[0] + changes[1]:
1326 1326 self.dirstate.normal(f)
1327 1327 for f in changes[2]:
1328 1328 self.dirstate.drop(f)
1329 1329 self.dirstate.setparents(ret)
1330 1330 ms.reset()
1331 1331 finally:
1332 1332 wlock.release()
1333 1333
1334 1334 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1335 1335 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1336 1336 self._afterlock(commithook)
1337 1337 return ret
1338 1338
1339 1339 @unfilteredmethod
1340 1340 def commitctx(self, ctx, error=False):
1341 1341 """Add a new revision to current repository.
1342 1342 Revision information is passed via the context argument.
1343 1343 """
1344 1344
1345 1345 tr = lock = None
1346 1346 removed = list(ctx.removed())
1347 1347 p1, p2 = ctx.p1(), ctx.p2()
1348 1348 user = ctx.user()
1349 1349
1350 1350 lock = self.lock()
1351 1351 try:
1352 1352 tr = self.transaction("commit")
1353 1353 trp = weakref.proxy(tr)
1354 1354
1355 1355 if ctx.files():
1356 1356 m1 = p1.manifest().copy()
1357 1357 m2 = p2.manifest()
1358 1358
1359 1359 # check in files
1360 1360 new = {}
1361 1361 changed = []
1362 1362 linkrev = len(self)
1363 1363 for f in sorted(ctx.modified() + ctx.added()):
1364 1364 self.ui.note(f + "\n")
1365 1365 try:
1366 1366 fctx = ctx[f]
1367 1367 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1368 1368 changed)
1369 1369 m1.set(f, fctx.flags())
1370 1370 except OSError, inst:
1371 1371 self.ui.warn(_("trouble committing %s!\n") % f)
1372 1372 raise
1373 1373 except IOError, inst:
1374 1374 errcode = getattr(inst, 'errno', errno.ENOENT)
1375 1375 if error or errcode and errcode != errno.ENOENT:
1376 1376 self.ui.warn(_("trouble committing %s!\n") % f)
1377 1377 raise
1378 1378 else:
1379 1379 removed.append(f)
1380 1380
1381 1381 # update manifest
1382 1382 m1.update(new)
1383 1383 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1384 1384 drop = [f for f in removed if f in m1]
1385 1385 for f in drop:
1386 1386 del m1[f]
1387 1387 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1388 1388 p2.manifestnode(), (new, drop))
1389 1389 files = changed + removed
1390 1390 else:
1391 1391 mn = p1.manifestnode()
1392 1392 files = []
1393 1393
1394 1394 # update changelog
1395 1395 self.changelog.delayupdate()
1396 1396 n = self.changelog.add(mn, files, ctx.description(),
1397 1397 trp, p1.node(), p2.node(),
1398 1398 user, ctx.date(), ctx.extra().copy())
1399 1399 p = lambda: self.changelog.writepending() and self.root or ""
1400 1400 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1401 1401 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1402 1402 parent2=xp2, pending=p)
1403 1403 self.changelog.finalize(trp)
1404 1404 # set the new commit in its proper phase
1405 1405 targetphase = phases.newcommitphase(self.ui)
1406 1406 if targetphase:
1407 1407 # retracting the boundary does not alter parent changesets;
1408 1408 # if a parent has a higher phase, the resulting phase will
1409 1409 # be compliant anyway
1410 1410 #
1411 1411 # if the minimal phase was 0 we don't need to retract anything
1412 1412 phases.retractboundary(self, targetphase, [n])
1413 1413 tr.close()
1414 1414 branchmap.updatecache(self)
1415 1415 return n
1416 1416 finally:
1417 1417 if tr:
1418 1418 tr.release()
1419 1419 lock.release()
1420 1420
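# Illustrative sketch (not part of the original module): the retractboundary
# call above keys off the 'phases.new-commit' setting that
# phases.newcommitphase() reads, so a caller can make new commits start out
# secret. The repository path below is a hypothetical example.
def _example_secret_commits():
    from mercurial import ui as uimod, hg
    u = uimod.ui()
    u.setconfig('phases', 'new-commit', 'secret')
    # commits made through this repo object now enter the secret phase
    return hg.repository(u, '/path/to/repo')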
1421 1421 @unfilteredmethod
1422 1422 def destroyed(self, newheadnodes=None):
1423 1423 '''Inform the repository that nodes have been destroyed.
1424 1424 Intended for use by strip and rollback, so there's a common
1425 1425 place for anything that has to be done after destroying history.
1426 1426
1427 1427 If you know the branch heads cache was up to date before nodes were removed
1428 1428 and you also know the set of candidate new heads that may have resulted
1429 1429 from the destruction, you can set newheadnodes. This will enable the
1430 1430 code to update the branchheads cache, rather than having future code
1431 1431 decide it's invalid and regenerate it from scratch.
1432 1432 '''
1433 1433 # If we have info, newheadnodes, on how to update the branch cache, do
1434 1434 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1435 1435 # will be caught the next time it is read.
1436 1436 if newheadnodes:
1437 1437 ctxgen = (self[node] for node in newheadnodes
1438 1438 if self.changelog.hasnode(node))
1439 1439 cache = self._branchcache
1440 1440 branchmap.update(self, cache, ctxgen)
1441 cache.tipnode = self.changelog.tip()
1442 cache.tiprev = self.changelog.rev(cache.tipnode)
1443 1441 cache.write(self)
1444 1442
1445 1443 # Ensure the persistent tag cache is updated. Doing it now
1446 1444 # means that the tag cache only has to worry about destroyed
1447 1445 # heads immediately after a strip/rollback. That in turn
1448 1446 # guarantees that "cachetip == currenttip" (comparing both rev
1449 1447 # and node) always means no nodes have been added or destroyed.
1450 1448
1451 1449 # XXX this is suboptimal when qrefresh'ing: we strip the current
1452 1450 # head, refresh the tag cache, then immediately add a new head.
1453 1451 # But I think doing it this way is necessary for the "instant
1454 1452 # tag cache retrieval" case to work.
1455 1453 self.invalidatecaches()
1456 1454
1457 1455 # Discard all cache entries to force reloading everything.
1458 1456 self._filecache.clear()
1459 1457
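# Illustrative sketch (not part of the original module): a strip-like caller
# collects candidate heads *before* destroying anything, then hands them to
# destroyed() so the branch cache is refreshed in place rather than being
# invalidated and rebuilt on the next read. destroyed() itself filters the
# candidates through changelog.hasnode, so stale entries are harmless.
def _example_strip_candidates(repo, tostrip):
    # parents of stripped changesets are the only revisions that can
    # newly become heads
    candidates = set()
    for node in tostrip:
        candidates.update(p for p in repo.changelog.parents(node)
                          if p != nullid)
    return candidates
# after stripping: repo.destroyed(newheadnodes=candidates)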
1460 1458 def walk(self, match, node=None):
1461 1459 '''
1462 1460 walk recursively through the directory tree or a given
1463 1461 changeset, finding all files matched by the match
1464 1462 function
1465 1463 '''
1466 1464 return self[node].walk(match)
1467 1465
1468 1466 def status(self, node1='.', node2=None, match=None,
1469 1467 ignored=False, clean=False, unknown=False,
1470 1468 listsubrepos=False):
1471 1469 """return status of files between two nodes or node and working
1472 1470 directory.
1473 1471
1474 1472 If node1 is None, use the first dirstate parent instead.
1475 1473 If node2 is None, compare node1 with working directory.
1476 1474 """
1477 1475
1478 1476 def mfmatches(ctx):
1479 1477 mf = ctx.manifest().copy()
1480 1478 if match.always():
1481 1479 return mf
1482 1480 for fn in mf.keys():
1483 1481 if not match(fn):
1484 1482 del mf[fn]
1485 1483 return mf
1486 1484
1487 1485 if isinstance(node1, context.changectx):
1488 1486 ctx1 = node1
1489 1487 else:
1490 1488 ctx1 = self[node1]
1491 1489 if isinstance(node2, context.changectx):
1492 1490 ctx2 = node2
1493 1491 else:
1494 1492 ctx2 = self[node2]
1495 1493
1496 1494 working = ctx2.rev() is None
1497 1495 parentworking = working and ctx1 == self['.']
1498 1496 match = match or matchmod.always(self.root, self.getcwd())
1499 1497 listignored, listclean, listunknown = ignored, clean, unknown
1500 1498
1501 1499 # load earliest manifest first for caching reasons
1502 1500 if not working and ctx2.rev() < ctx1.rev():
1503 1501 ctx2.manifest()
1504 1502
1505 1503 if not parentworking:
1506 1504 def bad(f, msg):
1507 1505 # 'f' may be a directory pattern from 'match.files()',
1508 1506 # so 'f not in ctx1' is not enough
1509 1507 if f not in ctx1 and f not in ctx1.dirs():
1510 1508 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1511 1509 match.bad = bad
1512 1510
1513 1511 if working: # we need to scan the working dir
1514 1512 subrepos = []
1515 1513 if '.hgsub' in self.dirstate:
1516 1514 subrepos = ctx2.substate.keys()
1517 1515 s = self.dirstate.status(match, subrepos, listignored,
1518 1516 listclean, listunknown)
1519 1517 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1520 1518
1521 1519 # check for any possibly clean files
1522 1520 if parentworking and cmp:
1523 1521 fixup = []
1524 1522 # do a full compare of any files that might have changed
1525 1523 for f in sorted(cmp):
1526 1524 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1527 1525 or ctx1[f].cmp(ctx2[f])):
1528 1526 modified.append(f)
1529 1527 else:
1530 1528 fixup.append(f)
1531 1529
1532 1530 # update dirstate for files that are actually clean
1533 1531 if fixup:
1534 1532 if listclean:
1535 1533 clean += fixup
1536 1534
1537 1535 try:
1538 1536 # updating the dirstate is optional
1539 1537 # so we don't wait on the lock
1540 1538 wlock = self.wlock(False)
1541 1539 try:
1542 1540 for f in fixup:
1543 1541 self.dirstate.normal(f)
1544 1542 finally:
1545 1543 wlock.release()
1546 1544 except error.LockError:
1547 1545 pass
1548 1546
1549 1547 if not parentworking:
1550 1548 mf1 = mfmatches(ctx1)
1551 1549 if working:
1552 1550 # we are comparing working dir against non-parent
1553 1551 # generate a pseudo-manifest for the working dir
1554 1552 mf2 = mfmatches(self['.'])
1555 1553 for f in cmp + modified + added:
1556 1554 mf2[f] = None
1557 1555 mf2.set(f, ctx2.flags(f))
1558 1556 for f in removed:
1559 1557 if f in mf2:
1560 1558 del mf2[f]
1561 1559 else:
1562 1560 # we are comparing two revisions
1563 1561 deleted, unknown, ignored = [], [], []
1564 1562 mf2 = mfmatches(ctx2)
1565 1563
1566 1564 modified, added, clean = [], [], []
1567 1565 withflags = mf1.withflags() | mf2.withflags()
1568 1566 for fn in mf2:
1569 1567 if fn in mf1:
1570 1568 if (fn not in deleted and
1571 1569 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1572 1570 (mf1[fn] != mf2[fn] and
1573 1571 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1574 1572 modified.append(fn)
1575 1573 elif listclean:
1576 1574 clean.append(fn)
1577 1575 del mf1[fn]
1578 1576 elif fn not in deleted:
1579 1577 added.append(fn)
1580 1578 removed = mf1.keys()
1581 1579
1582 1580 if working and modified and not self.dirstate._checklink:
1583 1581 # Symlink placeholders may get non-symlink-like contents
1584 1582 # via user error or dereferencing by NFS or Samba servers,
1585 1583 # so we filter out any placeholders that don't look like a
1586 1584 # symlink
1587 1585 sane = []
1588 1586 for f in modified:
1589 1587 if ctx2.flags(f) == 'l':
1590 1588 d = ctx2[f].data()
1591 1589 if len(d) >= 1024 or '\n' in d or util.binary(d):
1592 1590 self.ui.debug('ignoring suspect symlink placeholder'
1593 1591 ' "%s"\n' % f)
1594 1592 continue
1595 1593 sane.append(f)
1596 1594 modified = sane
1597 1595
1598 1596 r = modified, added, removed, deleted, unknown, ignored, clean
1599 1597
1600 1598 if listsubrepos:
1601 1599 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1602 1600 if working:
1603 1601 rev2 = None
1604 1602 else:
1605 1603 rev2 = ctx2.substate[subpath][1]
1606 1604 try:
1607 1605 submatch = matchmod.narrowmatcher(subpath, match)
1608 1606 s = sub.status(rev2, match=submatch, ignored=listignored,
1609 1607 clean=listclean, unknown=listunknown,
1610 1608 listsubrepos=True)
1611 1609 for rfiles, sfiles in zip(r, s):
1612 1610 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1613 1611 except error.LookupError:
1614 1612 self.ui.status(_("skipping missing subrepository: %s\n")
1615 1613 % subpath)
1616 1614
1617 1615 for l in r:
1618 1616 l.sort()
1619 1617 return r
1620 1618
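# Illustrative sketch (not part of the original module): status() returns
# seven sorted lists in the order shown above. With the default arguments it
# compares the working directory against its first parent; the one-letter
# flags below mirror 'hg status' output.
def _example_print_status(repo):
    st = repo.status(unknown=True, ignored=True, clean=True)
    for flag, files in zip('MAR!?IC', st):
        for f in files:
            repo.ui.write('%s %s\n' % (flag, f))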
1621 1619 def heads(self, start=None):
1622 1620 heads = self.changelog.heads(start)
1623 1621 # sort the output in rev descending order
1624 1622 return sorted(heads, key=self.changelog.rev, reverse=True)
1625 1623
1626 1624 def branchheads(self, branch=None, start=None, closed=False):
1627 1625 '''return a (possibly filtered) list of heads for the given branch
1628 1626
1629 1627 Heads are returned in topological order, from newest to oldest.
1630 1628 If branch is None, use the dirstate branch.
1631 1629 If start is not None, return only heads reachable from start.
1632 1630 If closed is True, return heads that are marked as closed as well.
1633 1631 '''
1634 1632 if branch is None:
1635 1633 branch = self[None].branch()
1636 1634 branches = self.branchmap()
1637 1635 if branch not in branches:
1638 1636 return []
1639 1637 # the cache returns heads ordered lowest to highest
1640 1638 bheads = list(reversed(branches[branch]))
1641 1639 if start is not None:
1642 1640 # filter out the heads that cannot be reached from startrev
1643 1641 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1644 1642 bheads = [h for h in bheads if h in fbheads]
1645 1643 if not closed:
1646 1644 bheads = [h for h in bheads if not self[h].closesbranch()]
1647 1645 return bheads
1648 1646
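# Illustrative sketch (not part of the original module): since branchheads()
# returns heads newest-first, the tip-most open head of a named branch is
# simply the first entry.
def _example_branchtip(repo, branch='default'):
    heads = repo.branchheads(branch)
    if heads:
        return heads[0]
    return None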
1649 1647 def branches(self, nodes):
1650 1648 if not nodes:
1651 1649 nodes = [self.changelog.tip()]
1652 1650 b = []
1653 1651 for n in nodes:
1654 1652 t = n
1655 1653 while True:
1656 1654 p = self.changelog.parents(n)
1657 1655 if p[1] != nullid or p[0] == nullid:
1658 1656 b.append((t, n, p[0], p[1]))
1659 1657 break
1660 1658 n = p[0]
1661 1659 return b
1662 1660
1663 1661 def between(self, pairs):
1664 1662 r = []
1665 1663
1666 1664 for top, bottom in pairs:
1667 1665 n, l, i = top, [], 0
1668 1666 f = 1
1669 1667
1670 1668 while n != bottom and n != nullid:
1671 1669 p = self.changelog.parents(n)[0]
1672 1670 if i == f:
1673 1671 l.append(n)
1674 1672 f = f * 2
1675 1673 n = p
1676 1674 i += 1
1677 1675
1678 1676 r.append(l)
1679 1677
1680 1678 return r
1681 1679
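# Worked example (not part of the original module): between() walks first
# parents from `top` towards `bottom`, recording the nodes at exponentially
# growing distances 1, 2, 4, 8, ... from the top. On a purely linear history
# with revisions 0..9, the pair (node(9), node(0)) yields the nodes of
# revisions 8, 7, 5 and 1. The legacy discovery protocol uses this sampling
# to bisect towards a common ancestor.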
1682 1680 def pull(self, remote, heads=None, force=False):
1683 1681 # don't open a transaction for nothing or you break future useful
1684 1682 # rollback calls
1685 1683 tr = None
1686 1684 trname = 'pull\n' + util.hidepassword(remote.url())
1687 1685 lock = self.lock()
1688 1686 try:
1689 1687 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1690 1688 force=force)
1691 1689 common, fetch, rheads = tmp
1692 1690 if not fetch:
1693 1691 self.ui.status(_("no changes found\n"))
1694 1692 added = []
1695 1693 result = 0
1696 1694 else:
1697 1695 tr = self.transaction(trname)
1698 1696 if heads is None and list(common) == [nullid]:
1699 1697 self.ui.status(_("requesting all changes\n"))
1700 1698 elif heads is None and remote.capable('changegroupsubset'):
1701 1699 # issue1320, avoid a race if remote changed after discovery
1702 1700 heads = rheads
1703 1701
1704 1702 if remote.capable('getbundle'):
1705 1703 cg = remote.getbundle('pull', common=common,
1706 1704 heads=heads or rheads)
1707 1705 elif heads is None:
1708 1706 cg = remote.changegroup(fetch, 'pull')
1709 1707 elif not remote.capable('changegroupsubset'):
1710 1708 raise util.Abort(_("partial pull cannot be done because "
1711 1709 "other repository doesn't support "
1712 1710 "changegroupsubset."))
1713 1711 else:
1714 1712 cg = remote.changegroupsubset(fetch, heads, 'pull')
1715 1713 clstart = len(self.changelog)
1716 1714 result = self.addchangegroup(cg, 'pull', remote.url())
1717 1715 clend = len(self.changelog)
1718 1716 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1719 1717
1720 1718 # compute target subset
1721 1719 if heads is None:
1722 1720 # We pulled everything possible
1723 1721 # sync on everything common
1724 1722 subset = common + added
1725 1723 else:
1726 1724 # We pulled a specific subset
1727 1725 # sync on this subset
1728 1726 subset = heads
1729 1727
1730 1728 # Get remote phases data from remote
1731 1729 remotephases = remote.listkeys('phases')
1732 1730 publishing = bool(remotephases.get('publishing', False))
1733 1731 if remotephases and not publishing:
1734 1732 # remote is new and non-publishing
1735 1733 pheads, _dr = phases.analyzeremotephases(self, subset,
1736 1734 remotephases)
1737 1735 phases.advanceboundary(self, phases.public, pheads)
1738 1736 phases.advanceboundary(self, phases.draft, subset)
1739 1737 else:
1740 1738 # Remote is old or publishing; all common changesets
1741 1739 # should be seen as public
1742 1740 phases.advanceboundary(self, phases.public, subset)
1743 1741
1744 1742 if obsolete._enabled:
1745 1743 self.ui.debug('fetching remote obsolete markers\n')
1746 1744 remoteobs = remote.listkeys('obsolete')
1747 1745 if 'dump0' in remoteobs:
1748 1746 if tr is None:
1749 1747 tr = self.transaction(trname)
1750 1748 for key in sorted(remoteobs, reverse=True):
1751 1749 if key.startswith('dump'):
1752 1750 data = base85.b85decode(remoteobs[key])
1753 1751 self.obsstore.mergemarkers(tr, data)
1754 1752 self.invalidatevolatilesets()
1755 1753 if tr is not None:
1756 1754 tr.close()
1757 1755 finally:
1758 1756 if tr is not None:
1759 1757 tr.release()
1760 1758 lock.release()
1761 1759
1762 1760 return result
1763 1761
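# Illustrative sketch (not part of the original module): driving pull()
# directly. hg.peer() turns a path or URL into a peer repository; the URL
# below is a hypothetical example.
def _example_pull(repo):
    from mercurial import hg
    other = hg.peer(repo.ui, {}, 'http://example.com/repo')
    return repo.pull(other)   # addchangegroup() result, or 0 for no changes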
1764 1762 def checkpush(self, force, revs):
1765 1763 """Extensions can override this function if additional checks have
1766 1764 to be performed before pushing, or call it if they override the push
1767 1765 command.
1768 1766 """
1769 1767 pass
1770 1768
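# Illustrative sketch (not part of the original module): an extension's
# reposetup() could subclass the repo to veto pushes via checkpush().
# (util.Abort and _ are this module's imports; a real extension would import
# them from mercurial.)
def _example_reposetup(ui, repo):
    class vetoingrepo(repo.__class__):
        def checkpush(self, force, revs):
            super(vetoingrepo, self).checkpush(force, revs)
            if not force:
                raise util.Abort(_('pushes are disabled on this repository'))
    repo.__class__ = vetoingrepo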
1771 1769 def push(self, remote, force=False, revs=None, newbranch=False):
1772 1770 '''Push outgoing changesets (limited by revs) from the current
1773 1771 repository to remote. Return an integer:
1774 1772 - None means nothing to push
1775 1773 - 0 means HTTP error
1776 1774 - 1 means we pushed and remote head count is unchanged *or*
1777 1775 we have outgoing changesets but refused to push
1778 1776 - other values as described by addchangegroup()
1779 1777 '''
1780 1778 # there are two ways to push to remote repo:
1781 1779 #
1782 1780 # addchangegroup assumes local user can lock remote
1783 1781 # repo (local filesystem, old ssh servers).
1784 1782 #
1785 1783 # unbundle assumes local user cannot lock remote repo (new ssh
1786 1784 # servers, http servers).
1787 1785
1788 1786 if not remote.canpush():
1789 1787 raise util.Abort(_("destination does not support push"))
1790 1788 unfi = self.unfiltered()
1791 1789 # get local lock as we might write phase data
1792 1790 locallock = self.lock()
1793 1791 try:
1794 1792 self.checkpush(force, revs)
1795 1793 lock = None
1796 1794 unbundle = remote.capable('unbundle')
1797 1795 if not unbundle:
1798 1796 lock = remote.lock()
1799 1797 try:
1800 1798 # discovery
1801 1799 fci = discovery.findcommonincoming
1802 1800 commoninc = fci(unfi, remote, force=force)
1803 1801 common, inc, remoteheads = commoninc
1804 1802 fco = discovery.findcommonoutgoing
1805 1803 outgoing = fco(unfi, remote, onlyheads=revs,
1806 1804 commoninc=commoninc, force=force)
1807 1805
1808 1806
1809 1807 if not outgoing.missing:
1810 1808 # nothing to push
1811 1809 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1812 1810 ret = None
1813 1811 else:
1814 1812 # something to push
1815 1813 if not force:
1816 1814 # if self.obsstore is empty (falsy) --> no obsolete changesets,
1817 1815 # so we can skip the iteration
1818 1816 if unfi.obsstore:
1819 1817 # these messages are defined here to stay within the 80-char limit
1820 1818 mso = _("push includes obsolete changeset: %s!")
1821 1819 msu = _("push includes unstable changeset: %s!")
1822 1820 msb = _("push includes bumped changeset: %s!")
1823 1821 msd = _("push includes divergent changeset: %s!")
1824 1822 # If we are pushing and there is at least one
1825 1823 # obsolete or unstable changeset in missing, at
1826 1824 # least one of the missing heads will be obsolete or
1827 1825 # unstable, so checking heads only is OK.
1828 1826 for node in outgoing.missingheads:
1829 1827 ctx = unfi[node]
1830 1828 if ctx.obsolete():
1831 1829 raise util.Abort(mso % ctx)
1832 1830 elif ctx.unstable():
1833 1831 raise util.Abort(msu % ctx)
1834 1832 elif ctx.bumped():
1835 1833 raise util.Abort(msb % ctx)
1836 1834 elif ctx.divergent():
1837 1835 raise util.Abort(msd % ctx)
1838 1836 discovery.checkheads(unfi, remote, outgoing,
1839 1837 remoteheads, newbranch,
1840 1838 bool(inc))
1841 1839
1842 1840 # create a changegroup from local
1843 1841 if revs is None and not outgoing.excluded:
1844 1842 # push everything,
1845 1843 # use the fast path, no race possible on push
1846 1844 cg = self._changegroup(outgoing.missing, 'push')
1847 1845 else:
1848 1846 cg = self.getlocalbundle('push', outgoing)
1849 1847
1850 1848 # apply changegroup to remote
1851 1849 if unbundle:
1852 1850 # the local repo finds heads on the server, and figures out
1853 1851 # which revs it must push. Once the revs are transferred, if
1854 1852 # the server finds it has different heads (someone else won the
1855 1853 # commit/push race), the server aborts.
1856 1854 if force:
1857 1855 remoteheads = ['force']
1858 1856 # ssh: return remote's addchangegroup()
1859 1857 # http: return remote's addchangegroup() or 0 for error
1860 1858 ret = remote.unbundle(cg, remoteheads, 'push')
1861 1859 else:
1862 1860 # we return an integer indicating remote head count
1863 1861 # change
1864 1862 ret = remote.addchangegroup(cg, 'push', self.url())
1865 1863
1866 1864 if ret:
1867 1865 # push succeeded, synchronize the target of the push
1868 1866 cheads = outgoing.missingheads
1869 1867 elif revs is None:
1870 1868 # Pushing everything failed; synchronize on all common
1871 1869 cheads = outgoing.commonheads
1872 1870 else:
1873 1871 # I want cheads = heads(::missingheads and ::commonheads)
1874 1872 # (missingheads is revs with secret changeset filtered out)
1875 1873 #
1876 1874 # This can be expressed as:
1877 1875 # cheads = ( (missingheads and ::commonheads)
1878 1876 # + (commonheads and ::missingheads)
1879 1877 # )
1880 1878 #
1881 1879 # while trying to push we already computed the following:
1882 1880 # common = (::commonheads)
1883 1881 # missing = ((commonheads::missingheads) - commonheads)
1884 1882 #
1885 1883 # We can pick:
1886 1884 # * missingheads part of common (::commonheads)
1887 1885 common = set(outgoing.common)
1888 1886 cheads = [node for node in revs if node in common]
1889 1887 # and
1890 1888 # * commonheads parents on missing
1891 1889 revset = unfi.set('%ln and parents(roots(%ln))',
1892 1890 outgoing.commonheads,
1893 1891 outgoing.missing)
1894 1892 cheads.extend(c.node() for c in revset)
1895 1893 # even when we don't push, exchanging phase data is useful
1896 1894 remotephases = remote.listkeys('phases')
1897 1895 if not remotephases: # old server or public only repo
1898 1896 phases.advanceboundary(self, phases.public, cheads)
1899 1897 # don't push any phase data as there is nothing to push
1900 1898 else:
1901 1899 ana = phases.analyzeremotephases(self, cheads, remotephases)
1902 1900 pheads, droots = ana
1903 1901 ### Apply remote phase on local
1904 1902 if remotephases.get('publishing', False):
1905 1903 phases.advanceboundary(self, phases.public, cheads)
1906 1904 else: # publish = False
1907 1905 phases.advanceboundary(self, phases.public, pheads)
1908 1906 phases.advanceboundary(self, phases.draft, cheads)
1909 1907 ### Apply local phase on remote
1910 1908
1911 1909 # Get the list of all revs that are draft on the remote but public here.
1912 1910 # XXX Beware that the revset breaks if droots is not strictly
1913 1911 # XXX roots; we may want to ensure it is, but that is costly
1914 1912 outdated = unfi.set('heads((%ln::%ln) and public())',
1915 1913 droots, cheads)
1916 1914 for newremotehead in outdated:
1917 1915 r = remote.pushkey('phases',
1918 1916 newremotehead.hex(),
1919 1917 str(phases.draft),
1920 1918 str(phases.public))
1921 1919 if not r:
1922 1920 self.ui.warn(_('updating %s to public failed!\n')
1923 1921 % newremotehead)
1924 1922 self.ui.debug('try to push obsolete markers to remote\n')
1925 1923 if (obsolete._enabled and self.obsstore and
1926 1924 'obsolete' in remote.listkeys('namespaces')):
1927 1925 rslts = []
1928 1926 remotedata = self.listkeys('obsolete')
1929 1927 for key in sorted(remotedata, reverse=True):
1930 1928 # reverse sort to ensure we end with dump0
1931 1929 data = remotedata[key]
1932 1930 rslts.append(remote.pushkey('obsolete', key, '', data))
1933 1931 if [r for r in rslts if not r]:
1934 1932 msg = _('failed to push some obsolete markers!\n')
1935 1933 self.ui.warn(msg)
1936 1934 finally:
1937 1935 if lock is not None:
1938 1936 lock.release()
1939 1937 finally:
1940 1938 locallock.release()
1941 1939
1942 1940 self.ui.debug("checking for updated bookmarks\n")
1943 1941 rb = remote.listkeys('bookmarks')
1944 1942 for k in rb.keys():
1945 1943 if k in unfi._bookmarks:
1946 1944 nr, nl = rb[k], hex(self._bookmarks[k])
1947 1945 if nr in unfi:
1948 1946 cr = unfi[nr]
1949 1947 cl = unfi[nl]
1950 1948 if bookmarks.validdest(unfi, cr, cl):
1951 1949 r = remote.pushkey('bookmarks', k, nr, nl)
1952 1950 if r:
1953 1951 self.ui.status(_("updating bookmark %s\n") % k)
1954 1952 else:
1955 1953 self.ui.warn(_('updating bookmark %s'
1956 1954 ' failed!\n') % k)
1957 1955
1958 1956 return ret
1959 1957
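# Illustrative sketch (not part of the original module): interpreting push()
# return values as documented in its docstring.
def _example_push(repo, other):
    ret = repo.push(other)
    if ret is None:
        repo.ui.status('nothing to push\n')
    elif ret == 0:
        repo.ui.warn('push failed (HTTP error)\n')
    else:
        # 1 means the remote head count is unchanged (or we refused to
        # push); other values follow addchangegroup()'s convention
        repo.ui.status('push returned %d\n' % ret)
    return ret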
1960 1958 def changegroupinfo(self, nodes, source):
1961 1959 if self.ui.verbose or source == 'bundle':
1962 1960 self.ui.status(_("%d changesets found\n") % len(nodes))
1963 1961 if self.ui.debugflag:
1964 1962 self.ui.debug("list of changesets:\n")
1965 1963 for node in nodes:
1966 1964 self.ui.debug("%s\n" % hex(node))
1967 1965
1968 1966 def changegroupsubset(self, bases, heads, source):
1969 1967 """Compute a changegroup consisting of all the nodes that are
1970 1968 descendants of any of the bases and ancestors of any of the heads.
1971 1969 Return a chunkbuffer object whose read() method will return
1972 1970 successive changegroup chunks.
1973 1971
1974 1972 It is fairly complex as determining which filenodes and which
1975 1973 manifest nodes need to be included for the changeset to be complete
1976 1974 is non-trivial.
1977 1975
1978 1976 Another wrinkle is doing the reverse, figuring out which changeset in
1979 1977 the changegroup a particular filenode or manifestnode belongs to.
1980 1978 """
1981 1979 cl = self.changelog
1982 1980 if not bases:
1983 1981 bases = [nullid]
1984 1982 csets, bases, heads = cl.nodesbetween(bases, heads)
1985 1983 # We assume that all ancestors of bases are known
1986 1984 common = cl.ancestors([cl.rev(n) for n in bases])
1987 1985 return self._changegroupsubset(common, csets, heads, source)
1988 1986
1989 1987 def getlocalbundle(self, source, outgoing):
1990 1988 """Like getbundle, but taking a discovery.outgoing as an argument.
1991 1989
1992 1990 This is only implemented for local repos and reuses potentially
1993 1991 precomputed sets in outgoing."""
1994 1992 if not outgoing.missing:
1995 1993 return None
1996 1994 return self._changegroupsubset(outgoing.common,
1997 1995 outgoing.missing,
1998 1996 outgoing.missingheads,
1999 1997 source)
2000 1998
2001 1999 def getbundle(self, source, heads=None, common=None):
2002 2000 """Like changegroupsubset, but returns the set difference between the
2003 2001 ancestors of heads and the ancestors common.
2004 2002
2005 2003 If heads is None, use the local heads. If common is None, use [nullid].
2006 2004
2007 2005 The nodes in common might not all be known locally due to the way the
2008 2006 current discovery protocol works.
2009 2007 """
2010 2008 cl = self.changelog
2011 2009 if common:
2012 2010 hasnode = cl.hasnode
2013 2011 common = [n for n in common if hasnode(n)]
2014 2012 else:
2015 2013 common = [nullid]
2016 2014 if not heads:
2017 2015 heads = cl.heads()
2018 2016 return self.getlocalbundle(source,
2019 2017 discovery.outgoing(cl, common, heads))
2020 2018
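# Illustrative sketch (not part of the original module): with common=None the
# base defaults to nullid, so the resulting bundle carries the full history
# up to the given heads. somenode and ancestornode below are hypothetical
# placeholders.
#   cg = repo.getbundle('bundle')          # everything up to all local heads
#   cg = repo.getbundle('bundle', heads=[somenode], common=[ancestornode])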
2021 2019 @unfilteredmethod
2022 2020 def _changegroupsubset(self, commonrevs, csets, heads, source):
2023 2021
2024 2022 cl = self.changelog
2025 2023 mf = self.manifest
2026 2024 mfs = {} # needed manifests
2027 2025 fnodes = {} # needed file nodes
2028 2026 changedfiles = set()
2029 2027 fstate = ['', {}]
2030 2028 count = [0, 0]
2031 2029
2032 2030 # can we go through the fast path?
2033 2031 heads.sort()
2034 2032 if heads == sorted(self.heads()):
2035 2033 return self._changegroup(csets, source)
2036 2034
2037 2035 # slow path
2038 2036 self.hook('preoutgoing', throw=True, source=source)
2039 2037 self.changegroupinfo(csets, source)
2040 2038
2041 2039 # filter any nodes that claim to be part of the known set
2042 2040 def prune(revlog, missing):
2043 2041 rr, rl = revlog.rev, revlog.linkrev
2044 2042 return [n for n in missing
2045 2043 if rl(rr(n)) not in commonrevs]
2046 2044
2047 2045 progress = self.ui.progress
2048 2046 _bundling = _('bundling')
2049 2047 _changesets = _('changesets')
2050 2048 _manifests = _('manifests')
2051 2049 _files = _('files')
2052 2050
2053 2051 def lookup(revlog, x):
2054 2052 if revlog == cl:
2055 2053 c = cl.read(x)
2056 2054 changedfiles.update(c[3])
2057 2055 mfs.setdefault(c[0], x)
2058 2056 count[0] += 1
2059 2057 progress(_bundling, count[0],
2060 2058 unit=_changesets, total=count[1])
2061 2059 return x
2062 2060 elif revlog == mf:
2063 2061 clnode = mfs[x]
2064 2062 mdata = mf.readfast(x)
2065 2063 for f, n in mdata.iteritems():
2066 2064 if f in changedfiles:
2067 2065 fnodes[f].setdefault(n, clnode)
2068 2066 count[0] += 1
2069 2067 progress(_bundling, count[0],
2070 2068 unit=_manifests, total=count[1])
2071 2069 return clnode
2072 2070 else:
2073 2071 progress(_bundling, count[0], item=fstate[0],
2074 2072 unit=_files, total=count[1])
2075 2073 return fstate[1][x]
2076 2074
2077 2075 bundler = changegroup.bundle10(lookup)
2078 2076 reorder = self.ui.config('bundle', 'reorder', 'auto')
2079 2077 if reorder == 'auto':
2080 2078 reorder = None
2081 2079 else:
2082 2080 reorder = util.parsebool(reorder)
2083 2081
2084 2082 def gengroup():
2085 2083 # Create a changenode group generator that will call our functions
2086 2084 # back to lookup the owning changenode and collect information.
2087 2085 count[:] = [0, len(csets)]
2088 2086 for chunk in cl.group(csets, bundler, reorder=reorder):
2089 2087 yield chunk
2090 2088 progress(_bundling, None)
2091 2089
2092 2090 # Create a generator for the manifestnodes that calls our lookup
2093 2091 # and data collection functions back.
2094 2092 for f in changedfiles:
2095 2093 fnodes[f] = {}
2096 2094 count[:] = [0, len(mfs)]
2097 2095 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2098 2096 yield chunk
2099 2097 progress(_bundling, None)
2100 2098
2101 2099 mfs.clear()
2102 2100
2103 2101 # Go through all our files in order sorted by name.
2104 2102 count[:] = [0, len(changedfiles)]
2105 2103 for fname in sorted(changedfiles):
2106 2104 filerevlog = self.file(fname)
2107 2105 if not len(filerevlog):
2108 2106 raise util.Abort(_("empty or missing revlog for %s")
2109 2107 % fname)
2110 2108 fstate[0] = fname
2111 2109 fstate[1] = fnodes.pop(fname, {})
2112 2110
2113 2111 nodelist = prune(filerevlog, fstate[1])
2114 2112 if nodelist:
2115 2113 count[0] += 1
2116 2114 yield bundler.fileheader(fname)
2117 2115 for chunk in filerevlog.group(nodelist, bundler, reorder):
2118 2116 yield chunk
2119 2117
2120 2118 # Signal that no more groups are left.
2121 2119 yield bundler.close()
2122 2120 progress(_bundling, None)
2123 2121
2124 2122 if csets:
2125 2123 self.hook('outgoing', node=hex(csets[0]), source=source)
2126 2124
2127 2125 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2128 2126
2129 2127 def changegroup(self, basenodes, source):
2130 2128 # to avoid a race we use changegroupsubset() (issue1320)
2131 2129 return self.changegroupsubset(basenodes, self.heads(), source)
2132 2130
2133 2131 @unfilteredmethod
2134 2132 def _changegroup(self, nodes, source):
2135 2133 """Compute the changegroup of all nodes that we have that a recipient
2136 2134 doesn't. Return a chunkbuffer object whose read() method will return
2137 2135 successive changegroup chunks.
2138 2136
2139 2137 This is much easier than the previous function as we can assume that
2140 2138 the recipient has any changenode we aren't sending them.
2141 2139
2142 2140 nodes is the set of nodes to send"""
2143 2141
2144 2142 cl = self.changelog
2145 2143 mf = self.manifest
2146 2144 mfs = {}
2147 2145 changedfiles = set()
2148 2146 fstate = ['']
2149 2147 count = [0, 0]
2150 2148
2151 2149 self.hook('preoutgoing', throw=True, source=source)
2152 2150 self.changegroupinfo(nodes, source)
2153 2151
2154 2152 revset = set([cl.rev(n) for n in nodes])
2155 2153
2156 2154 def gennodelst(log):
2157 2155 ln, llr = log.node, log.linkrev
2158 2156 return [ln(r) for r in log if llr(r) in revset]
2159 2157
2160 2158 progress = self.ui.progress
2161 2159 _bundling = _('bundling')
2162 2160 _changesets = _('changesets')
2163 2161 _manifests = _('manifests')
2164 2162 _files = _('files')
2165 2163
2166 2164 def lookup(revlog, x):
2167 2165 if revlog == cl:
2168 2166 c = cl.read(x)
2169 2167 changedfiles.update(c[3])
2170 2168 mfs.setdefault(c[0], x)
2171 2169 count[0] += 1
2172 2170 progress(_bundling, count[0],
2173 2171 unit=_changesets, total=count[1])
2174 2172 return x
2175 2173 elif revlog == mf:
2176 2174 count[0] += 1
2177 2175 progress(_bundling, count[0],
2178 2176 unit=_manifests, total=count[1])
2179 2177 return cl.node(revlog.linkrev(revlog.rev(x)))
2180 2178 else:
2181 2179 progress(_bundling, count[0], item=fstate[0],
2182 2180 total=count[1], unit=_files)
2183 2181 return cl.node(revlog.linkrev(revlog.rev(x)))
2184 2182
2185 2183 bundler = changegroup.bundle10(lookup)
2186 2184 reorder = self.ui.config('bundle', 'reorder', 'auto')
2187 2185 if reorder == 'auto':
2188 2186 reorder = None
2189 2187 else:
2190 2188 reorder = util.parsebool(reorder)
2191 2189
2192 2190 def gengroup():
2193 2191 '''yield a sequence of changegroup chunks (strings)'''
2194 2192 # construct a list of all changed files
2195 2193
2196 2194 count[:] = [0, len(nodes)]
2197 2195 for chunk in cl.group(nodes, bundler, reorder=reorder):
2198 2196 yield chunk
2199 2197 progress(_bundling, None)
2200 2198
2201 2199 count[:] = [0, len(mfs)]
2202 2200 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2203 2201 yield chunk
2204 2202 progress(_bundling, None)
2205 2203
2206 2204 count[:] = [0, len(changedfiles)]
2207 2205 for fname in sorted(changedfiles):
2208 2206 filerevlog = self.file(fname)
2209 2207 if not len(filerevlog):
2210 2208 raise util.Abort(_("empty or missing revlog for %s")
2211 2209 % fname)
2212 2210 fstate[0] = fname
2213 2211 nodelist = gennodelst(filerevlog)
2214 2212 if nodelist:
2215 2213 count[0] += 1
2216 2214 yield bundler.fileheader(fname)
2217 2215 for chunk in filerevlog.group(nodelist, bundler, reorder):
2218 2216 yield chunk
2219 2217 yield bundler.close()
2220 2218 progress(_bundling, None)
2221 2219
2222 2220 if nodes:
2223 2221 self.hook('outgoing', node=hex(nodes[0]), source=source)
2224 2222
2225 2223 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2226 2224
2227 2225 @unfilteredmethod
2228 2226 def addchangegroup(self, source, srctype, url, emptyok=False):
2229 2227 """Add the changegroup returned by source.read() to this repo.
2230 2228 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2231 2229 the URL of the repo where this changegroup is coming from.
2232 2230
2233 2231 Return an integer summarizing the change to this repo:
2234 2232 - nothing changed or no source: 0
2235 2233 - more heads than before: 1+added heads (2..n)
2236 2234 - fewer heads than before: -1-removed heads (-2..-n)
2237 2235 - number of heads stays the same: 1
2238 2236 """
2239 2237 def csmap(x):
2240 2238 self.ui.debug("add changeset %s\n" % short(x))
2241 2239 return len(cl)
2242 2240
2243 2241 def revmap(x):
2244 2242 return cl.rev(x)
2245 2243
2246 2244 if not source:
2247 2245 return 0
2248 2246
2249 2247 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2250 2248
2251 2249 changesets = files = revisions = 0
2252 2250 efiles = set()
2253 2251
2254 2252 # write changelog data to temp files so concurrent readers will not see
2255 2253 # an inconsistent view
2256 2254 cl = self.changelog
2257 2255 cl.delayupdate()
2258 2256 oldheads = cl.heads()
2259 2257
2260 2258 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2261 2259 try:
2262 2260 trp = weakref.proxy(tr)
2263 2261 # pull off the changeset group
2264 2262 self.ui.status(_("adding changesets\n"))
2265 2263 clstart = len(cl)
2266 2264 class prog(object):
2267 2265 step = _('changesets')
2268 2266 count = 1
2269 2267 ui = self.ui
2270 2268 total = None
2271 2269 def __call__(self):
2272 2270 self.ui.progress(self.step, self.count, unit=_('chunks'),
2273 2271 total=self.total)
2274 2272 self.count += 1
2275 2273 pr = prog()
2276 2274 source.callback = pr
2277 2275
2278 2276 source.changelogheader()
2279 2277 srccontent = cl.addgroup(source, csmap, trp)
2280 2278 if not (srccontent or emptyok):
2281 2279 raise util.Abort(_("received changelog group is empty"))
2282 2280 clend = len(cl)
2283 2281 changesets = clend - clstart
2284 2282 for c in xrange(clstart, clend):
2285 2283 efiles.update(self[c].files())
2286 2284 efiles = len(efiles)
2287 2285 self.ui.progress(_('changesets'), None)
2288 2286
2289 2287 # pull off the manifest group
2290 2288 self.ui.status(_("adding manifests\n"))
2291 2289 pr.step = _('manifests')
2292 2290 pr.count = 1
2293 2291 pr.total = changesets # manifests <= changesets
2294 2292 # no need to check for empty manifest group here:
2295 2293 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2296 2294 # no new manifest will be created and the manifest group will
2297 2295 # be empty during the pull
2298 2296 source.manifestheader()
2299 2297 self.manifest.addgroup(source, revmap, trp)
2300 2298 self.ui.progress(_('manifests'), None)
2301 2299
2302 2300 needfiles = {}
2303 2301 if self.ui.configbool('server', 'validate', default=False):
2304 2302 # validate incoming csets have their manifests
2305 2303 for cset in xrange(clstart, clend):
2306 2304 mfest = self.changelog.read(self.changelog.node(cset))[0]
2307 2305 mfest = self.manifest.readdelta(mfest)
2308 2306 # store file nodes we must see
2309 2307 for f, n in mfest.iteritems():
2310 2308 needfiles.setdefault(f, set()).add(n)
2311 2309
2312 2310 # process the files
2313 2311 self.ui.status(_("adding file changes\n"))
2314 2312 pr.step = _('files')
2315 2313 pr.count = 1
2316 2314 pr.total = efiles
2317 2315 source.callback = None
2318 2316
2319 2317 while True:
2320 2318 chunkdata = source.filelogheader()
2321 2319 if not chunkdata:
2322 2320 break
2323 2321 f = chunkdata["filename"]
2324 2322 self.ui.debug("adding %s revisions\n" % f)
2325 2323 pr()
2326 2324 fl = self.file(f)
2327 2325 o = len(fl)
2328 2326 if not fl.addgroup(source, revmap, trp):
2329 2327 raise util.Abort(_("received file revlog group is empty"))
2330 2328 revisions += len(fl) - o
2331 2329 files += 1
2332 2330 if f in needfiles:
2333 2331 needs = needfiles[f]
2334 2332 for new in xrange(o, len(fl)):
2335 2333 n = fl.node(new)
2336 2334 if n in needs:
2337 2335 needs.remove(n)
2338 2336 if not needs:
2339 2337 del needfiles[f]
2340 2338 self.ui.progress(_('files'), None)
2341 2339
2342 2340 for f, needs in needfiles.iteritems():
2343 2341 fl = self.file(f)
2344 2342 for n in needs:
2345 2343 try:
2346 2344 fl.rev(n)
2347 2345 except error.LookupError:
2348 2346 raise util.Abort(
2349 2347 _('missing file data for %s:%s - run hg verify') %
2350 2348 (f, hex(n)))
2351 2349
2352 2350 dh = 0
2353 2351 if oldheads:
2354 2352 heads = cl.heads()
2355 2353 dh = len(heads) - len(oldheads)
2356 2354 for h in heads:
2357 2355 if h not in oldheads and self[h].closesbranch():
2358 2356 dh -= 1
2359 2357 htext = ""
2360 2358 if dh:
2361 2359 htext = _(" (%+d heads)") % dh
2362 2360
2363 2361 self.ui.status(_("added %d changesets"
2364 2362 " with %d changes to %d files%s\n")
2365 2363 % (changesets, revisions, files, htext))
2366 2364 self.invalidatevolatilesets()
2367 2365
2368 2366 if changesets > 0:
2369 2367 p = lambda: cl.writepending() and self.root or ""
2370 2368 self.hook('pretxnchangegroup', throw=True,
2371 2369 node=hex(cl.node(clstart)), source=srctype,
2372 2370 url=url, pending=p)
2373 2371
2374 2372 added = [cl.node(r) for r in xrange(clstart, clend)]
2375 2373 publishing = self.ui.configbool('phases', 'publish', True)
2376 2374 if srctype == 'push':
2377 2375 # An old server can not push the boundary itself.
2378 2376 # A new server won't push the boundary if the changeset already
2379 2377 # existed locally as secret
2380 2378 #
2381 2379 # We should not use 'added' here but the list of all changes in
2382 2380 # the bundle
2383 2381 if publishing:
2384 2382 phases.advanceboundary(self, phases.public, srccontent)
2385 2383 else:
2386 2384 phases.advanceboundary(self, phases.draft, srccontent)
2387 2385 phases.retractboundary(self, phases.draft, added)
2388 2386 elif srctype != 'strip':
2389 2387 # publishing only alters behavior during push
2390 2388 #
2391 2389 # strip should not touch boundary at all
2392 2390 phases.retractboundary(self, phases.draft, added)
2393 2391
2394 2392 # make changelog see real files again
2395 2393 cl.finalize(trp)
2396 2394
2397 2395 tr.close()
2398 2396
2399 2397 if changesets > 0:
2400 2398 branchmap.updatecache(self)
2401 2399 def runhooks():
2402 2400 # forcefully update the on-disk branch cache
2403 2401 self.ui.debug("updating the branch cache\n")
2404 2402 self.hook("changegroup", node=hex(cl.node(clstart)),
2405 2403 source=srctype, url=url)
2406 2404
2407 2405 for n in added:
2408 2406 self.hook("incoming", node=hex(n), source=srctype,
2409 2407 url=url)
2410 2408 self._afterlock(runhooks)
2411 2409
2412 2410 finally:
2413 2411 tr.release()
2414 2412 # never return 0 here:
2415 2413 if dh < 0:
2416 2414 return dh - 1
2417 2415 else:
2418 2416 return dh + 1
2419 2417
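# Illustrative sketch (not part of the original module): decoding the
# head-count delta encoded in addchangegroup()'s return value.
def _example_headdelta(ret):
    if ret == 0:
        return 'nothing changed'
    if ret > 0:
        return '%d head(s) added' % (ret - 1)   # ret == 1: count unchanged
    return '%d head(s) removed' % (-ret - 1)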
2420 2418 def stream_in(self, remote, requirements):
2421 2419 lock = self.lock()
2422 2420 try:
2423 2421 # Save remote branchmap. We will use it later
2424 2422 # to speed up branchcache creation
2425 2423 rbranchmap = None
2426 2424 if remote.capable("branchmap"):
2427 2425 rbranchmap = remote.branchmap()
2428 2426
2429 2427 fp = remote.stream_out()
2430 2428 l = fp.readline()
2431 2429 try:
2432 2430 resp = int(l)
2433 2431 except ValueError:
2434 2432 raise error.ResponseError(
2435 2433 _('unexpected response from remote server:'), l)
2436 2434 if resp == 1:
2437 2435 raise util.Abort(_('operation forbidden by server'))
2438 2436 elif resp == 2:
2439 2437 raise util.Abort(_('locking the remote repository failed'))
2440 2438 elif resp != 0:
2441 2439 raise util.Abort(_('the server sent an unknown error code'))
2442 2440 self.ui.status(_('streaming all changes\n'))
2443 2441 l = fp.readline()
2444 2442 try:
2445 2443 total_files, total_bytes = map(int, l.split(' ', 1))
2446 2444 except (ValueError, TypeError):
2447 2445 raise error.ResponseError(
2448 2446 _('unexpected response from remote server:'), l)
2449 2447 self.ui.status(_('%d files to transfer, %s of data\n') %
2450 2448 (total_files, util.bytecount(total_bytes)))
2451 2449 handled_bytes = 0
2452 2450 self.ui.progress(_('clone'), 0, total=total_bytes)
2453 2451 start = time.time()
2454 2452 for i in xrange(total_files):
2455 2453 # XXX doesn't support '\n' or '\r' in filenames
2456 2454 l = fp.readline()
2457 2455 try:
2458 2456 name, size = l.split('\0', 1)
2459 2457 size = int(size)
2460 2458 except (ValueError, TypeError):
2461 2459 raise error.ResponseError(
2462 2460 _('unexpected response from remote server:'), l)
2463 2461 if self.ui.debugflag:
2464 2462 self.ui.debug('adding %s (%s)\n' %
2465 2463 (name, util.bytecount(size)))
2466 2464 # for backwards compat, name was partially encoded
2467 2465 ofp = self.sopener(store.decodedir(name), 'w')
2468 2466 for chunk in util.filechunkiter(fp, limit=size):
2469 2467 handled_bytes += len(chunk)
2470 2468 self.ui.progress(_('clone'), handled_bytes,
2471 2469 total=total_bytes)
2472 2470 ofp.write(chunk)
2473 2471 ofp.close()
2474 2472 elapsed = time.time() - start
2475 2473 if elapsed <= 0:
2476 2474 elapsed = 0.001
2477 2475 self.ui.progress(_('clone'), None)
2478 2476 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2479 2477 (util.bytecount(total_bytes), elapsed,
2480 2478 util.bytecount(total_bytes / elapsed)))
2481 2479
2482 2480 # new requirements = old non-format requirements +
2483 2481 # new format-related requirements from the
2484 2482 # streamed-in repository
2485 2483 requirements.update(set(self.requirements) - self.supportedformats)
2486 2484 self._applyrequirements(requirements)
2487 2485 self._writerequirements()
2488 2486
2489 2487 if rbranchmap:
2490 2488 rbheads = []
2491 2489 for bheads in rbranchmap.itervalues():
2492 2490 rbheads.extend(bheads)
2493 2491
2494 2492 if rbheads:
2495 2493 rtiprev = max((int(self.changelog.rev(node))
2496 2494 for node in rbheads))
2497 2495 cache = branchmap.branchcache(rbranchmap,
2498 2496 self[rtiprev].node(),
2499 2497 rtiprev)
2500 2498 self._branchcache = cache
2501 2499 cache.write(self)
2502 2500 self.invalidate()
2503 2501 return len(self.heads()) + 1
2504 2502 finally:
2505 2503 lock.release()
2506 2504
2507 2505 def clone(self, remote, heads=[], stream=False):
2508 2506 '''clone remote repository.
2509 2507
2510 2508 keyword arguments:
2511 2509 heads: list of revs to clone (forces use of pull)
2512 2510 stream: use streaming clone if possible'''
2513 2511
2514 2512 # now, all clients that can request uncompressed clones can
2515 2513 # read repo formats supported by all servers that can serve
2516 2514 # them.
2517 2515
2518 2516 # if revlog format changes, client will have to check version
2519 2517 # and format flags on "stream" capability, and use
2520 2518 # uncompressed only if compatible.
2521 2519
2522 2520 if not stream:
2523 2521 # if the server explicitly prefers to stream (for fast LANs)
2524 2522 stream = remote.capable('stream-preferred')
2525 2523
2526 2524 if stream and not heads:
2527 2525 # 'stream' means remote revlog format is revlogv1 only
2528 2526 if remote.capable('stream'):
2529 2527 return self.stream_in(remote, set(('revlogv1',)))
2530 2528 # otherwise, 'streamreqs' contains the remote revlog format
2531 2529 streamreqs = remote.capable('streamreqs')
2532 2530 if streamreqs:
2533 2531 streamreqs = set(streamreqs.split(','))
2534 2532 # if we support it, stream in and adjust our requirements
2535 2533 if not streamreqs - self.supportedformats:
2536 2534 return self.stream_in(remote, streamreqs)
2537 2535 return self.pull(remote, heads)
2538 2536
2539 2537 def pushkey(self, namespace, key, old, new):
2540 2538 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2541 2539 old=old, new=new)
2542 2540 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2543 2541 ret = pushkey.push(self, namespace, key, old, new)
2544 2542 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2545 2543 ret=ret)
2546 2544 return ret
2547 2545
2548 2546 def listkeys(self, namespace):
2549 2547 self.hook('prelistkeys', throw=True, namespace=namespace)
2550 2548 self.ui.debug('listing keys for "%s"\n' % namespace)
2551 2549 values = pushkey.list(self, namespace)
2552 2550 self.hook('listkeys', namespace=namespace, values=values)
2553 2551 return values
2554 2552
2555 2553 def debugwireargs(self, one, two, three=None, four=None, five=None):
2556 2554 '''used to test argument passing over the wire'''
2557 2555 return "%s %s %s %s %s" % (one, two, three, four, five)
2558 2556
2559 2557 def savecommitmessage(self, text):
2560 2558 fp = self.opener('last-message.txt', 'wb')
2561 2559 try:
2562 2560 fp.write(text)
2563 2561 finally:
2564 2562 fp.close()
2565 2563 return self.pathto(fp.name[len(self.root) + 1:])
2566 2564
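# Illustrative sketch (not part of the original module): the returned path is
# relative to the current directory, typically '.hg/last-message.txt'; it is
# the path commit() reports when a failing hook aborts an edited commit.
#   msgfn = repo.savecommitmessage('WIP: not ready yet\n')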
2567 2565 # used to avoid circular references so destructors work
2568 2566 def aftertrans(files):
2569 2567 renamefiles = [tuple(t) for t in files]
2570 2568 def a():
2571 2569 for src, dest in renamefiles:
2572 2570 try:
2573 2571 util.rename(src, dest)
2574 2572 except OSError: # journal file does not yet exist
2575 2573 pass
2576 2574 return a
2577 2575
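# Illustrative sketch (not part of the original module): aftertrans() lets a
# transaction rename its journal files to undo files on close without keeping
# a reference back to the repository. Wiring it up might look like this
# (names are hypothetical):
#   renames = [(journalpath, undoname(journalpath))]
#   tr = transaction.transaction(ui.warn, opener, journalpath,
#                                aftertrans(renames))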
2578 2576 def undoname(fn):
2579 2577 base, name = os.path.split(fn)
2580 2578 assert name.startswith('journal')
2581 2579 return os.path.join(base, name.replace('journal', 'undo', 1))
2582 2580
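# Worked example (not part of the original module):
#   undoname('/repo/.hg/journal.dirstate') -> '/repo/.hg/undo.dirstate'
# only the first 'journal' in the basename is replaced, and a name that does
# not start with 'journal' trips the assertion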
2583 2581 def instance(ui, path, create):
2584 2582 return localrepository(ui, util.urllocalpath(path), create)
2585 2583
2586 2584 def islocal(path):
2587 2585 return True