branchmap: extract _updatebranchcache from repo
Pierre-Yves David
r18120:88990d3e default
@@ -1,52 +1,113 @@
1 1 # branchmap.py - logic to compute, maintain and store branchmap for local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev
9 9 import encoding
10 10
11 11 def read(repo):
12 12 partial = {}
13 13 try:
14 14 f = repo.opener("cache/branchheads")
15 15 lines = f.read().split('\n')
16 16 f.close()
17 17 except (IOError, OSError):
18 18 return {}, nullid, nullrev
19 19
20 20 try:
21 21 last, lrev = lines.pop(0).split(" ", 1)
22 22 last, lrev = bin(last), int(lrev)
23 23 if lrev >= len(repo) or repo[lrev].node() != last:
24 24 # invalidate the cache
25 25 raise ValueError('invalidating branch cache (tip differs)')
26 26 for l in lines:
27 27 if not l:
28 28 continue
29 29 node, label = l.split(" ", 1)
30 30 label = encoding.tolocal(label.strip())
31 31 if not node in repo:
32 32 raise ValueError('invalidating branch cache because node '+
33 33 '%s does not exist' % node)
34 34 partial.setdefault(label, []).append(bin(node))
35 35 except KeyboardInterrupt:
36 36 raise
37 37 except Exception, inst:
38 38 if repo.ui.debugflag:
39 39 repo.ui.warn(str(inst), '\n')
40 40 partial, last, lrev = {}, nullid, nullrev
41 41 return partial, last, lrev
42 42
43 43 def write(repo, branches, tip, tiprev):
44 44 try:
45 45 f = repo.opener("cache/branchheads", "w", atomictemp=True)
46 46 f.write("%s %s\n" % (hex(tip), tiprev))
47 47 for label, nodes in branches.iteritems():
48 48 for node in nodes:
49 49 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
50 50 f.close()
51 51 except (IOError, OSError):
52 52 pass
53
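For reference, the cache/branchheads file read and written above is plain text: the first line holds the tip hash and tip revision, and every following line maps one head node to its branch label. A minimal standalone sketch (not part of Mercurial) of parsing that format:

def parsebranchheads(data):
    # First line: "<tiphex> <tiprev>"; remaining lines: "<nodehex> <branch label>".
    lines = data.split('\n')
    tiphex, tiprev = lines.pop(0).split(' ', 1)
    heads = {}
    for line in lines:
        if not line:
            continue
        nodehex, label = line.split(' ', 1)
        heads.setdefault(label.strip(), []).append(nodehex)
    return tiphex, int(tiprev), heads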
54 def update(repo, partial, ctxgen):
55 """Given a branchhead cache, partial, that may have extra nodes or be
56 missing heads, and a generator of nodes that are at least a superset of
57 the missing heads, this function updates partial to be correct.
58 """
59 # collect new branch entries
60 newbranches = {}
61 for c in ctxgen:
62 newbranches.setdefault(c.branch(), []).append(c.node())
63 # if older branchheads are reachable from new ones, they aren't
64 # really branchheads. Note checking parents is insufficient:
65 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
66 for branch, newnodes in newbranches.iteritems():
67 bheads = partial.setdefault(branch, [])
68 # Remove candidate heads that no longer are in the repo (e.g., as
69 # the result of a strip that just happened). Avoid using 'node in
70 # self' here because that dives down into branchcache code somewhat
71 # recursively.
72 bheadrevs = [repo.changelog.rev(node) for node in bheads
73 if repo.changelog.hasnode(node)]
74 newheadrevs = [repo.changelog.rev(node) for node in newnodes
75 if repo.changelog.hasnode(node)]
76 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
77 # Remove duplicates - nodes that are in newheadrevs and are already
78 # in bheadrevs. This can happen if you strip a node whose parent
79 # was already a head (because they're on different branches).
80 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
81
82 # Starting from tip means fewer passes over reachable. If we know
83 # the new candidates are not ancestors of existing heads, we don't
84 # have to examine ancestors of existing heads
85 if ctxisnew:
86 iterrevs = sorted(newheadrevs)
87 else:
88 iterrevs = list(bheadrevs)
89
90 # This loop prunes out two kinds of heads - heads that are
91 # superseded by a head in newheadrevs, and newheadrevs that are not
92 # heads because an existing head is their descendant.
93 while iterrevs:
94 latest = iterrevs.pop()
95 if latest not in bheadrevs:
96 continue
97 ancestors = set(repo.changelog.ancestors([latest],
98 bheadrevs[0]))
99 if ancestors:
100 bheadrevs = [b for b in bheadrevs if b not in ancestors]
101 partial[branch] = [repo.changelog.node(rev) for rev in bheadrevs]
102
103 # There may be branches that cease to exist when the last commit in the
104 # branch was stripped. This code filters them out. Note that the
105 # branch that ceased to exist may not be in newbranches because
106 # newbranches is the set of candidate heads, which when you strip the
107 # last commit in a branch will be the parent branch.
108 for branch in partial.keys():
109 nodes = [head for head in partial[branch]
110 if repo.changelog.hasnode(head)]
111 if not nodes:
112 del partial[branch]
113
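Taken together, read(), update() and write() replace the repository method removed below; a condensed sketch of how a caller is expected to chain them (assuming an existing local repository object named repo, and the functions from this module):

partial, last, lrev = read(repo)                 # load whatever is already cached
tiprev = len(repo) - 1
ctxgen = (repo[r] for r in repo.changelog.revs(lrev + 1, tiprev))
update(repo, partial, ctxgen)                    # fold the newer changesets in
write(repo, partial, repo.changelog.tip(), tiprev)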
@@ -1,346 +1,347 @@
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, short
9 9 from i18n import _
10 10 import util, setdiscovery, treediscovery, phases, obsolete, bookmarks
11 import branchmap
11 12
12 13 def findcommonincoming(repo, remote, heads=None, force=False):
13 14 """Return a tuple (common, anyincoming, heads) used to identify the common
14 15 subset of nodes between repo and remote.
15 16
16 17 "common" is a list of (at least) the heads of the common subset.
17 18 "anyincoming" is testable as a boolean indicating if any nodes are missing
18 19 locally. If remote does not support getbundle, this actually is a list of
19 20 roots of the nodes that would be incoming, to be supplied to
20 21 changegroupsubset. No code except for pull should be relying on this fact
21 22 any longer.
22 23 "heads" is either the supplied heads, or else the remote's heads.
23 24
24 25 If you pass heads and they are all known locally, the response lists just
25 26 these heads in "common" and in "heads".
26 27
27 28 Please use findcommonoutgoing to compute the set of outgoing nodes to give
28 29 extensions a good hook into outgoing.
29 30 """
30 31
31 32 if not remote.capable('getbundle'):
32 33 return treediscovery.findcommonincoming(repo, remote, heads, force)
33 34
34 35 if heads:
35 36 allknown = True
36 37 nm = repo.changelog.nodemap
37 38 for h in heads:
38 39 if nm.get(h) is None:
39 40 allknown = False
40 41 break
41 42 if allknown:
42 43 return (heads, False, heads)
43 44
44 45 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
45 46 abortwhenunrelated=not force)
46 47 common, anyinc, srvheads = res
47 48 return (list(common), anyinc, heads or list(srvheads))
48 49
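A hedged usage sketch of the contract described in the docstring, with other standing in for a peer object obtained elsewhere (for example via hg.peer()):

common, anyinc, rheads = findcommonincoming(repo, other)
if not anyinc:
    repo.ui.status("no incoming changes found\n")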
49 50 class outgoing(object):
50 51 '''Represents the set of nodes present in a local repo but not in a
51 52 (possibly) remote one.
52 53
53 54 Members:
54 55
55 56 missing is a list of all nodes present in local but not in remote.
56 57 common is a list of all nodes shared between the two repos.
57 58 excluded is the list of missing changesets that shouldn't be sent remotely.
58 59 missingheads is the list of heads of missing.
59 60 commonheads is the list of heads of common.
60 61
61 62 The sets are computed on demand from the heads, unless provided upfront
62 63 by discovery.'''
63 64
64 65 def __init__(self, revlog, commonheads, missingheads):
65 66 self.commonheads = commonheads
66 67 self.missingheads = missingheads
67 68 self._revlog = revlog
68 69 self._common = None
69 70 self._missing = None
70 71 self.excluded = []
71 72
72 73 def _computecommonmissing(self):
73 74 sets = self._revlog.findcommonmissing(self.commonheads,
74 75 self.missingheads)
75 76 self._common, self._missing = sets
76 77
77 78 @util.propertycache
78 79 def common(self):
79 80 if self._common is None:
80 81 self._computecommonmissing()
81 82 return self._common
82 83
83 84 @util.propertycache
84 85 def missing(self):
85 86 if self._missing is None:
86 87 self._computecommonmissing()
87 88 return self._missing
88 89
89 90 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
90 91 commoninc=None, portable=False):
91 92 '''Return an outgoing instance to identify the nodes present in repo but
92 93 not in other.
93 94
94 95 If onlyheads is given, only nodes ancestral to nodes in onlyheads
95 96 (inclusive) are included. If you already know the local repo's heads,
96 97 passing them in onlyheads is faster than letting them be recomputed here.
97 98
98 99 If commoninc is given, it must be the result of a prior call to
99 100 findcommonincoming(repo, other, force) to avoid recomputing it here.
100 101
101 102 If portable is given, compute more conservative common and missingheads,
102 103 to make bundles created from the instance more portable.'''
103 104 # declare an empty outgoing object to be filled later
104 105 og = outgoing(repo.changelog, None, None)
105 106
106 107 # get common set if not provided
107 108 if commoninc is None:
108 109 commoninc = findcommonincoming(repo, other, force=force)
109 110 og.commonheads, _any, _hds = commoninc
110 111
111 112 # compute outgoing
112 113 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
113 114 if not mayexclude:
114 115 og.missingheads = onlyheads or repo.heads()
115 116 elif onlyheads is None:
116 117 # use visible heads as it should be cached
117 118 og.missingheads = visibleheads(repo)
118 119 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
119 120 else:
120 121 # compute common, missing and exclude secret stuff
121 122 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
122 123 og._common, allmissing = sets
123 124 og._missing = missing = []
124 125 og.excluded = excluded = []
125 126 for node in allmissing:
126 127 ctx = repo[node]
127 128 if ctx.phase() >= phases.secret or ctx.extinct():
128 129 excluded.append(node)
129 130 else:
130 131 missing.append(node)
131 132 if len(missing) == len(allmissing):
132 133 missingheads = onlyheads
133 134 else: # update missing heads
134 135 missingheads = phases.newheads(repo, onlyheads, excluded)
135 136 og.missingheads = missingheads
136 137 if portable:
137 138 # recompute common and missingheads as if -r<rev> had been given for
138 139 # each head of missing, and --base <rev> for each head of the proper
139 140 # ancestors of missing
140 141 og._computecommonmissing()
141 142 cl = repo.changelog
142 143 missingrevs = set(cl.rev(n) for n in og._missing)
143 144 og._common = set(cl.ancestors(missingrevs)) - missingrevs
144 145 commonheads = set(og.commonheads)
145 146 og.missingheads = [h for h in og.missingheads if h not in commonheads]
146 147
147 148 return og
148 149
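A small sketch of driving findcommonoutgoing before a push; other is again an assumed remote peer, and missing is computed lazily from the heads stored on the returned object:

og = findcommonoutgoing(repo, other)
if og.missing:
    repo.ui.status("%d outgoing changesets\n" % len(og.missing))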
149 150 def _headssummary(repo, remote, outgoing):
150 151 """compute a summary of branch and heads status before and after push
151 152
152 153 return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping
153 154
154 155 - branch: the branch name
155 156 - remoteheads: the list of remote heads known locally
156 157 None if the branch is new
157 158 - newheads: the new remote heads (known locally) with outgoing pushed
158 159 - unsyncedheads: the list of remote heads unknown locally.
159 160 """
160 161 cl = repo.changelog
161 162 headssum = {}
162 163 # A. Create set of branches involved in the push.
163 164 branches = set(repo[n].branch() for n in outgoing.missing)
164 165 remotemap = remote.branchmap()
165 166 newbranches = branches - set(remotemap)
166 167 branches.difference_update(newbranches)
167 168
168 169 # A. register remote heads
169 170 remotebranches = set()
170 171 for branch, heads in remote.branchmap().iteritems():
171 172 remotebranches.add(branch)
172 173 known = []
173 174 unsynced = []
174 175 for h in heads:
175 176 if h in cl.nodemap:
176 177 known.append(h)
177 178 else:
178 179 unsynced.append(h)
179 180 headssum[branch] = (known, list(known), unsynced)
180 181 # B. add new branch data
181 182 missingctx = list(repo[n] for n in outgoing.missing)
182 183 touchedbranches = set()
183 184 for ctx in missingctx:
184 185 branch = ctx.branch()
185 186 touchedbranches.add(branch)
186 187 if branch not in headssum:
187 188 headssum[branch] = (None, [], [])
188 189
189 190 # C drop data about untouched branches:
190 191 for branch in remotebranches - touchedbranches:
191 192 del headssum[branch]
192 193
193 194 # D. Update newmap with outgoing changes.
194 195 # This will possibly add new heads and remove existing ones.
195 196 newmap = dict((branch, heads[1]) for branch, heads in headssum.iteritems()
196 197 if heads[0] is not None)
197 repo._updatebranchcache(newmap, missingctx)
198 branchmap.update(repo, newmap, missingctx)
198 199 for branch, newheads in newmap.iteritems():
199 200 headssum[branch][1][:] = newheads
200 201 return headssum
201 202
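To illustrate the shape of the returned mapping, a sketch of walking it (the real consumer is checkheads below; the branch handling here is only hinted at):

headssum = _headssummary(repo, remote, outgoing)
for branch, (remoteheads, newheads, unsyncedheads) in headssum.items():
    if remoteheads is None:
        pass        # branch does not exist on the remote yet
    elif len(newheads) > len(remoteheads):
        pass        # the push would add heads on this branch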
202 203 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
203 204 """Compute branchmapsummary for repo without branchmap support"""
204 205
205 206 cl = repo.changelog
206 207 # 1-4b. old servers: Check for new topological heads.
207 208 # Construct {old,new}map with branch = None (topological branch).
208 # (code based on _updatebranchcache)
209 # (code based on update)
209 210 oldheads = set(h for h in remoteheads if h in cl.nodemap)
210 211 # all nodes in outgoing.missing are children of either:
211 212 # - an element of oldheads
212 213 # - another element of outgoing.missing
213 214 # - nullrev
214 215 # This explains why the new heads are very simple to compute.
215 216 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
216 217 newheads = list(c.node() for c in r)
217 218 unsynced = inc and set([None]) or set()
218 219 return {None: (oldheads, newheads, unsynced)}
219 220
220 221 def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
221 222 """Check that a push won't add any outgoing head
222 223
223 224 raise Abort error and display ui message as needed.
224 225 """
225 226 # Check for each named branch if we're creating new remote heads.
226 227 # To be a remote head after push, node must be either:
227 228 # - unknown locally
228 229 # - a local outgoing head descended from update
229 230 # - a remote head that's known locally and not
230 231 # ancestral to an outgoing head
231 232 if remoteheads == [nullid]:
232 233 # remote is empty, nothing to check.
233 234 return
234 235
235 236 if remote.capable('branchmap'):
236 237 headssum = _headssummary(repo, remote, outgoing)
237 238 else:
238 239 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
239 240 newbranches = [branch for branch, heads in headssum.iteritems()
240 241 if heads[0] is None]
241 242 # 1. Check for new branches on the remote.
242 243 if newbranches and not newbranch: # new branch requires --new-branch
243 244 branchnames = ', '.join(sorted(newbranches))
244 245 raise util.Abort(_("push creates new remote branches: %s!")
245 246 % branchnames,
246 247 hint=_("use 'hg push --new-branch' to create"
247 248 " new remote branches"))
248 249
249 250 # 2. Compute newly pushed bookmarks. We
250 251 # don't warn about bookmarked heads.
251 252 localbookmarks = repo._bookmarks
252 253 remotebookmarks = remote.listkeys('bookmarks')
253 254 bookmarkedheads = set()
254 255 for bm in localbookmarks:
255 256 rnode = remotebookmarks.get(bm)
256 257 if rnode and rnode in repo:
257 258 lctx, rctx = repo[bm], repo[rnode]
258 259 if bookmarks.validdest(repo, rctx, lctx):
259 260 bookmarkedheads.add(lctx.node())
260 261
261 262 # 3. Check for new heads.
262 263 # If there are more heads after the push than before, a suitable
263 264 # error message, depending on unsynced status, is displayed.
264 265 error = None
265 266 unsynced = False
266 267 allmissing = set(outgoing.missing)
267 268 allfuturecommon = set(c.node() for c in repo.set('%ld', outgoing.common))
268 269 allfuturecommon.update(allmissing)
269 270 for branch, heads in headssum.iteritems():
270 271 if heads[0] is None:
271 272 # Maybe we should abort if we push more than one head
272 273 # for new branches?
273 274 continue
274 275 candidate_newhs = set(heads[1])
275 276 # add unsynced data
276 277 oldhs = set(heads[0])
277 278 oldhs.update(heads[2])
278 279 candidate_newhs.update(heads[2])
279 280 dhs = None
280 281 discardedheads = set()
281 282 if repo.obsstore:
282 283 # remove future heads which are actually obsolete by another
283 284 # pushed element:
284 285 #
285 286 # XXX as above, there are several cases this code does not handle
286 287 # XXX properly
287 288 #
288 289 # (1) if <nh> is public, it won't be affected by obsolete markers
289 290 # and a new head is created
290 291 #
291 292 # (2) if the new heads have ancestors which are not obsolete and
292 293 # not ancestors of any other heads we will have a new head too.
293 294 #
294 295 # These two cases will be easy to handle for known changesets but much
295 296 # more tricky for unsynced changes.
296 297 newhs = set()
297 298 for nh in candidate_newhs:
298 299 if nh in repo and repo[nh].phase() <= phases.public:
299 300 newhs.add(nh)
300 301 else:
301 302 for suc in obsolete.allsuccessors(repo.obsstore, [nh]):
302 303 if suc != nh and suc in allfuturecommon:
303 304 discardedheads.add(nh)
304 305 break
305 306 else:
306 307 newhs.add(nh)
307 308 else:
308 309 newhs = candidate_newhs
309 310 if [h for h in heads[2] if h not in discardedheads]:
310 311 unsynced = True
311 312 if len(newhs) > len(oldhs):
312 313 # strip updates to existing remote heads from the new heads list
313 314 dhs = list(newhs - bookmarkedheads - oldhs)
314 315 if dhs:
315 316 if error is None:
316 317 if branch not in ('default', None):
317 318 error = _("push creates new remote head %s "
318 319 "on branch '%s'!") % (short(dhs[0]), branch)
319 320 else:
320 321 error = _("push creates new remote head %s!"
321 322 ) % short(dhs[0])
322 323 if heads[2]: # unsynced
323 324 hint = _("you should pull and merge or "
324 325 "use push -f to force")
325 326 else:
326 327 hint = _("did you forget to merge? "
327 328 "use push -f to force")
328 329 if branch is not None:
329 330 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
330 331 for h in dhs:
331 332 repo.ui.note(_("new remote head %s\n") % short(h))
332 333 if error:
333 334 raise util.Abort(error, hint=hint)
334 335
335 336 # 6. Check for unsynced changes on involved branches.
336 337 if unsynced:
337 338 repo.ui.warn(_("note: unsynced remote changes!\n"))
338 339
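The per-branch rule applied above can be condensed into a hypothetical helper (ignoring obsolescence markers and unsynced heads), for illustration only:

def wouldcreateheads(oldheads, newheads, bookmarkedheads):
    # More heads than before, and at least one of them is neither an old
    # head nor covered by a bookmark -> the push would be refused.
    dhs = set(newheads) - set(bookmarkedheads) - set(oldheads)
    return len(newheads) > len(oldheads) and bool(dhs)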
339 340 def visibleheads(repo):
340 341 """return the set of visible heads of this repo"""
341 342 return repo.filtered('unserved').heads()
342 343
343 344
344 345 def visiblebranchmap(repo):
345 346 """return a branchmap for the visible set"""
346 347 return repo.filtered('unserved').branchmap()
@@ -1,2679 +1,2619 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class repofilecache(filecache):
23 23 """All filecache usage on repo is done for logic that should be unfiltered
24 24 """
25 25
26 26 def __get__(self, repo, type=None):
27 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 28 def __set__(self, repo, value):
29 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 30 def __delete__(self, repo):
31 31 return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
33 33 class storecache(repofilecache):
34 34 """filecache for files in the store"""
35 35 def join(self, obj, fname):
36 36 return obj.sjoin(fname)
37 37
38 38 class unfilteredpropertycache(propertycache):
39 39 """propertycache that applies to the unfiltered repo only"""
40 40
41 41 def __get__(self, repo, type=None):
42 42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43 43
44 44 class filteredpropertycache(propertycache):
45 45 """propertycache that must take filtering into account"""
46 46
47 47 def cachevalue(self, obj, value):
48 48 object.__setattr__(obj, self.name, value)
49 49
50 50
51 51 def hasunfilteredcache(repo, name):
52 52 """check if a repo has an unfilteredproperty cached value for <name>"""
53 53 return name in vars(repo.unfiltered())
54 54
55 55 def unfilteredmethod(orig):
56 56 """decorate a method that always needs to be run on the unfiltered version"""
57 57 def wrapper(repo, *args, **kwargs):
58 58 return orig(repo.unfiltered(), *args, **kwargs)
59 59 return wrapper
60 60
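The caching decorators above all funnel their work through repo.unfiltered(); conceptually they behave like a property whose value is cached on the unfiltered repository. A simplified stand-in (a sketch of the idea, not the real implementation):

class cachedonunfiltered(object):
    """Minimal sketch: compute once, store the value on the unfiltered repo."""
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if self.name not in vars(unfi):
            setattr(unfi, self.name, self.func(unfi))
        return getattr(unfi, self.name)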
61 61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
62 62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63 63
64 64 class localpeer(peer.peerrepository):
65 65 '''peer for a local repo; reflects only the most recent API'''
66 66
67 67 def __init__(self, repo, caps=MODERNCAPS):
68 68 peer.peerrepository.__init__(self)
69 69 self._repo = repo
70 70 self.ui = repo.ui
71 71 self._caps = repo._restrictcapabilities(caps)
72 72 self.requirements = repo.requirements
73 73 self.supportedformats = repo.supportedformats
74 74
75 75 def close(self):
76 76 self._repo.close()
77 77
78 78 def _capabilities(self):
79 79 return self._caps
80 80
81 81 def local(self):
82 82 return self._repo
83 83
84 84 def canpush(self):
85 85 return True
86 86
87 87 def url(self):
88 88 return self._repo.url()
89 89
90 90 def lookup(self, key):
91 91 return self._repo.lookup(key)
92 92
93 93 def branchmap(self):
94 94 return discovery.visiblebranchmap(self._repo)
95 95
96 96 def heads(self):
97 97 return discovery.visibleheads(self._repo)
98 98
99 99 def known(self, nodes):
100 100 return self._repo.known(nodes)
101 101
102 102 def getbundle(self, source, heads=None, common=None):
103 103 return self._repo.getbundle(source, heads=heads, common=common)
104 104
105 105 # TODO We might want to move the next two calls into legacypeer and add
106 106 # unbundle instead.
107 107
108 108 def lock(self):
109 109 return self._repo.lock()
110 110
111 111 def addchangegroup(self, cg, source, url):
112 112 return self._repo.addchangegroup(cg, source, url)
113 113
114 114 def pushkey(self, namespace, key, old, new):
115 115 return self._repo.pushkey(namespace, key, old, new)
116 116
117 117 def listkeys(self, namespace):
118 118 return self._repo.listkeys(namespace)
119 119
120 120 def debugwireargs(self, one, two, three=None, four=None, five=None):
121 121 '''used to test argument passing over the wire'''
122 122 return "%s %s %s %s %s" % (one, two, three, four, five)
123 123
124 124 class locallegacypeer(localpeer):
125 125 '''peer extension which implements legacy methods too; used for tests with
126 126 restricted capabilities'''
127 127
128 128 def __init__(self, repo):
129 129 localpeer.__init__(self, repo, caps=LEGACYCAPS)
130 130
131 131 def branches(self, nodes):
132 132 return self._repo.branches(nodes)
133 133
134 134 def between(self, pairs):
135 135 return self._repo.between(pairs)
136 136
137 137 def changegroup(self, basenodes, source):
138 138 return self._repo.changegroup(basenodes, source)
139 139
140 140 def changegroupsubset(self, bases, heads, source):
141 141 return self._repo.changegroupsubset(bases, heads, source)
142 142
143 143 class localrepository(object):
144 144
145 145 supportedformats = set(('revlogv1', 'generaldelta'))
146 146 supported = supportedformats | set(('store', 'fncache', 'shared',
147 147 'dotencode'))
148 148 openerreqs = set(('revlogv1', 'generaldelta'))
149 149 requirements = ['revlogv1']
150 150
151 151 def _baserequirements(self, create):
152 152 return self.requirements[:]
153 153
154 154 def __init__(self, baseui, path=None, create=False):
155 155 self.wvfs = scmutil.vfs(path, expand=True)
156 156 self.wopener = self.wvfs
157 157 self.root = self.wvfs.base
158 158 self.path = self.wvfs.join(".hg")
159 159 self.origroot = path
160 160 self.auditor = scmutil.pathauditor(self.root, self._checknested)
161 161 self.vfs = scmutil.vfs(self.path)
162 162 self.opener = self.vfs
163 163 self.baseui = baseui
164 164 self.ui = baseui.copy()
165 165 # A list of callbacks to shape the phase if no data were found.
166 166 # Callbacks are in the form: func(repo, roots) --> processed root.
167 167 # This list is to be filled by extensions during repo setup
168 168 self._phasedefaults = []
169 169 try:
170 170 self.ui.readconfig(self.join("hgrc"), self.root)
171 171 extensions.loadall(self.ui)
172 172 except IOError:
173 173 pass
174 174
175 175 if not self.vfs.isdir():
176 176 if create:
177 177 if not self.wvfs.exists():
178 178 self.wvfs.makedirs()
179 179 self.vfs.makedir(notindexed=True)
180 180 requirements = self._baserequirements(create)
181 181 if self.ui.configbool('format', 'usestore', True):
182 182 self.vfs.mkdir("store")
183 183 requirements.append("store")
184 184 if self.ui.configbool('format', 'usefncache', True):
185 185 requirements.append("fncache")
186 186 if self.ui.configbool('format', 'dotencode', True):
187 187 requirements.append('dotencode')
188 188 # create an invalid changelog
189 189 self.vfs.append(
190 190 "00changelog.i",
191 191 '\0\0\0\2' # represents revlogv2
192 192 ' dummy changelog to prevent using the old repo layout'
193 193 )
194 194 if self.ui.configbool('format', 'generaldelta', False):
195 195 requirements.append("generaldelta")
196 196 requirements = set(requirements)
197 197 else:
198 198 raise error.RepoError(_("repository %s not found") % path)
199 199 elif create:
200 200 raise error.RepoError(_("repository %s already exists") % path)
201 201 else:
202 202 try:
203 203 requirements = scmutil.readrequires(self.vfs, self.supported)
204 204 except IOError, inst:
205 205 if inst.errno != errno.ENOENT:
206 206 raise
207 207 requirements = set()
208 208
209 209 self.sharedpath = self.path
210 210 try:
211 211 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
212 212 if not os.path.exists(s):
213 213 raise error.RepoError(
214 214 _('.hg/sharedpath points to nonexistent directory %s') % s)
215 215 self.sharedpath = s
216 216 except IOError, inst:
217 217 if inst.errno != errno.ENOENT:
218 218 raise
219 219
220 220 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
221 221 self.spath = self.store.path
222 222 self.svfs = self.store.vfs
223 223 self.sopener = self.svfs
224 224 self.sjoin = self.store.join
225 225 self.vfs.createmode = self.store.createmode
226 226 self._applyrequirements(requirements)
227 227 if create:
228 228 self._writerequirements()
229 229
230 230
231 231 self._branchcache = None
232 232 self._branchcachetip = None
233 233 self.filterpats = {}
234 234 self._datafilters = {}
235 235 self._transref = self._lockref = self._wlockref = None
236 236
237 237 # A cache for various files under .hg/ that tracks file changes,
238 238 # (used by the filecache decorator)
239 239 #
240 240 # Maps a property name to its util.filecacheentry
241 241 self._filecache = {}
242 242
243 243 # hold sets of revision to be filtered
244 244 # should be cleared when something might have changed the filter value:
245 245 # - new changesets,
246 246 # - phase change,
247 247 # - new obsolescence marker,
248 248 # - working directory parent change,
249 249 # - bookmark changes
250 250 self.filteredrevcache = {}
251 251
252 252 def close(self):
253 253 pass
254 254
255 255 def _restrictcapabilities(self, caps):
256 256 return caps
257 257
258 258 def _applyrequirements(self, requirements):
259 259 self.requirements = requirements
260 260 self.sopener.options = dict((r, 1) for r in requirements
261 261 if r in self.openerreqs)
262 262
263 263 def _writerequirements(self):
264 264 reqfile = self.opener("requires", "w")
265 265 for r in self.requirements:
266 266 reqfile.write("%s\n" % r)
267 267 reqfile.close()
268 268
269 269 def _checknested(self, path):
270 270 """Determine if path is a legal nested repository."""
271 271 if not path.startswith(self.root):
272 272 return False
273 273 subpath = path[len(self.root) + 1:]
274 274 normsubpath = util.pconvert(subpath)
275 275
276 276 # XXX: Checking against the current working copy is wrong in
277 277 # the sense that it can reject things like
278 278 #
279 279 # $ hg cat -r 10 sub/x.txt
280 280 #
281 281 # if sub/ is no longer a subrepository in the working copy
282 282 # parent revision.
283 283 #
284 284 # However, it can of course also allow things that would have
285 285 # been rejected before, such as the above cat command if sub/
286 286 # is a subrepository now, but was a normal directory before.
287 287 # The old path auditor would have rejected by mistake since it
288 288 # panics when it sees sub/.hg/.
289 289 #
290 290 # All in all, checking against the working copy seems sensible
291 291 # since we want to prevent access to nested repositories on
292 292 # the filesystem *now*.
293 293 ctx = self[None]
294 294 parts = util.splitpath(subpath)
295 295 while parts:
296 296 prefix = '/'.join(parts)
297 297 if prefix in ctx.substate:
298 298 if prefix == normsubpath:
299 299 return True
300 300 else:
301 301 sub = ctx.sub(prefix)
302 302 return sub.checknested(subpath[len(prefix) + 1:])
303 303 else:
304 304 parts.pop()
305 305 return False
306 306
307 307 def peer(self):
308 308 return localpeer(self) # not cached to avoid reference cycle
309 309
310 310 def unfiltered(self):
311 311 """Return unfiltered version of the repository
312 312
313 313 Intended to be overwritten by filtered repo."""
314 314 return self
315 315
316 316 def filtered(self, name):
317 317 """Return a filtered version of a repository"""
318 318 # build a new class with the mixin and the current class
319 319 # (possibly a subclass of the repo)
320 320 class proxycls(repoview.repoview, self.unfiltered().__class__):
321 321 pass
322 322 return proxycls(self, name)
323 323
324 324 @repofilecache('bookmarks')
325 325 def _bookmarks(self):
326 326 return bookmarks.bmstore(self)
327 327
328 328 @repofilecache('bookmarks.current')
329 329 def _bookmarkcurrent(self):
330 330 return bookmarks.readcurrent(self)
331 331
332 332 def bookmarkheads(self, bookmark):
333 333 name = bookmark.split('@', 1)[0]
334 334 heads = []
335 335 for mark, n in self._bookmarks.iteritems():
336 336 if mark.split('@', 1)[0] == name:
337 337 heads.append(n)
338 338 return heads
339 339
340 340 @storecache('phaseroots')
341 341 def _phasecache(self):
342 342 return phases.phasecache(self, self._phasedefaults)
343 343
344 344 @storecache('obsstore')
345 345 def obsstore(self):
346 346 store = obsolete.obsstore(self.sopener)
347 347 if store and not obsolete._enabled:
348 348 # message is rare enough to not be translated
349 349 msg = 'obsolete feature not enabled but %i markers found!\n'
350 350 self.ui.warn(msg % len(list(store)))
351 351 return store
352 352
353 353 @unfilteredpropertycache
354 354 def hiddenrevs(self):
355 355 """hiddenrevs: revs that should be hidden by command and tools
356 356
357 357 This set is carried on the repo to ease initialization and lazy
358 358 loading; it'll probably move back to changelog for efficiency and
359 359 consistency reasons.
360 360
361 361 Note that the hiddenrevs will need invalidation when
362 362 - a new changeset is added (possibly unstable above extinct)
363 363 - a new obsolete marker is added (possibly a new extinct changeset)
364 364
365 365 hidden changesets cannot have non-hidden descendants
366 366 """
367 367 hidden = set()
368 368 if self.obsstore:
369 369 ### hide extinct changeset that are not accessible by any mean
370 370 hiddenquery = 'extinct() - ::(. + bookmark())'
371 371 hidden.update(self.revs(hiddenquery))
372 372 return hidden
373 373
374 374 @storecache('00changelog.i')
375 375 def changelog(self):
376 376 c = changelog.changelog(self.sopener)
377 377 if 'HG_PENDING' in os.environ:
378 378 p = os.environ['HG_PENDING']
379 379 if p.startswith(self.root):
380 380 c.readpending('00changelog.i.a')
381 381 return c
382 382
383 383 @storecache('00manifest.i')
384 384 def manifest(self):
385 385 return manifest.manifest(self.sopener)
386 386
387 387 @repofilecache('dirstate')
388 388 def dirstate(self):
389 389 warned = [0]
390 390 def validate(node):
391 391 try:
392 392 self.changelog.rev(node)
393 393 return node
394 394 except error.LookupError:
395 395 if not warned[0]:
396 396 warned[0] = True
397 397 self.ui.warn(_("warning: ignoring unknown"
398 398 " working parent %s!\n") % short(node))
399 399 return nullid
400 400
401 401 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
402 402
403 403 def __getitem__(self, changeid):
404 404 if changeid is None:
405 405 return context.workingctx(self)
406 406 return context.changectx(self, changeid)
407 407
408 408 def __contains__(self, changeid):
409 409 try:
410 410 return bool(self.lookup(changeid))
411 411 except error.RepoLookupError:
412 412 return False
413 413
414 414 def __nonzero__(self):
415 415 return True
416 416
417 417 def __len__(self):
418 418 return len(self.changelog)
419 419
420 420 def __iter__(self):
421 421 return iter(self.changelog)
422 422
423 423 def revs(self, expr, *args):
424 424 '''Return a list of revisions matching the given revset'''
425 425 expr = revset.formatspec(expr, *args)
426 426 m = revset.match(None, expr)
427 427 return [r for r in m(self, list(self))]
428 428
429 429 def set(self, expr, *args):
430 430 '''
431 431 Yield a context for each matching revision, after doing arg
432 432 replacement via revset.formatspec
433 433 '''
434 434 for r in self.revs(expr, *args):
435 435 yield self[r]
436 436
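revs() and set() accept a revset template whose arguments are expanded by revset.formatspec; a brief usage sketch (the branch name is only an example):

for ctx in repo.set('head() and branch(%s)', 'default'):
    repo.ui.note("%s\n" % ctx)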
437 437 def url(self):
438 438 return 'file:' + self.root
439 439
440 440 def hook(self, name, throw=False, **args):
441 441 return hook.hook(self.ui, self, name, throw, **args)
442 442
443 443 @unfilteredmethod
444 444 def _tag(self, names, node, message, local, user, date, extra={}):
445 445 if isinstance(names, str):
446 446 names = (names,)
447 447
448 448 branches = self.branchmap()
449 449 for name in names:
450 450 self.hook('pretag', throw=True, node=hex(node), tag=name,
451 451 local=local)
452 452 if name in branches:
453 453 self.ui.warn(_("warning: tag %s conflicts with existing"
454 454 " branch name\n") % name)
455 455
456 456 def writetags(fp, names, munge, prevtags):
457 457 fp.seek(0, 2)
458 458 if prevtags and prevtags[-1] != '\n':
459 459 fp.write('\n')
460 460 for name in names:
461 461 m = munge and munge(name) or name
462 462 if (self._tagscache.tagtypes and
463 463 name in self._tagscache.tagtypes):
464 464 old = self.tags().get(name, nullid)
465 465 fp.write('%s %s\n' % (hex(old), m))
466 466 fp.write('%s %s\n' % (hex(node), m))
467 467 fp.close()
468 468
469 469 prevtags = ''
470 470 if local:
471 471 try:
472 472 fp = self.opener('localtags', 'r+')
473 473 except IOError:
474 474 fp = self.opener('localtags', 'a')
475 475 else:
476 476 prevtags = fp.read()
477 477
478 478 # local tags are stored in the current charset
479 479 writetags(fp, names, None, prevtags)
480 480 for name in names:
481 481 self.hook('tag', node=hex(node), tag=name, local=local)
482 482 return
483 483
484 484 try:
485 485 fp = self.wfile('.hgtags', 'rb+')
486 486 except IOError, e:
487 487 if e.errno != errno.ENOENT:
488 488 raise
489 489 fp = self.wfile('.hgtags', 'ab')
490 490 else:
491 491 prevtags = fp.read()
492 492
493 493 # committed tags are stored in UTF-8
494 494 writetags(fp, names, encoding.fromlocal, prevtags)
495 495
496 496 fp.close()
497 497
498 498 self.invalidatecaches()
499 499
500 500 if '.hgtags' not in self.dirstate:
501 501 self[None].add(['.hgtags'])
502 502
503 503 m = matchmod.exact(self.root, '', ['.hgtags'])
504 504 tagnode = self.commit(message, user, date, extra=extra, match=m)
505 505
506 506 for name in names:
507 507 self.hook('tag', node=hex(node), tag=name, local=local)
508 508
509 509 return tagnode
510 510
511 511 def tag(self, names, node, message, local, user, date):
512 512 '''tag a revision with one or more symbolic names.
513 513
514 514 names is a list of strings or, when adding a single tag, names may be a
515 515 string.
516 516
517 517 if local is True, the tags are stored in a per-repository file.
518 518 otherwise, they are stored in the .hgtags file, and a new
519 519 changeset is committed with the change.
520 520
521 521 keyword arguments:
522 522
523 523 local: whether to store tags in non-version-controlled file
524 524 (default False)
525 525
526 526 message: commit message to use if committing
527 527
528 528 user: name of user to use if committing
529 529
530 530 date: date tuple to use if committing'''
531 531
532 532 if not local:
533 533 for x in self.status()[:5]:
534 534 if '.hgtags' in x:
535 535 raise util.Abort(_('working copy of .hgtags is changed '
536 536 '(please commit .hgtags manually)'))
537 537
538 538 self.tags() # instantiate the cache
539 539 self._tag(names, node, message, local, user, date)
540 540
541 541 @filteredpropertycache
542 542 def _tagscache(self):
543 543 '''Returns a tagscache object that contains various tags related
544 544 caches.'''
545 545
546 546 # This simplifies its cache management by having one decorated
547 547 # function (this one) and the rest simply fetch things from it.
548 548 class tagscache(object):
549 549 def __init__(self):
550 550 # These two define the set of tags for this repository. tags
551 551 # maps tag name to node; tagtypes maps tag name to 'global' or
552 552 # 'local'. (Global tags are defined by .hgtags across all
553 553 # heads, and local tags are defined in .hg/localtags.)
554 554 # They constitute the in-memory cache of tags.
555 555 self.tags = self.tagtypes = None
556 556
557 557 self.nodetagscache = self.tagslist = None
558 558
559 559 cache = tagscache()
560 560 cache.tags, cache.tagtypes = self._findtags()
561 561
562 562 return cache
563 563
564 564 def tags(self):
565 565 '''return a mapping of tag to node'''
566 566 t = {}
567 567 if self.changelog.filteredrevs:
568 568 tags, tt = self._findtags()
569 569 else:
570 570 tags = self._tagscache.tags
571 571 for k, v in tags.iteritems():
572 572 try:
573 573 # ignore tags to unknown nodes
574 574 self.changelog.rev(v)
575 575 t[k] = v
576 576 except (error.LookupError, ValueError):
577 577 pass
578 578 return t
579 579
580 580 def _findtags(self):
581 581 '''Do the hard work of finding tags. Return a pair of dicts
582 582 (tags, tagtypes) where tags maps tag name to node, and tagtypes
583 583 maps tag name to a string like \'global\' or \'local\'.
584 584 Subclasses or extensions are free to add their own tags, but
585 585 should be aware that the returned dicts will be retained for the
586 586 duration of the localrepo object.'''
587 587
588 588 # XXX what tagtype should subclasses/extensions use? Currently
589 589 # mq and bookmarks add tags, but do not set the tagtype at all.
590 590 # Should each extension invent its own tag type? Should there
591 591 # be one tagtype for all such "virtual" tags? Or is the status
592 592 # quo fine?
593 593
594 594 alltags = {} # map tag name to (node, hist)
595 595 tagtypes = {}
596 596
597 597 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
598 598 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
599 599
600 600 # Build the return dicts. Have to re-encode tag names because
601 601 # the tags module always uses UTF-8 (in order not to lose info
602 602 # writing to the cache), but the rest of Mercurial wants them in
603 603 # local encoding.
604 604 tags = {}
605 605 for (name, (node, hist)) in alltags.iteritems():
606 606 if node != nullid:
607 607 tags[encoding.tolocal(name)] = node
608 608 tags['tip'] = self.changelog.tip()
609 609 tagtypes = dict([(encoding.tolocal(name), value)
610 610 for (name, value) in tagtypes.iteritems()])
611 611 return (tags, tagtypes)
612 612
613 613 def tagtype(self, tagname):
614 614 '''
615 615 return the type of the given tag. result can be:
616 616
617 617 'local' : a local tag
618 618 'global' : a global tag
619 619 None : tag does not exist
620 620 '''
621 621
622 622 return self._tagscache.tagtypes.get(tagname)
623 623
624 624 def tagslist(self):
625 625 '''return a list of tags ordered by revision'''
626 626 if not self._tagscache.tagslist:
627 627 l = []
628 628 for t, n in self.tags().iteritems():
629 629 r = self.changelog.rev(n)
630 630 l.append((r, t, n))
631 631 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
632 632
633 633 return self._tagscache.tagslist
634 634
635 635 def nodetags(self, node):
636 636 '''return the tags associated with a node'''
637 637 if not self._tagscache.nodetagscache:
638 638 nodetagscache = {}
639 639 for t, n in self._tagscache.tags.iteritems():
640 640 nodetagscache.setdefault(n, []).append(t)
641 641 for tags in nodetagscache.itervalues():
642 642 tags.sort()
643 643 self._tagscache.nodetagscache = nodetagscache
644 644 return self._tagscache.nodetagscache.get(node, [])
645 645
646 646 def nodebookmarks(self, node):
647 647 marks = []
648 648 for bookmark, n in self._bookmarks.iteritems():
649 649 if n == node:
650 650 marks.append(bookmark)
651 651 return sorted(marks)
652 652
653 653 def _cacheabletip(self):
654 654 """tip-most revision stable enought to used in persistent cache
655 655
656 656 This function is overwritten by MQ to ensure we do not write cache for
657 657 a part of the history that will likely change.
658 658
659 659 Efficient handling of filtered revisions in branchcache should offer a
660 660 better alternative. But we are using this approach until it is ready.
661 661 """
662 662 cl = self.changelog
663 663 return cl.rev(cl.tip())
664 664
665 665 @unfilteredmethod # Until we get a smarter cache management
666 666 def updatebranchcache(self):
667 667 cl = self.changelog
668 668 tip = cl.tip()
669 669 if self._branchcache is not None and self._branchcachetip == tip:
670 670 return
671 671
672 672 oldtip = self._branchcachetip
673 673 if oldtip is None or oldtip not in cl.nodemap:
674 674 partial, last, lrev = branchmap.read(self)
675 675 else:
676 676 lrev = cl.rev(oldtip)
677 677 partial = self._branchcache
678 678
679 679 catip = self._cacheabletip()
680 680 # if lrev == catip: cache is already up to date
681 681 # if lrev > catip: we have uncachable elements in `partial` and can't
682 682 # write them to disk
683 683 if lrev < catip:
684 684 ctxgen = (self[r] for r in cl.revs(lrev + 1, catip))
685 self._updatebranchcache(partial, ctxgen)
685 branchmap.update(self, partial, ctxgen)
686 686 branchmap.write(self, partial, cl.node(catip), catip)
687 687 lrev = catip
688 688 # If the cacheable tip was lower than the actual tip, we need to update the
689 689 # cache up to tip. This update (from cacheable to actual tip) is not
690 690 # written to disk since it's not cacheable.
691 691 tiprev = len(self) - 1
692 692 if lrev < tiprev:
693 693 ctxgen = (self[r] for r in cl.revs(lrev + 1, tiprev))
694 self._updatebranchcache(partial, ctxgen)
694 branchmap.update(self, partial, ctxgen)
695 695 self._branchcache = partial
696 696 self._branchcachetip = tip
697 697
698 698 def branchmap(self):
699 699 '''returns a dictionary {branch: [branchheads]}'''
700 700 if self.changelog.filteredrevs:
701 701 # some changesets are excluded; we can't use the cache
702 branchmap = {}
703 self._updatebranchcache(branchmap, (self[r] for r in self))
704 return branchmap
702 bmap = {}
703 branchmap.update(self, bmap, (self[r] for r in self))
704 return bmap
705 705 else:
706 706 self.updatebranchcache()
707 707 return self._branchcache
708 708
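Callers only see the resulting dictionary; a short sketch of consuming branchmap() (the loop body is illustrative):

heads = repo.branchmap()              # {branch name: [head nodes]}
for branch, nodes in sorted(heads.items()):
    repo.ui.note("%s: %d head(s)\n" % (branch, len(nodes)))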
709 709
710 710 def _branchtip(self, heads):
711 711 '''return the tipmost branch head in heads'''
712 712 tip = heads[-1]
713 713 for h in reversed(heads):
714 714 if not self[h].closesbranch():
715 715 tip = h
716 716 break
717 717 return tip
718 718
719 719 def branchtip(self, branch):
720 720 '''return the tip node for a given branch'''
721 721 if branch not in self.branchmap():
722 722 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
723 723 return self._branchtip(self.branchmap()[branch])
724 724
725 725 def branchtags(self):
726 726 '''return a dict where branch names map to the tipmost head of
727 727 the branch, open heads come before closed'''
728 728 bt = {}
729 729 for bn, heads in self.branchmap().iteritems():
730 730 bt[bn] = self._branchtip(heads)
731 731 return bt
732 732
733 def _updatebranchcache(self, partial, ctxgen):
734 """Given a branchhead cache, partial, that may have extra nodes or be
735 missing heads, and a generator of nodes that are at least a superset of
736 heads missing, this function updates partial to be correct.
737 """
738 # collect new branch entries
739 newbranches = {}
740 for c in ctxgen:
741 newbranches.setdefault(c.branch(), []).append(c.node())
742 # if older branchheads are reachable from new ones, they aren't
743 # really branchheads. Note checking parents is insufficient:
744 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
745 for branch, newnodes in newbranches.iteritems():
746 bheads = partial.setdefault(branch, [])
747 # Remove candidate heads that no longer are in the repo (e.g., as
748 # the result of a strip that just happened). Avoid using 'node in
749 # self' here because that dives down into branchcache code somewhat
750 # recursively.
751 bheadrevs = [self.changelog.rev(node) for node in bheads
752 if self.changelog.hasnode(node)]
753 newheadrevs = [self.changelog.rev(node) for node in newnodes
754 if self.changelog.hasnode(node)]
755 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
756 # Remove duplicates - nodes that are in newheadrevs and are already
757 # in bheadrevs. This can happen if you strip a node whose parent
758 # was already a head (because they're on different branches).
759 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
760
761 # Starting from tip means fewer passes over reachable. If we know
762 # the new candidates are not ancestors of existing heads, we don't
763 # have to examine ancestors of existing heads
764 if ctxisnew:
765 iterrevs = sorted(newheadrevs)
766 else:
767 iterrevs = list(bheadrevs)
768
769 # This loop prunes out two kinds of heads - heads that are
770 # superseded by a head in newheadrevs, and newheadrevs that are not
771 # heads because an existing head is their descendant.
772 while iterrevs:
773 latest = iterrevs.pop()
774 if latest not in bheadrevs:
775 continue
776 ancestors = set(self.changelog.ancestors([latest],
777 bheadrevs[0]))
778 if ancestors:
779 bheadrevs = [b for b in bheadrevs if b not in ancestors]
780 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
781
782 # There may be branches that cease to exist when the last commit in the
783 # branch was stripped. This code filters them out. Note that the
784 # branch that ceased to exist may not be in newbranches because
785 # newbranches is the set of candidate heads, which when you strip the
786 # last commit in a branch will be the parent branch.
787 for branch in partial.keys():
788 nodes = [head for head in partial[branch]
789 if self.changelog.hasnode(head)]
790 if not nodes:
791 del partial[branch]
792
793 733 def lookup(self, key):
794 734 return self[key].node()
795 735
796 736 def lookupbranch(self, key, remote=None):
797 737 repo = remote or self
798 738 if key in repo.branchmap():
799 739 return key
800 740
801 741 repo = (remote and remote.local()) and remote or self
802 742 return repo[key].branch()
803 743
804 744 def known(self, nodes):
805 745 nm = self.changelog.nodemap
806 746 pc = self._phasecache
807 747 result = []
808 748 for n in nodes:
809 749 r = nm.get(n)
810 750 resp = not (r is None or pc.phase(self, r) >= phases.secret)
811 751 result.append(resp)
812 752 return result
813 753
814 754 def local(self):
815 755 return self
816 756
817 757 def cancopy(self):
818 758 return self.local() # so statichttprepo's override of local() works
819 759
820 760 def join(self, f):
821 761 return os.path.join(self.path, f)
822 762
823 763 def wjoin(self, f):
824 764 return os.path.join(self.root, f)
825 765
826 766 def file(self, f):
827 767 if f[0] == '/':
828 768 f = f[1:]
829 769 return filelog.filelog(self.sopener, f)
830 770
831 771 def changectx(self, changeid):
832 772 return self[changeid]
833 773
834 774 def parents(self, changeid=None):
835 775 '''get list of changectxs for parents of changeid'''
836 776 return self[changeid].parents()
837 777
838 778 def setparents(self, p1, p2=nullid):
839 779 copies = self.dirstate.setparents(p1, p2)
840 780 if copies:
841 781 # Adjust copy records, the dirstate cannot do it, it
842 782 # requires access to parents manifests. Preserve them
843 783 # only for entries added to first parent.
844 784 pctx = self[p1]
845 785 for f in copies:
846 786 if f not in pctx and copies[f] in pctx:
847 787 self.dirstate.copy(copies[f], f)
848 788
849 789 def filectx(self, path, changeid=None, fileid=None):
850 790 """changeid can be a changeset revision, node, or tag.
851 791 fileid can be a file revision or node."""
852 792 return context.filectx(self, path, changeid, fileid)
853 793
854 794 def getcwd(self):
855 795 return self.dirstate.getcwd()
856 796
857 797 def pathto(self, f, cwd=None):
858 798 return self.dirstate.pathto(f, cwd)
859 799
860 800 def wfile(self, f, mode='r'):
861 801 return self.wopener(f, mode)
862 802
863 803 def _link(self, f):
864 804 return os.path.islink(self.wjoin(f))
865 805
866 806 def _loadfilter(self, filter):
867 807 if filter not in self.filterpats:
868 808 l = []
869 809 for pat, cmd in self.ui.configitems(filter):
870 810 if cmd == '!':
871 811 continue
872 812 mf = matchmod.match(self.root, '', [pat])
873 813 fn = None
874 814 params = cmd
875 815 for name, filterfn in self._datafilters.iteritems():
876 816 if cmd.startswith(name):
877 817 fn = filterfn
878 818 params = cmd[len(name):].lstrip()
879 819 break
880 820 if not fn:
881 821 fn = lambda s, c, **kwargs: util.filter(s, c)
882 822 # Wrap old filters not supporting keyword arguments
883 823 if not inspect.getargspec(fn)[2]:
884 824 oldfn = fn
885 825 fn = lambda s, c, **kwargs: oldfn(s, c)
886 826 l.append((mf, fn, params))
887 827 self.filterpats[filter] = l
888 828 return self.filterpats[filter]
889 829
890 830 def _filter(self, filterpats, filename, data):
891 831 for mf, fn, cmd in filterpats:
892 832 if mf(filename):
893 833 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
894 834 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
895 835 break
896 836
897 837 return data
898 838
899 839 @unfilteredpropertycache
900 840 def _encodefilterpats(self):
901 841 return self._loadfilter('encode')
902 842
903 843 @unfilteredpropertycache
904 844 def _decodefilterpats(self):
905 845 return self._loadfilter('decode')
906 846
907 847 def adddatafilter(self, name, filter):
908 848 self._datafilters[name] = filter
909 849
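_loadfilter() pairs patterns from the [encode] and [decode] configuration sections with either a shell command or a data filter registered through adddatafilter(); a hedged sketch of registering such a filter (the name and behaviour are purely illustrative):

def upperfilter(s, cmd, **kwargs):
    # 'cmd' receives the remainder of the configured command string after the
    # registered name; ui, repo and filename arrive as keyword arguments.
    return s.upper()

repo.adddatafilter('upper:', upperfilter)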
910 850 def wread(self, filename):
911 851 if self._link(filename):
912 852 data = os.readlink(self.wjoin(filename))
913 853 else:
914 854 data = self.wopener.read(filename)
915 855 return self._filter(self._encodefilterpats, filename, data)
916 856
917 857 def wwrite(self, filename, data, flags):
918 858 data = self._filter(self._decodefilterpats, filename, data)
919 859 if 'l' in flags:
920 860 self.wopener.symlink(data, filename)
921 861 else:
922 862 self.wopener.write(filename, data)
923 863 if 'x' in flags:
924 864 util.setflags(self.wjoin(filename), False, True)
925 865
926 866 def wwritedata(self, filename, data):
927 867 return self._filter(self._decodefilterpats, filename, data)
928 868
929 869 def transaction(self, desc):
930 870 tr = self._transref and self._transref() or None
931 871 if tr and tr.running():
932 872 return tr.nest()
933 873
934 874 # abort here if the journal already exists
935 875 if os.path.exists(self.sjoin("journal")):
936 876 raise error.RepoError(
937 877 _("abandoned transaction found - run hg recover"))
938 878
939 879 self._writejournal(desc)
940 880 renames = [(x, undoname(x)) for x in self._journalfiles()]
941 881
942 882 tr = transaction.transaction(self.ui.warn, self.sopener,
943 883 self.sjoin("journal"),
944 884 aftertrans(renames),
945 885 self.store.createmode)
946 886 self._transref = weakref.ref(tr)
947 887 return tr
948 888
949 889 def _journalfiles(self):
950 890 return (self.sjoin('journal'), self.join('journal.dirstate'),
951 891 self.join('journal.branch'), self.join('journal.desc'),
952 892 self.join('journal.bookmarks'),
953 893 self.sjoin('journal.phaseroots'))
954 894
955 895 def undofiles(self):
956 896 return [undoname(x) for x in self._journalfiles()]
957 897
958 898 def _writejournal(self, desc):
959 899 self.opener.write("journal.dirstate",
960 900 self.opener.tryread("dirstate"))
961 901 self.opener.write("journal.branch",
962 902 encoding.fromlocal(self.dirstate.branch()))
963 903 self.opener.write("journal.desc",
964 904 "%d\n%s\n" % (len(self), desc))
965 905 self.opener.write("journal.bookmarks",
966 906 self.opener.tryread("bookmarks"))
967 907 self.sopener.write("journal.phaseroots",
968 908 self.sopener.tryread("phaseroots"))
969 909
970 910 def recover(self):
971 911 lock = self.lock()
972 912 try:
973 913 if os.path.exists(self.sjoin("journal")):
974 914 self.ui.status(_("rolling back interrupted transaction\n"))
975 915 transaction.rollback(self.sopener, self.sjoin("journal"),
976 916 self.ui.warn)
977 917 self.invalidate()
978 918 return True
979 919 else:
980 920 self.ui.warn(_("no interrupted transaction available\n"))
981 921 return False
982 922 finally:
983 923 lock.release()
984 924
985 925 def rollback(self, dryrun=False, force=False):
986 926 wlock = lock = None
987 927 try:
988 928 wlock = self.wlock()
989 929 lock = self.lock()
990 930 if os.path.exists(self.sjoin("undo")):
991 931 return self._rollback(dryrun, force)
992 932 else:
993 933 self.ui.warn(_("no rollback information available\n"))
994 934 return 1
995 935 finally:
996 936 release(lock, wlock)
997 937
998 938 @unfilteredmethod # Until we get smarter cache management
999 939 def _rollback(self, dryrun, force):
1000 940 ui = self.ui
1001 941 try:
1002 942 args = self.opener.read('undo.desc').splitlines()
1003 943 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1004 944 if len(args) >= 3:
1005 945 detail = args[2]
1006 946 oldtip = oldlen - 1
1007 947
1008 948 if detail and ui.verbose:
1009 949 msg = (_('repository tip rolled back to revision %s'
1010 950 ' (undo %s: %s)\n')
1011 951 % (oldtip, desc, detail))
1012 952 else:
1013 953 msg = (_('repository tip rolled back to revision %s'
1014 954 ' (undo %s)\n')
1015 955 % (oldtip, desc))
1016 956 except IOError:
1017 957 msg = _('rolling back unknown transaction\n')
1018 958 desc = None
1019 959
1020 960 if not force and self['.'] != self['tip'] and desc == 'commit':
1021 961 raise util.Abort(
1022 962 _('rollback of last commit while not checked out '
1023 963 'may lose data'), hint=_('use -f to force'))
1024 964
1025 965 ui.status(msg)
1026 966 if dryrun:
1027 967 return 0
1028 968
1029 969 parents = self.dirstate.parents()
1030 970 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
1031 971 if os.path.exists(self.join('undo.bookmarks')):
1032 972 util.rename(self.join('undo.bookmarks'),
1033 973 self.join('bookmarks'))
1034 974 if os.path.exists(self.sjoin('undo.phaseroots')):
1035 975 util.rename(self.sjoin('undo.phaseroots'),
1036 976 self.sjoin('phaseroots'))
1037 977 self.invalidate()
1038 978
1039 979 # Discard all cache entries to force reloading everything.
1040 980 self._filecache.clear()
1041 981
1042 982 parentgone = (parents[0] not in self.changelog.nodemap or
1043 983 parents[1] not in self.changelog.nodemap)
1044 984 if parentgone:
1045 985 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1046 986 try:
1047 987 branch = self.opener.read('undo.branch')
1048 988 self.dirstate.setbranch(encoding.tolocal(branch))
1049 989 except IOError:
1050 990 ui.warn(_('named branch could not be reset: '
1051 991 'current branch is still \'%s\'\n')
1052 992 % self.dirstate.branch())
1053 993
1054 994 self.dirstate.invalidate()
1055 995 parents = tuple([p.rev() for p in self.parents()])
1056 996 if len(parents) > 1:
1057 997 ui.status(_('working directory now based on '
1058 998 'revisions %d and %d\n') % parents)
1059 999 else:
1060 1000 ui.status(_('working directory now based on '
1061 1001 'revision %d\n') % parents)
1062 1002 # TODO: if we know which new heads may result from this rollback, pass
1063 1003 # them to destroy(), which will prevent the branchhead cache from being
1064 1004 # invalidated.
1065 1005 self.destroyed()
1066 1006 return 0
1067 1007
1068 1008 def invalidatecaches(self):
1069 1009
1070 1010 if '_tagscache' in vars(self):
1071 1011 # can't use delattr on proxy
1072 1012 del self.__dict__['_tagscache']
1073 1013
1074 1014 self.unfiltered()._branchcache = None # in UTF-8
1075 1015 self.unfiltered()._branchcachetip = None
1076 1016 self.invalidatevolatilesets()
1077 1017
1078 1018 def invalidatevolatilesets(self):
1079 1019 self.filteredrevcache.clear()
1080 1020 obsolete.clearobscaches(self)
1081 1021 if 'hiddenrevs' in vars(self):
1082 1022 del self.hiddenrevs
1083 1023
1084 1024 def invalidatedirstate(self):
1085 1025 '''Invalidates the dirstate, causing the next call to dirstate
1086 1026 to check if it was modified since the last time it was read,
1087 1027 rereading it if it has.
1088 1028
1089 1029 This is different from dirstate.invalidate() in that it doesn't always
1090 1030 reread the dirstate. Use dirstate.invalidate() if you want to
1091 1031 explicitly read the dirstate again (i.e. restoring it to a previous
1092 1032 known good state).'''
1093 1033 if hasunfilteredcache(self, 'dirstate'):
1094 1034 for k in self.dirstate._filecache:
1095 1035 try:
1096 1036 delattr(self.dirstate, k)
1097 1037 except AttributeError:
1098 1038 pass
1099 1039 delattr(self.unfiltered(), 'dirstate')
1100 1040
1101 1041 def invalidate(self):
1102 1042 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
1103 1043 for k in self._filecache:
1104 1044 # dirstate is invalidated separately in invalidatedirstate()
1105 1045 if k == 'dirstate':
1106 1046 continue
1107 1047
1108 1048 try:
1109 1049 delattr(unfiltered, k)
1110 1050 except AttributeError:
1111 1051 pass
1112 1052 self.invalidatecaches()
1113 1053
1114 1054 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1115 1055 try:
1116 1056 l = lock.lock(lockname, 0, releasefn, desc=desc)
1117 1057 except error.LockHeld, inst:
1118 1058 if not wait:
1119 1059 raise
1120 1060 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1121 1061 (desc, inst.locker))
1122 1062 # default to 600 seconds timeout
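# Sketch of tuning this wait via the standard 'ui.timeout' setting in hgrc:
#
#   [ui]
#   timeout = 30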
1123 1063 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1124 1064 releasefn, desc=desc)
1125 1065 if acquirefn:
1126 1066 acquirefn()
1127 1067 return l
1128 1068
1129 1069 def _afterlock(self, callback):
1130 1070 """add a callback to the current repository lock.
1131 1071
1132 1072 The callback will be executed on lock release."""
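# Minimal usage sketch (the commithook closure in commit() below follows
# the same pattern); if no lock is currently held the callback runs at once:
#
#   def callback():
#       repo.ui.debug('deferred work after lock release\n')
#   repo._afterlock(callback)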
1133 1073 l = self._lockref and self._lockref()
1134 1074 if l:
1135 1075 l.postrelease.append(callback)
1136 1076 else:
1137 1077 callback()
1138 1078
1139 1079 def lock(self, wait=True):
1140 1080 '''Lock the repository store (.hg/store) and return a weak reference
1141 1081 to the lock. Use this before modifying the store (e.g. committing or
1142 1082 stripping). If you are opening a transaction, get a lock as well.'''
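# Usage sketch, mirroring callers such as rollback() above: take wlock()
# (defined below) before lock() and release them in reverse order, e.g.
#
#   wlock = repo.wlock()
#   lock = repo.lock()
#   try:
#       pass  # modify the store and the working copy
#   finally:
#       release(lock, wlock)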
1143 1083 l = self._lockref and self._lockref()
1144 1084 if l is not None and l.held:
1145 1085 l.lock()
1146 1086 return l
1147 1087
1148 1088 def unlock():
1149 1089 self.store.write()
1150 1090 if hasunfilteredcache(self, '_phasecache'):
1151 1091 self._phasecache.write()
1152 1092 for k, ce in self._filecache.items():
1153 1093 if k == 'dirstate':
1154 1094 continue
1155 1095 ce.refresh()
1156 1096
1157 1097 l = self._lock(self.sjoin("lock"), wait, unlock,
1158 1098 self.invalidate, _('repository %s') % self.origroot)
1159 1099 self._lockref = weakref.ref(l)
1160 1100 return l
1161 1101
1162 1102 def wlock(self, wait=True):
1163 1103 '''Lock the non-store parts of the repository (everything under
1164 1104 .hg except .hg/store) and return a weak reference to the lock.
1165 1105 Use this before modifying files in .hg.'''
1166 1106 l = self._wlockref and self._wlockref()
1167 1107 if l is not None and l.held:
1168 1108 l.lock()
1169 1109 return l
1170 1110
1171 1111 def unlock():
1172 1112 self.dirstate.write()
1173 1113 ce = self._filecache.get('dirstate')
1174 1114 if ce:
1175 1115 ce.refresh()
1176 1116
1177 1117 l = self._lock(self.join("wlock"), wait, unlock,
1178 1118 self.invalidatedirstate, _('working directory of %s') %
1179 1119 self.origroot)
1180 1120 self._wlockref = weakref.ref(l)
1181 1121 return l
1182 1122
1183 1123 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1184 1124 """
1185 1125 commit an individual file as part of a larger transaction
1186 1126 """
1187 1127
1188 1128 fname = fctx.path()
1189 1129 text = fctx.data()
1190 1130 flog = self.file(fname)
1191 1131 fparent1 = manifest1.get(fname, nullid)
1192 1132 fparent2 = fparent2o = manifest2.get(fname, nullid)
1193 1133
1194 1134 meta = {}
1195 1135 copy = fctx.renamed()
1196 1136 if copy and copy[0] != fname:
1197 1137 # Mark the new revision of this file as a copy of another
1198 1138 # file. This copy data will effectively act as a parent
1199 1139 # of this new revision. If this is a merge, the first
1200 1140 # parent will be the nullid (meaning "look up the copy data")
1201 1141 # and the second one will be the other parent. For example:
1202 1142 #
1203 1143 # 0 --- 1 --- 3 rev1 changes file foo
1204 1144 # \ / rev2 renames foo to bar and changes it
1205 1145 # \- 2 -/ rev3 should have bar with all changes and
1206 1146 # should record that bar descends from
1207 1147 # bar in rev2 and foo in rev1
1208 1148 #
1209 1149 # this allows this merge to succeed:
1210 1150 #
1211 1151 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1212 1152 # \ / merging rev3 and rev4 should use bar@rev2
1213 1153 # \- 2 --- 4 as the merge base
1214 1154 #
1215 1155
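# Sketch of the metadata recorded below when a rename is detected (file
# name hypothetical): meta becomes {'copy': 'foo', 'copyrev': <hex node of
# foo's source revision>} and fparent1 is reset to nullid so the copy
# source, rather than a regular filelog parent, carries the history link.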
1216 1156 cfname = copy[0]
1217 1157 crev = manifest1.get(cfname)
1218 1158 newfparent = fparent2
1219 1159
1220 1160 if manifest2: # branch merge
1221 1161 if fparent2 == nullid or crev is None: # copied on remote side
1222 1162 if cfname in manifest2:
1223 1163 crev = manifest2[cfname]
1224 1164 newfparent = fparent1
1225 1165
1226 1166 # find source in nearest ancestor if we've lost track
1227 1167 if not crev:
1228 1168 self.ui.debug(" %s: searching for copy revision for %s\n" %
1229 1169 (fname, cfname))
1230 1170 for ancestor in self[None].ancestors():
1231 1171 if cfname in ancestor:
1232 1172 crev = ancestor[cfname].filenode()
1233 1173 break
1234 1174
1235 1175 if crev:
1236 1176 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1237 1177 meta["copy"] = cfname
1238 1178 meta["copyrev"] = hex(crev)
1239 1179 fparent1, fparent2 = nullid, newfparent
1240 1180 else:
1241 1181 self.ui.warn(_("warning: can't find ancestor for '%s' "
1242 1182 "copied from '%s'!\n") % (fname, cfname))
1243 1183
1244 1184 elif fparent2 != nullid:
1245 1185 # is one parent an ancestor of the other?
1246 1186 fparentancestor = flog.ancestor(fparent1, fparent2)
1247 1187 if fparentancestor == fparent1:
1248 1188 fparent1, fparent2 = fparent2, nullid
1249 1189 elif fparentancestor == fparent2:
1250 1190 fparent2 = nullid
1251 1191
1252 1192 # is the file changed?
1253 1193 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1254 1194 changelist.append(fname)
1255 1195 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1256 1196
1257 1197 # are just the flags changed during merge?
1258 1198 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1259 1199 changelist.append(fname)
1260 1200
1261 1201 return fparent1
1262 1202
1263 1203 @unfilteredmethod
1264 1204 def commit(self, text="", user=None, date=None, match=None, force=False,
1265 1205 editor=False, extra={}):
1266 1206 """Add a new revision to current repository.
1267 1207
1268 1208 Revision information is gathered from the working directory;
1269 1209 match can be used to filter the committed files. If editor is
1270 1210 supplied, it is called to get a commit message.
1271 1211 """
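# Illustrative call (arguments hypothetical):
#
#   node = repo.commit(text='fix parser crash', user='alice')
#   if node is None:
#       pass  # nothing changed, no revision was created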
1272 1212
1273 1213 def fail(f, msg):
1274 1214 raise util.Abort('%s: %s' % (f, msg))
1275 1215
1276 1216 if not match:
1277 1217 match = matchmod.always(self.root, '')
1278 1218
1279 1219 if not force:
1280 1220 vdirs = []
1281 1221 match.dir = vdirs.append
1282 1222 match.bad = fail
1283 1223
1284 1224 wlock = self.wlock()
1285 1225 try:
1286 1226 wctx = self[None]
1287 1227 merge = len(wctx.parents()) > 1
1288 1228
1289 1229 if (not force and merge and match and
1290 1230 (match.files() or match.anypats())):
1291 1231 raise util.Abort(_('cannot partially commit a merge '
1292 1232 '(do not specify files or patterns)'))
1293 1233
1294 1234 changes = self.status(match=match, clean=force)
1295 1235 if force:
1296 1236 changes[0].extend(changes[6]) # mq may commit unchanged files
1297 1237
1298 1238 # check subrepos
1299 1239 subs = []
1300 1240 commitsubs = set()
1301 1241 newstate = wctx.substate.copy()
1302 1242 # only manage subrepos and .hgsubstate if .hgsub is present
1303 1243 if '.hgsub' in wctx:
1304 1244 # we'll decide whether to track this ourselves, thanks
1305 1245 if '.hgsubstate' in changes[0]:
1306 1246 changes[0].remove('.hgsubstate')
1307 1247 if '.hgsubstate' in changes[2]:
1308 1248 changes[2].remove('.hgsubstate')
1309 1249
1310 1250 # compare current state to last committed state
1311 1251 # build new substate based on last committed state
1312 1252 oldstate = wctx.p1().substate
1313 1253 for s in sorted(newstate.keys()):
1314 1254 if not match(s):
1315 1255 # ignore working copy, use old state if present
1316 1256 if s in oldstate:
1317 1257 newstate[s] = oldstate[s]
1318 1258 continue
1319 1259 if not force:
1320 1260 raise util.Abort(
1321 1261 _("commit with new subrepo %s excluded") % s)
1322 1262 if wctx.sub(s).dirty(True):
1323 1263 if not self.ui.configbool('ui', 'commitsubrepos'):
1324 1264 raise util.Abort(
1325 1265 _("uncommitted changes in subrepo %s") % s,
1326 1266 hint=_("use --subrepos for recursive commit"))
1327 1267 subs.append(s)
1328 1268 commitsubs.add(s)
1329 1269 else:
1330 1270 bs = wctx.sub(s).basestate()
1331 1271 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1332 1272 if oldstate.get(s, (None, None, None))[1] != bs:
1333 1273 subs.append(s)
1334 1274
1335 1275 # check for removed subrepos
1336 1276 for p in wctx.parents():
1337 1277 r = [s for s in p.substate if s not in newstate]
1338 1278 subs += [s for s in r if match(s)]
1339 1279 if subs:
1340 1280 if (not match('.hgsub') and
1341 1281 '.hgsub' in (wctx.modified() + wctx.added())):
1342 1282 raise util.Abort(
1343 1283 _("can't commit subrepos without .hgsub"))
1344 1284 changes[0].insert(0, '.hgsubstate')
1345 1285
1346 1286 elif '.hgsub' in changes[2]:
1347 1287 # clean up .hgsubstate when .hgsub is removed
1348 1288 if ('.hgsubstate' in wctx and
1349 1289 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1350 1290 changes[2].insert(0, '.hgsubstate')
1351 1291
1352 1292 # make sure all explicit patterns are matched
1353 1293 if not force and match.files():
1354 1294 matched = set(changes[0] + changes[1] + changes[2])
1355 1295
1356 1296 for f in match.files():
1357 1297 f = self.dirstate.normalize(f)
1358 1298 if f == '.' or f in matched or f in wctx.substate:
1359 1299 continue
1360 1300 if f in changes[3]: # missing
1361 1301 fail(f, _('file not found!'))
1362 1302 if f in vdirs: # visited directory
1363 1303 d = f + '/'
1364 1304 for mf in matched:
1365 1305 if mf.startswith(d):
1366 1306 break
1367 1307 else:
1368 1308 fail(f, _("no match under directory!"))
1369 1309 elif f not in self.dirstate:
1370 1310 fail(f, _("file not tracked!"))
1371 1311
1372 1312 if (not force and not extra.get("close") and not merge
1373 1313 and not (changes[0] or changes[1] or changes[2])
1374 1314 and wctx.branch() == wctx.p1().branch()):
1375 1315 return None
1376 1316
1377 1317 if merge and changes[3]:
1378 1318 raise util.Abort(_("cannot commit merge with missing files"))
1379 1319
1380 1320 ms = mergemod.mergestate(self)
1381 1321 for f in changes[0]:
1382 1322 if f in ms and ms[f] == 'u':
1383 1323 raise util.Abort(_("unresolved merge conflicts "
1384 1324 "(see hg help resolve)"))
1385 1325
1386 1326 cctx = context.workingctx(self, text, user, date, extra, changes)
1387 1327 if editor:
1388 1328 cctx._text = editor(self, cctx, subs)
1389 1329 edited = (text != cctx._text)
1390 1330
1391 1331 # commit subs and write new state
1392 1332 if subs:
1393 1333 for s in sorted(commitsubs):
1394 1334 sub = wctx.sub(s)
1395 1335 self.ui.status(_('committing subrepository %s\n') %
1396 1336 subrepo.subrelpath(sub))
1397 1337 sr = sub.commit(cctx._text, user, date)
1398 1338 newstate[s] = (newstate[s][0], sr)
1399 1339 subrepo.writestate(self, newstate)
1400 1340
1401 1341 # Save commit message in case this transaction gets rolled back
1402 1342 # (e.g. by a pretxncommit hook). Leave the content alone on
1403 1343 # the assumption that the user will use the same editor again.
1404 1344 msgfn = self.savecommitmessage(cctx._text)
1405 1345
1406 1346 p1, p2 = self.dirstate.parents()
1407 1347 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1408 1348 try:
1409 1349 self.hook("precommit", throw=True, parent1=hookp1,
1410 1350 parent2=hookp2)
1411 1351 ret = self.commitctx(cctx, True)
1412 1352 except: # re-raises
1413 1353 if edited:
1414 1354 self.ui.write(
1415 1355 _('note: commit message saved in %s\n') % msgfn)
1416 1356 raise
1417 1357
1418 1358 # update bookmarks, dirstate and mergestate
1419 1359 bookmarks.update(self, [p1, p2], ret)
1420 1360 for f in changes[0] + changes[1]:
1421 1361 self.dirstate.normal(f)
1422 1362 for f in changes[2]:
1423 1363 self.dirstate.drop(f)
1424 1364 self.dirstate.setparents(ret)
1425 1365 ms.reset()
1426 1366 finally:
1427 1367 wlock.release()
1428 1368
1429 1369 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1430 1370 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1431 1371 self._afterlock(commithook)
1432 1372 return ret
1433 1373
1434 1374 @unfilteredmethod
1435 1375 def commitctx(self, ctx, error=False):
1436 1376 """Add a new revision to current repository.
1437 1377 Revision information is passed via the context argument.
1438 1378 """
1439 1379
1440 1380 tr = lock = None
1441 1381 removed = list(ctx.removed())
1442 1382 p1, p2 = ctx.p1(), ctx.p2()
1443 1383 user = ctx.user()
1444 1384
1445 1385 lock = self.lock()
1446 1386 try:
1447 1387 tr = self.transaction("commit")
1448 1388 trp = weakref.proxy(tr)
1449 1389
1450 1390 if ctx.files():
1451 1391 m1 = p1.manifest().copy()
1452 1392 m2 = p2.manifest()
1453 1393
1454 1394 # check in files
1455 1395 new = {}
1456 1396 changed = []
1457 1397 linkrev = len(self)
1458 1398 for f in sorted(ctx.modified() + ctx.added()):
1459 1399 self.ui.note(f + "\n")
1460 1400 try:
1461 1401 fctx = ctx[f]
1462 1402 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1463 1403 changed)
1464 1404 m1.set(f, fctx.flags())
1465 1405 except OSError, inst:
1466 1406 self.ui.warn(_("trouble committing %s!\n") % f)
1467 1407 raise
1468 1408 except IOError, inst:
1469 1409 errcode = getattr(inst, 'errno', errno.ENOENT)
1470 1410 if error or errcode and errcode != errno.ENOENT:
1471 1411 self.ui.warn(_("trouble committing %s!\n") % f)
1472 1412 raise
1473 1413 else:
1474 1414 removed.append(f)
1475 1415
1476 1416 # update manifest
1477 1417 m1.update(new)
1478 1418 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1479 1419 drop = [f for f in removed if f in m1]
1480 1420 for f in drop:
1481 1421 del m1[f]
1482 1422 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1483 1423 p2.manifestnode(), (new, drop))
1484 1424 files = changed + removed
1485 1425 else:
1486 1426 mn = p1.manifestnode()
1487 1427 files = []
1488 1428
1489 1429 # update changelog
1490 1430 self.changelog.delayupdate()
1491 1431 n = self.changelog.add(mn, files, ctx.description(),
1492 1432 trp, p1.node(), p2.node(),
1493 1433 user, ctx.date(), ctx.extra().copy())
1494 1434 p = lambda: self.changelog.writepending() and self.root or ""
1495 1435 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1496 1436 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1497 1437 parent2=xp2, pending=p)
1498 1438 self.changelog.finalize(trp)
1499 1439 # set the new commit in its proper phase
1500 1440 targetphase = phases.newcommitphase(self.ui)
1501 1441 if targetphase:
1502 1442 # retracting the boundary does not alter parent changesets.
1503 1443 # if a parent has a higher phase, the resulting phase will
1504 1444 # be compliant anyway
1505 1445 #
1506 1446 # if minimal phase was 0 we don't need to retract anything
1507 1447 phases.retractboundary(self, targetphase, [n])
1508 1448 tr.close()
1509 1449 self.updatebranchcache()
1510 1450 return n
1511 1451 finally:
1512 1452 if tr:
1513 1453 tr.release()
1514 1454 lock.release()
1515 1455
1516 1456 @unfilteredmethod
1517 1457 def destroyed(self, newheadnodes=None):
1518 1458 '''Inform the repository that nodes have been destroyed.
1519 1459 Intended for use by strip and rollback, so there's a common
1520 1460 place for anything that has to be done after destroying history.
1521 1461
1522 1462 If you know the branchhead cache was up to date before nodes were removed
1523 1463 and you also know the set of candidate new heads that may have resulted
1524 1464 from the destruction, you can set newheadnodes. This will enable the
1525 1465 code to update the branchheads cache, rather than having future code
1526 1466 decide it's invalid and regenerate it from scratch.
1527 1467 '''
1528 1468 # If we have info, newheadnodes, on how to update the branch cache, do
1529 1469 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1530 1470 # will be caught the next time it is read.
1531 1471 if newheadnodes:
1532 1472 tiprev = len(self) - 1
1533 1473 ctxgen = (self[node] for node in newheadnodes
1534 1474 if self.changelog.hasnode(node))
1535 self._updatebranchcache(self._branchcache, ctxgen)
1475 branchmap.update(self, self._branchcache, ctxgen)
1536 1476 branchmap.write(self, self._branchcache, self.changelog.tip(),
1537 1477 tiprev)
1538 1478
1539 1479 # Ensure the persistent tag cache is updated. Doing it now
1540 1480 # means that the tag cache only has to worry about destroyed
1541 1481 # heads immediately after a strip/rollback. That in turn
1542 1482 # guarantees that "cachetip == currenttip" (comparing both rev
1543 1483 # and node) always means no nodes have been added or destroyed.
1544 1484
1545 1485 # XXX this is suboptimal when qrefresh'ing: we strip the current
1546 1486 # head, refresh the tag cache, then immediately add a new head.
1547 1487 # But I think doing it this way is necessary for the "instant
1548 1488 # tag cache retrieval" case to work.
1549 1489 self.invalidatecaches()
1550 1490
1551 1491 # Discard all cache entries to force reloading everything.
1552 1492 self._filecache.clear()
1553 1493
1554 1494 def walk(self, match, node=None):
1555 1495 '''
1556 1496 walk recursively through the directory tree or a given
1557 1497 changeset, finding all files matched by the match
1558 1498 function
1559 1499 '''
1560 1500 return self[node].walk(match)
1561 1501
1562 1502 def status(self, node1='.', node2=None, match=None,
1563 1503 ignored=False, clean=False, unknown=False,
1564 1504 listsubrepos=False):
1565 1505 """return status of files between two nodes or node and working
1566 1506 directory.
1567 1507
1568 1508 If node1 is None, use the first dirstate parent instead.
1569 1509 If node2 is None, compare node1 with working directory.
1570 1510 """
1571 1511
1572 1512 def mfmatches(ctx):
1573 1513 mf = ctx.manifest().copy()
1574 1514 if match.always():
1575 1515 return mf
1576 1516 for fn in mf.keys():
1577 1517 if not match(fn):
1578 1518 del mf[fn]
1579 1519 return mf
1580 1520
1581 1521 if isinstance(node1, context.changectx):
1582 1522 ctx1 = node1
1583 1523 else:
1584 1524 ctx1 = self[node1]
1585 1525 if isinstance(node2, context.changectx):
1586 1526 ctx2 = node2
1587 1527 else:
1588 1528 ctx2 = self[node2]
1589 1529
1590 1530 working = ctx2.rev() is None
1591 1531 parentworking = working and ctx1 == self['.']
1592 1532 match = match or matchmod.always(self.root, self.getcwd())
1593 1533 listignored, listclean, listunknown = ignored, clean, unknown
1594 1534
1595 1535 # load earliest manifest first for caching reasons
1596 1536 if not working and ctx2.rev() < ctx1.rev():
1597 1537 ctx2.manifest()
1598 1538
1599 1539 if not parentworking:
1600 1540 def bad(f, msg):
1601 1541 # 'f' may be a directory pattern from 'match.files()',
1602 1542 # so 'f not in ctx1' is not enough
1603 1543 if f not in ctx1 and f not in ctx1.dirs():
1604 1544 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1605 1545 match.bad = bad
1606 1546
1607 1547 if working: # we need to scan the working dir
1608 1548 subrepos = []
1609 1549 if '.hgsub' in self.dirstate:
1610 1550 subrepos = ctx2.substate.keys()
1611 1551 s = self.dirstate.status(match, subrepos, listignored,
1612 1552 listclean, listunknown)
1613 1553 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1614 1554
1615 1555 # check for any possibly clean files
1616 1556 if parentworking and cmp:
1617 1557 fixup = []
1618 1558 # do a full compare of any files that might have changed
1619 1559 for f in sorted(cmp):
1620 1560 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1621 1561 or ctx1[f].cmp(ctx2[f])):
1622 1562 modified.append(f)
1623 1563 else:
1624 1564 fixup.append(f)
1625 1565
1626 1566 # update dirstate for files that are actually clean
1627 1567 if fixup:
1628 1568 if listclean:
1629 1569 clean += fixup
1630 1570
1631 1571 try:
1632 1572 # updating the dirstate is optional
1633 1573 # so we don't wait on the lock
1634 1574 wlock = self.wlock(False)
1635 1575 try:
1636 1576 for f in fixup:
1637 1577 self.dirstate.normal(f)
1638 1578 finally:
1639 1579 wlock.release()
1640 1580 except error.LockError:
1641 1581 pass
1642 1582
1643 1583 if not parentworking:
1644 1584 mf1 = mfmatches(ctx1)
1645 1585 if working:
1646 1586 # we are comparing working dir against non-parent
1647 1587 # generate a pseudo-manifest for the working dir
1648 1588 mf2 = mfmatches(self['.'])
1649 1589 for f in cmp + modified + added:
1650 1590 mf2[f] = None
1651 1591 mf2.set(f, ctx2.flags(f))
1652 1592 for f in removed:
1653 1593 if f in mf2:
1654 1594 del mf2[f]
1655 1595 else:
1656 1596 # we are comparing two revisions
1657 1597 deleted, unknown, ignored = [], [], []
1658 1598 mf2 = mfmatches(ctx2)
1659 1599
1660 1600 modified, added, clean = [], [], []
1661 1601 withflags = mf1.withflags() | mf2.withflags()
1662 1602 for fn in mf2:
1663 1603 if fn in mf1:
1664 1604 if (fn not in deleted and
1665 1605 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1666 1606 (mf1[fn] != mf2[fn] and
1667 1607 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1668 1608 modified.append(fn)
1669 1609 elif listclean:
1670 1610 clean.append(fn)
1671 1611 del mf1[fn]
1672 1612 elif fn not in deleted:
1673 1613 added.append(fn)
1674 1614 removed = mf1.keys()
1675 1615
1676 1616 if working and modified and not self.dirstate._checklink:
1677 1617 # Symlink placeholders may get non-symlink-like contents
1678 1618 # via user error or dereferencing by NFS or Samba servers,
1679 1619 # so we filter out any placeholders that don't look like a
1680 1620 # symlink
1681 1621 sane = []
1682 1622 for f in modified:
1683 1623 if ctx2.flags(f) == 'l':
1684 1624 d = ctx2[f].data()
1685 1625 if len(d) >= 1024 or '\n' in d or util.binary(d):
1686 1626 self.ui.debug('ignoring suspect symlink placeholder'
1687 1627 ' "%s"\n' % f)
1688 1628 continue
1689 1629 sane.append(f)
1690 1630 modified = sane
1691 1631
1692 1632 r = modified, added, removed, deleted, unknown, ignored, clean
1693 1633
1694 1634 if listsubrepos:
1695 1635 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1696 1636 if working:
1697 1637 rev2 = None
1698 1638 else:
1699 1639 rev2 = ctx2.substate[subpath][1]
1700 1640 try:
1701 1641 submatch = matchmod.narrowmatcher(subpath, match)
1702 1642 s = sub.status(rev2, match=submatch, ignored=listignored,
1703 1643 clean=listclean, unknown=listunknown,
1704 1644 listsubrepos=True)
1705 1645 for rfiles, sfiles in zip(r, s):
1706 1646 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1707 1647 except error.LookupError:
1708 1648 self.ui.status(_("skipping missing subrepository: %s\n")
1709 1649 % subpath)
1710 1650
1711 1651 for l in r:
1712 1652 l.sort()
1713 1653 return r
1714 1654
1715 1655 def heads(self, start=None):
1716 1656 heads = self.changelog.heads(start)
1717 1657 # sort the output in rev descending order
1718 1658 return sorted(heads, key=self.changelog.rev, reverse=True)
1719 1659
1720 1660 def branchheads(self, branch=None, start=None, closed=False):
1721 1661 '''return a (possibly filtered) list of heads for the given branch
1722 1662
1723 1663 Heads are returned in topological order, from newest to oldest.
1724 1664 If branch is None, use the dirstate branch.
1725 1665 If start is not None, return only heads reachable from start.
1726 1666 If closed is True, return heads that are marked as closed as well.
1727 1667 '''
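# Illustrative call (branch name hypothetical): the newest head of the
# 'stable' branch, including closed heads, would be
#
#   repo.branchheads('stable', closed=True)[0]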
1728 1668 if branch is None:
1729 1669 branch = self[None].branch()
1730 1670 branches = self.branchmap()
1731 1671 if branch not in branches:
1732 1672 return []
1733 1673 # the cache returns heads ordered lowest to highest
1734 1674 bheads = list(reversed(branches[branch]))
1735 1675 if start is not None:
1736 1676 # filter out the heads that cannot be reached from startrev
1737 1677 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1738 1678 bheads = [h for h in bheads if h in fbheads]
1739 1679 if not closed:
1740 1680 bheads = [h for h in bheads if not self[h].closesbranch()]
1741 1681 return bheads
1742 1682
1743 1683 def branches(self, nodes):
1744 1684 if not nodes:
1745 1685 nodes = [self.changelog.tip()]
1746 1686 b = []
1747 1687 for n in nodes:
1748 1688 t = n
1749 1689 while True:
1750 1690 p = self.changelog.parents(n)
1751 1691 if p[1] != nullid or p[0] == nullid:
1752 1692 b.append((t, n, p[0], p[1]))
1753 1693 break
1754 1694 n = p[0]
1755 1695 return b
1756 1696
1757 1697 def between(self, pairs):
1758 1698 r = []
1759 1699
1760 1700 for top, bottom in pairs:
1761 1701 n, l, i = top, [], 0
1762 1702 f = 1
1763 1703
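# The sampling below keeps a node each time the step count i reaches f and
# then doubles f, so for each (top, bottom) pair l ends up holding the
# first-parent ancestors 1, 2, 4, 8, ... steps below top.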
1764 1704 while n != bottom and n != nullid:
1765 1705 p = self.changelog.parents(n)[0]
1766 1706 if i == f:
1767 1707 l.append(n)
1768 1708 f = f * 2
1769 1709 n = p
1770 1710 i += 1
1771 1711
1772 1712 r.append(l)
1773 1713
1774 1714 return r
1775 1715
1776 1716 def pull(self, remote, heads=None, force=False):
1777 1717 # don't open a transaction for nothing or you break future useful
1778 1718 # rollback calls
1779 1719 tr = None
1780 1720 trname = 'pull\n' + util.hidepassword(remote.url())
1781 1721 lock = self.lock()
1782 1722 try:
1783 1723 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1784 1724 force=force)
1785 1725 common, fetch, rheads = tmp
1786 1726 if not fetch:
1787 1727 self.ui.status(_("no changes found\n"))
1788 1728 added = []
1789 1729 result = 0
1790 1730 else:
1791 1731 tr = self.transaction(trname)
1792 1732 if heads is None and list(common) == [nullid]:
1793 1733 self.ui.status(_("requesting all changes\n"))
1794 1734 elif heads is None and remote.capable('changegroupsubset'):
1795 1735 # issue1320, avoid a race if remote changed after discovery
1796 1736 heads = rheads
1797 1737
1798 1738 if remote.capable('getbundle'):
1799 1739 cg = remote.getbundle('pull', common=common,
1800 1740 heads=heads or rheads)
1801 1741 elif heads is None:
1802 1742 cg = remote.changegroup(fetch, 'pull')
1803 1743 elif not remote.capable('changegroupsubset'):
1804 1744 raise util.Abort(_("partial pull cannot be done because "
1805 1745 "other repository doesn't support "
1806 1746 "changegroupsubset."))
1807 1747 else:
1808 1748 cg = remote.changegroupsubset(fetch, heads, 'pull')
1809 1749 clstart = len(self.changelog)
1810 1750 result = self.addchangegroup(cg, 'pull', remote.url())
1811 1751 clend = len(self.changelog)
1812 1752 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1813 1753
1814 1754 # compute target subset
1815 1755 if heads is None:
1816 1756 # We pulled everything possible
1817 1757 # sync on everything common
1818 1758 subset = common + added
1819 1759 else:
1820 1760 # We pulled a specific subset
1821 1761 # sync on this subset
1822 1762 subset = heads
1823 1763
1824 1764 # Get remote phases data from remote
1825 1765 remotephases = remote.listkeys('phases')
1826 1766 publishing = bool(remotephases.get('publishing', False))
1827 1767 if remotephases and not publishing:
1828 1768 # remote is new and non-publishing
1829 1769 pheads, _dr = phases.analyzeremotephases(self, subset,
1830 1770 remotephases)
1831 1771 phases.advanceboundary(self, phases.public, pheads)
1832 1772 phases.advanceboundary(self, phases.draft, subset)
1833 1773 else:
1834 1774 # Remote is old or publishing; all common changesets
1835 1775 # should be seen as public
1836 1776 phases.advanceboundary(self, phases.public, subset)
1837 1777
1838 1778 if obsolete._enabled:
1839 1779 self.ui.debug('fetching remote obsolete markers\n')
1840 1780 remoteobs = remote.listkeys('obsolete')
1841 1781 if 'dump0' in remoteobs:
1842 1782 if tr is None:
1843 1783 tr = self.transaction(trname)
1844 1784 for key in sorted(remoteobs, reverse=True):
1845 1785 if key.startswith('dump'):
1846 1786 data = base85.b85decode(remoteobs[key])
1847 1787 self.obsstore.mergemarkers(tr, data)
1848 1788 self.invalidatevolatilesets()
1849 1789 if tr is not None:
1850 1790 tr.close()
1851 1791 finally:
1852 1792 if tr is not None:
1853 1793 tr.release()
1854 1794 lock.release()
1855 1795
1856 1796 return result
1857 1797
1858 1798 def checkpush(self, force, revs):
1859 1799 """Extensions can override this function if additional checks have
1860 1800 to be performed before pushing, or call it if they override push
1861 1801 command.
1862 1802 """
1863 1803 pass
1864 1804
1865 1805 def push(self, remote, force=False, revs=None, newbranch=False):
1866 1806 '''Push outgoing changesets (limited by revs) from the current
1867 1807 repository to remote. Return an integer:
1868 1808 - None means nothing to push
1869 1809 - 0 means HTTP error
1870 1810 - 1 means we pushed and remote head count is unchanged *or*
1871 1811 we have outgoing changesets but refused to push
1872 1812 - other values as described by addchangegroup()
1873 1813 '''
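# Sketch of interpreting the result (remote hypothetical):
#
#   ret = repo.push(remote, newbranch=True)
#   if ret is None:
#       pass  # nothing to push
#   elif ret == 0:
#       pass  # HTTP error reported by the remote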
1874 1814 # there are two ways to push to remote repo:
1875 1815 #
1876 1816 # addchangegroup assumes local user can lock remote
1877 1817 # repo (local filesystem, old ssh servers).
1878 1818 #
1879 1819 # unbundle assumes local user cannot lock remote repo (new ssh
1880 1820 # servers, http servers).
1881 1821
1882 1822 if not remote.canpush():
1883 1823 raise util.Abort(_("destination does not support push"))
1884 1824 unfi = self.unfiltered()
1885 1825 # get local lock as we might write phase data
1886 1826 locallock = self.lock()
1887 1827 try:
1888 1828 self.checkpush(force, revs)
1889 1829 lock = None
1890 1830 unbundle = remote.capable('unbundle')
1891 1831 if not unbundle:
1892 1832 lock = remote.lock()
1893 1833 try:
1894 1834 # discovery
1895 1835 fci = discovery.findcommonincoming
1896 1836 commoninc = fci(unfi, remote, force=force)
1897 1837 common, inc, remoteheads = commoninc
1898 1838 fco = discovery.findcommonoutgoing
1899 1839 outgoing = fco(unfi, remote, onlyheads=revs,
1900 1840 commoninc=commoninc, force=force)
1901 1841
1902 1842
1903 1843 if not outgoing.missing:
1904 1844 # nothing to push
1905 1845 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1906 1846 ret = None
1907 1847 else:
1908 1848 # something to push
1909 1849 if not force:
1910 1850 # if self.obsstore is empty --> no obsolete markers,
1911 1851 # so we can skip the iteration
1912 1852 if unfi.obsstore:
1913 1853 # these messages are defined here to stay within the 80-char limit
1914 1854 mso = _("push includes obsolete changeset: %s!")
1915 1855 msu = _("push includes unstable changeset: %s!")
1916 1856 msb = _("push includes bumped changeset: %s!")
1917 1857 msd = _("push includes divergent changeset: %s!")
1918 1858 # If we are about to push and there is at least one
1919 1859 # obsolete or unstable changeset in missing, at
1920 1860 # least one of the missing heads will be obsolete or
1921 1861 # unstable. So checking heads only is ok.
1922 1862 for node in outgoing.missingheads:
1923 1863 ctx = unfi[node]
1924 1864 if ctx.obsolete():
1925 1865 raise util.Abort(mso % ctx)
1926 1866 elif ctx.unstable():
1927 1867 raise util.Abort(msu % ctx)
1928 1868 elif ctx.bumped():
1929 1869 raise util.Abort(msb % ctx)
1930 1870 elif ctx.divergent():
1931 1871 raise util.Abort(msd % ctx)
1932 1872 discovery.checkheads(unfi, remote, outgoing,
1933 1873 remoteheads, newbranch,
1934 1874 bool(inc))
1935 1875
1936 1876 # create a changegroup from local
1937 1877 if revs is None and not outgoing.excluded:
1938 1878 # push everything,
1939 1879 # use the fast path, no race possible on push
1940 1880 cg = self._changegroup(outgoing.missing, 'push')
1941 1881 else:
1942 1882 cg = self.getlocalbundle('push', outgoing)
1943 1883
1944 1884 # apply changegroup to remote
1945 1885 if unbundle:
1946 1886 # local repo finds heads on server, finds out what
1947 1887 # revs it must push. once revs transferred, if server
1948 1888 # finds it has different heads (someone else won
1949 1889 # commit/push race), server aborts.
1950 1890 if force:
1951 1891 remoteheads = ['force']
1952 1892 # ssh: return remote's addchangegroup()
1953 1893 # http: return remote's addchangegroup() or 0 for error
1954 1894 ret = remote.unbundle(cg, remoteheads, 'push')
1955 1895 else:
1956 1896 # we return an integer indicating remote head count
1957 1897 # change
1958 1898 ret = remote.addchangegroup(cg, 'push', self.url())
1959 1899
1960 1900 if ret:
1961 1901 # push succeeded, synchronize target of the push
1962 1902 cheads = outgoing.missingheads
1963 1903 elif revs is None:
1964 1904 # All-out push failed. Synchronize on all common heads.
1965 1905 cheads = outgoing.commonheads
1966 1906 else:
1967 1907 # I want cheads = heads(::missingheads and ::commonheads)
1968 1908 # (missingheads is revs with secret changeset filtered out)
1969 1909 #
1970 1910 # This can be expressed as:
1971 1911 # cheads = ( (missingheads and ::commonheads)
1972 1912 # + (commonheads and ::missingheads))"
1973 1913 # )
1974 1914 #
1975 1915 # while trying to push we already computed the following:
1976 1916 # common = (::commonheads)
1977 1917 # missing = ((commonheads::missingheads) - commonheads)
1978 1918 #
1979 1919 # We can pick:
1980 1920 # * missingheads part of common (::commonheads)
1981 1921 common = set(outgoing.common)
1982 1922 cheads = [node for node in revs if node in common]
1983 1923 # and
1984 1924 # * commonheads parents on missing
1985 1925 revset = unfi.set('%ln and parents(roots(%ln))',
1986 1926 outgoing.commonheads,
1987 1927 outgoing.missing)
1988 1928 cheads.extend(c.node() for c in revset)
1989 1929 # even when we don't push, exchanging phase data is useful
1990 1930 remotephases = remote.listkeys('phases')
1991 1931 if not remotephases: # old server or public only repo
1992 1932 phases.advanceboundary(self, phases.public, cheads)
1993 1933 # don't push any phase data as there is nothing to push
1994 1934 else:
1995 1935 ana = phases.analyzeremotephases(self, cheads, remotephases)
1996 1936 pheads, droots = ana
1997 1937 ### Apply remote phase on local
1998 1938 if remotephases.get('publishing', False):
1999 1939 phases.advanceboundary(self, phases.public, cheads)
2000 1940 else: # publish = False
2001 1941 phases.advanceboundary(self, phases.public, pheads)
2002 1942 phases.advanceboundary(self, phases.draft, cheads)
2003 1943 ### Apply local phase on remote
2004 1944
2005 1945 # Get the list of all revs that are draft on remote but public here.
2006 1946 # XXX Beware that the revset breaks if droots is not strictly
2007 1947 # XXX made of roots; we may want to ensure it is, but that is costly
2008 1948 outdated = unfi.set('heads((%ln::%ln) and public())',
2009 1949 droots, cheads)
2010 1950 for newremotehead in outdated:
2011 1951 r = remote.pushkey('phases',
2012 1952 newremotehead.hex(),
2013 1953 str(phases.draft),
2014 1954 str(phases.public))
2015 1955 if not r:
2016 1956 self.ui.warn(_('updating %s to public failed!\n')
2017 1957 % newremotehead)
2018 1958 self.ui.debug('try to push obsolete markers to remote\n')
2019 1959 if (obsolete._enabled and self.obsstore and
2020 1960 'obsolete' in remote.listkeys('namespaces')):
2021 1961 rslts = []
2022 1962 remotedata = self.listkeys('obsolete')
2023 1963 for key in sorted(remotedata, reverse=True):
2024 1964 # reverse sort to ensure we end with dump0
2025 1965 data = remotedata[key]
2026 1966 rslts.append(remote.pushkey('obsolete', key, '', data))
2027 1967 if [r for r in rslts if not r]:
2028 1968 msg = _('failed to push some obsolete markers!\n')
2029 1969 self.ui.warn(msg)
2030 1970 finally:
2031 1971 if lock is not None:
2032 1972 lock.release()
2033 1973 finally:
2034 1974 locallock.release()
2035 1975
2036 1976 self.ui.debug("checking for updated bookmarks\n")
2037 1977 rb = remote.listkeys('bookmarks')
2038 1978 for k in rb.keys():
2039 1979 if k in unfi._bookmarks:
2040 1980 nr, nl = rb[k], hex(self._bookmarks[k])
2041 1981 if nr in unfi:
2042 1982 cr = unfi[nr]
2043 1983 cl = unfi[nl]
2044 1984 if bookmarks.validdest(unfi, cr, cl):
2045 1985 r = remote.pushkey('bookmarks', k, nr, nl)
2046 1986 if r:
2047 1987 self.ui.status(_("updating bookmark %s\n") % k)
2048 1988 else:
2049 1989 self.ui.warn(_('updating bookmark %s'
2050 1990 ' failed!\n') % k)
2051 1991
2052 1992 return ret
2053 1993
2054 1994 def changegroupinfo(self, nodes, source):
2055 1995 if self.ui.verbose or source == 'bundle':
2056 1996 self.ui.status(_("%d changesets found\n") % len(nodes))
2057 1997 if self.ui.debugflag:
2058 1998 self.ui.debug("list of changesets:\n")
2059 1999 for node in nodes:
2060 2000 self.ui.debug("%s\n" % hex(node))
2061 2001
2062 2002 def changegroupsubset(self, bases, heads, source):
2063 2003 """Compute a changegroup consisting of all the nodes that are
2064 2004 descendants of any of the bases and ancestors of any of the heads.
2065 2005 Return a chunkbuffer object whose read() method will return
2066 2006 successive changegroup chunks.
2067 2007
2068 2008 It is fairly complex as determining which filenodes and which
2069 2009 manifest nodes need to be included for the changeset to be complete
2070 2010 is non-trivial.
2071 2011
2072 2012 Another wrinkle is doing the reverse, figuring out which changeset in
2073 2013 the changegroup a particular filenode or manifestnode belongs to.
2074 2014 """
2075 2015 cl = self.changelog
2076 2016 if not bases:
2077 2017 bases = [nullid]
2078 2018 csets, bases, heads = cl.nodesbetween(bases, heads)
2079 2019 # We assume that all ancestors of bases are known
2080 2020 common = cl.ancestors([cl.rev(n) for n in bases])
2081 2021 return self._changegroupsubset(common, csets, heads, source)
2082 2022
2083 2023 def getlocalbundle(self, source, outgoing):
2084 2024 """Like getbundle, but taking a discovery.outgoing as an argument.
2085 2025
2086 2026 This is only implemented for local repos and reuses potentially
2087 2027 precomputed sets in outgoing."""
2088 2028 if not outgoing.missing:
2089 2029 return None
2090 2030 return self._changegroupsubset(outgoing.common,
2091 2031 outgoing.missing,
2092 2032 outgoing.missingheads,
2093 2033 source)
2094 2034
2095 2035 def getbundle(self, source, heads=None, common=None):
2096 2036 """Like changegroupsubset, but returns the set difference between the
2097 2037 ancestors of heads and the ancestors of common.
2098 2038
2099 2039 If heads is None, use the local heads. If common is None, use [nullid].
2100 2040
2101 2041 The nodes in common might not all be known locally due to the way the
2102 2042 current discovery protocol works.
2103 2043 """
2104 2044 cl = self.changelog
2105 2045 if common:
2106 2046 hasnode = cl.hasnode
2107 2047 common = [n for n in common if hasnode(n)]
2108 2048 else:
2109 2049 common = [nullid]
2110 2050 if not heads:
2111 2051 heads = cl.heads()
2112 2052 return self.getlocalbundle(source,
2113 2053 discovery.outgoing(cl, common, heads))
2114 2054
2115 2055 @unfilteredmethod
2116 2056 def _changegroupsubset(self, commonrevs, csets, heads, source):
2117 2057
2118 2058 cl = self.changelog
2119 2059 mf = self.manifest
2120 2060 mfs = {} # needed manifests
2121 2061 fnodes = {} # needed file nodes
2122 2062 changedfiles = set()
2123 2063 fstate = ['', {}]
2124 2064 count = [0, 0]
2125 2065
2126 2066 # can we go through the fast path ?
2127 2067 heads.sort()
2128 2068 if heads == sorted(self.heads()):
2129 2069 return self._changegroup(csets, source)
2130 2070
2131 2071 # slow path
2132 2072 self.hook('preoutgoing', throw=True, source=source)
2133 2073 self.changegroupinfo(csets, source)
2134 2074
2135 2075 # filter any nodes that claim to be part of the known set
2136 2076 def prune(revlog, missing):
2137 2077 rr, rl = revlog.rev, revlog.linkrev
2138 2078 return [n for n in missing
2139 2079 if rl(rr(n)) not in commonrevs]
2140 2080
2141 2081 progress = self.ui.progress
2142 2082 _bundling = _('bundling')
2143 2083 _changesets = _('changesets')
2144 2084 _manifests = _('manifests')
2145 2085 _files = _('files')
2146 2086
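# The bundler calls lookup() back for every node it emits: changelog
# entries record which manifests and files must follow, while manifest and
# file entries return the changelog node they should be linked to.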
2147 2087 def lookup(revlog, x):
2148 2088 if revlog == cl:
2149 2089 c = cl.read(x)
2150 2090 changedfiles.update(c[3])
2151 2091 mfs.setdefault(c[0], x)
2152 2092 count[0] += 1
2153 2093 progress(_bundling, count[0],
2154 2094 unit=_changesets, total=count[1])
2155 2095 return x
2156 2096 elif revlog == mf:
2157 2097 clnode = mfs[x]
2158 2098 mdata = mf.readfast(x)
2159 2099 for f, n in mdata.iteritems():
2160 2100 if f in changedfiles:
2161 2101 fnodes[f].setdefault(n, clnode)
2162 2102 count[0] += 1
2163 2103 progress(_bundling, count[0],
2164 2104 unit=_manifests, total=count[1])
2165 2105 return clnode
2166 2106 else:
2167 2107 progress(_bundling, count[0], item=fstate[0],
2168 2108 unit=_files, total=count[1])
2169 2109 return fstate[1][x]
2170 2110
2171 2111 bundler = changegroup.bundle10(lookup)
2172 2112 reorder = self.ui.config('bundle', 'reorder', 'auto')
2173 2113 if reorder == 'auto':
2174 2114 reorder = None
2175 2115 else:
2176 2116 reorder = util.parsebool(reorder)
2177 2117
2178 2118 def gengroup():
2179 2119 # Create a changenode group generator that will call our functions
2180 2120 # back to lookup the owning changenode and collect information.
2181 2121 count[:] = [0, len(csets)]
2182 2122 for chunk in cl.group(csets, bundler, reorder=reorder):
2183 2123 yield chunk
2184 2124 progress(_bundling, None)
2185 2125
2186 2126 # Create a generator for the manifestnodes that calls our lookup
2187 2127 # and data collection functions back.
2188 2128 for f in changedfiles:
2189 2129 fnodes[f] = {}
2190 2130 count[:] = [0, len(mfs)]
2191 2131 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2192 2132 yield chunk
2193 2133 progress(_bundling, None)
2194 2134
2195 2135 mfs.clear()
2196 2136
2197 2137 # Go through all our files in order sorted by name.
2198 2138 count[:] = [0, len(changedfiles)]
2199 2139 for fname in sorted(changedfiles):
2200 2140 filerevlog = self.file(fname)
2201 2141 if not len(filerevlog):
2202 2142 raise util.Abort(_("empty or missing revlog for %s")
2203 2143 % fname)
2204 2144 fstate[0] = fname
2205 2145 fstate[1] = fnodes.pop(fname, {})
2206 2146
2207 2147 nodelist = prune(filerevlog, fstate[1])
2208 2148 if nodelist:
2209 2149 count[0] += 1
2210 2150 yield bundler.fileheader(fname)
2211 2151 for chunk in filerevlog.group(nodelist, bundler, reorder):
2212 2152 yield chunk
2213 2153
2214 2154 # Signal that no more groups are left.
2215 2155 yield bundler.close()
2216 2156 progress(_bundling, None)
2217 2157
2218 2158 if csets:
2219 2159 self.hook('outgoing', node=hex(csets[0]), source=source)
2220 2160
2221 2161 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2222 2162
2223 2163 def changegroup(self, basenodes, source):
2224 2164 # to avoid a race we use changegroupsubset() (issue1320)
2225 2165 return self.changegroupsubset(basenodes, self.heads(), source)
2226 2166
2227 2167 @unfilteredmethod
2228 2168 def _changegroup(self, nodes, source):
2229 2169 """Compute the changegroup of all nodes that we have that a recipient
2230 2170 doesn't. Return a chunkbuffer object whose read() method will return
2231 2171 successive changegroup chunks.
2232 2172
2233 2173 This is much easier than the previous function as we can assume that
2234 2174 the recipient has any changenode we aren't sending them.
2235 2175
2236 2176 nodes is the set of nodes to send"""
2237 2177
2238 2178 cl = self.changelog
2239 2179 mf = self.manifest
2240 2180 mfs = {}
2241 2181 changedfiles = set()
2242 2182 fstate = ['']
2243 2183 count = [0, 0]
2244 2184
2245 2185 self.hook('preoutgoing', throw=True, source=source)
2246 2186 self.changegroupinfo(nodes, source)
2247 2187
2248 2188 revset = set([cl.rev(n) for n in nodes])
2249 2189
2250 2190 def gennodelst(log):
2251 2191 ln, llr = log.node, log.linkrev
2252 2192 return [ln(r) for r in log if llr(r) in revset]
2253 2193
2254 2194 progress = self.ui.progress
2255 2195 _bundling = _('bundling')
2256 2196 _changesets = _('changesets')
2257 2197 _manifests = _('manifests')
2258 2198 _files = _('files')
2259 2199
2260 2200 def lookup(revlog, x):
2261 2201 if revlog == cl:
2262 2202 c = cl.read(x)
2263 2203 changedfiles.update(c[3])
2264 2204 mfs.setdefault(c[0], x)
2265 2205 count[0] += 1
2266 2206 progress(_bundling, count[0],
2267 2207 unit=_changesets, total=count[1])
2268 2208 return x
2269 2209 elif revlog == mf:
2270 2210 count[0] += 1
2271 2211 progress(_bundling, count[0],
2272 2212 unit=_manifests, total=count[1])
2273 2213 return cl.node(revlog.linkrev(revlog.rev(x)))
2274 2214 else:
2275 2215 progress(_bundling, count[0], item=fstate[0],
2276 2216 total=count[1], unit=_files)
2277 2217 return cl.node(revlog.linkrev(revlog.rev(x)))
2278 2218
2279 2219 bundler = changegroup.bundle10(lookup)
2280 2220 reorder = self.ui.config('bundle', 'reorder', 'auto')
2281 2221 if reorder == 'auto':
2282 2222 reorder = None
2283 2223 else:
2284 2224 reorder = util.parsebool(reorder)
2285 2225
2286 2226 def gengroup():
2287 2227 '''yield a sequence of changegroup chunks (strings)'''
2288 2228 # construct a list of all changed files
2289 2229
2290 2230 count[:] = [0, len(nodes)]
2291 2231 for chunk in cl.group(nodes, bundler, reorder=reorder):
2292 2232 yield chunk
2293 2233 progress(_bundling, None)
2294 2234
2295 2235 count[:] = [0, len(mfs)]
2296 2236 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2297 2237 yield chunk
2298 2238 progress(_bundling, None)
2299 2239
2300 2240 count[:] = [0, len(changedfiles)]
2301 2241 for fname in sorted(changedfiles):
2302 2242 filerevlog = self.file(fname)
2303 2243 if not len(filerevlog):
2304 2244 raise util.Abort(_("empty or missing revlog for %s")
2305 2245 % fname)
2306 2246 fstate[0] = fname
2307 2247 nodelist = gennodelst(filerevlog)
2308 2248 if nodelist:
2309 2249 count[0] += 1
2310 2250 yield bundler.fileheader(fname)
2311 2251 for chunk in filerevlog.group(nodelist, bundler, reorder):
2312 2252 yield chunk
2313 2253 yield bundler.close()
2314 2254 progress(_bundling, None)
2315 2255
2316 2256 if nodes:
2317 2257 self.hook('outgoing', node=hex(nodes[0]), source=source)
2318 2258
2319 2259 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2320 2260
2321 2261 @unfilteredmethod
2322 2262 def addchangegroup(self, source, srctype, url, emptyok=False):
2323 2263 """Add the changegroup returned by source.read() to this repo.
2324 2264 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2325 2265 the URL of the repo where this changegroup is coming from.
2326 2266
2327 2267 Return an integer summarizing the change to this repo:
2328 2268 - nothing changed or no source: 0
2329 2269 - more heads than before: 1+added heads (2..n)
2330 2270 - fewer heads than before: -1-removed heads (-2..-n)
2331 2271 - number of heads stays the same: 1
2332 2272 """
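# Return-value sketch based on the rules above: a pull that adds two new
# heads returns 3 (dh=2, i.e. 1 + added heads), while one that merges two
# heads into one returns -2 (dh=-1, i.e. -1 - removed heads).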
2333 2273 def csmap(x):
2334 2274 self.ui.debug("add changeset %s\n" % short(x))
2335 2275 return len(cl)
2336 2276
2337 2277 def revmap(x):
2338 2278 return cl.rev(x)
2339 2279
2340 2280 if not source:
2341 2281 return 0
2342 2282
2343 2283 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2344 2284
2345 2285 changesets = files = revisions = 0
2346 2286 efiles = set()
2347 2287
2348 2288 # write changelog data to temp files so concurrent readers will not see
2349 2289 # an inconsistent view
2350 2290 cl = self.changelog
2351 2291 cl.delayupdate()
2352 2292 oldheads = cl.heads()
2353 2293
2354 2294 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2355 2295 try:
2356 2296 trp = weakref.proxy(tr)
2357 2297 # pull off the changeset group
2358 2298 self.ui.status(_("adding changesets\n"))
2359 2299 clstart = len(cl)
2360 2300 class prog(object):
2361 2301 step = _('changesets')
2362 2302 count = 1
2363 2303 ui = self.ui
2364 2304 total = None
2365 2305 def __call__(self):
2366 2306 self.ui.progress(self.step, self.count, unit=_('chunks'),
2367 2307 total=self.total)
2368 2308 self.count += 1
2369 2309 pr = prog()
2370 2310 source.callback = pr
2371 2311
2372 2312 source.changelogheader()
2373 2313 srccontent = cl.addgroup(source, csmap, trp)
2374 2314 if not (srccontent or emptyok):
2375 2315 raise util.Abort(_("received changelog group is empty"))
2376 2316 clend = len(cl)
2377 2317 changesets = clend - clstart
2378 2318 for c in xrange(clstart, clend):
2379 2319 efiles.update(self[c].files())
2380 2320 efiles = len(efiles)
2381 2321 self.ui.progress(_('changesets'), None)
2382 2322
2383 2323 # pull off the manifest group
2384 2324 self.ui.status(_("adding manifests\n"))
2385 2325 pr.step = _('manifests')
2386 2326 pr.count = 1
2387 2327 pr.total = changesets # manifests <= changesets
2388 2328 # no need to check for empty manifest group here:
2389 2329 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2390 2330 # no new manifest will be created and the manifest group will
2391 2331 # be empty during the pull
2392 2332 source.manifestheader()
2393 2333 self.manifest.addgroup(source, revmap, trp)
2394 2334 self.ui.progress(_('manifests'), None)
2395 2335
2396 2336 needfiles = {}
2397 2337 if self.ui.configbool('server', 'validate', default=False):
2398 2338 # validate incoming csets have their manifests
2399 2339 for cset in xrange(clstart, clend):
2400 2340 mfest = self.changelog.read(self.changelog.node(cset))[0]
2401 2341 mfest = self.manifest.readdelta(mfest)
2402 2342 # store file nodes we must see
2403 2343 for f, n in mfest.iteritems():
2404 2344 needfiles.setdefault(f, set()).add(n)
2405 2345
2406 2346 # process the files
2407 2347 self.ui.status(_("adding file changes\n"))
2408 2348 pr.step = _('files')
2409 2349 pr.count = 1
2410 2350 pr.total = efiles
2411 2351 source.callback = None
2412 2352
2413 2353 while True:
2414 2354 chunkdata = source.filelogheader()
2415 2355 if not chunkdata:
2416 2356 break
2417 2357 f = chunkdata["filename"]
2418 2358 self.ui.debug("adding %s revisions\n" % f)
2419 2359 pr()
2420 2360 fl = self.file(f)
2421 2361 o = len(fl)
2422 2362 if not fl.addgroup(source, revmap, trp):
2423 2363 raise util.Abort(_("received file revlog group is empty"))
2424 2364 revisions += len(fl) - o
2425 2365 files += 1
2426 2366 if f in needfiles:
2427 2367 needs = needfiles[f]
2428 2368 for new in xrange(o, len(fl)):
2429 2369 n = fl.node(new)
2430 2370 if n in needs:
2431 2371 needs.remove(n)
2432 2372 if not needs:
2433 2373 del needfiles[f]
2434 2374 self.ui.progress(_('files'), None)
2435 2375
2436 2376 for f, needs in needfiles.iteritems():
2437 2377 fl = self.file(f)
2438 2378 for n in needs:
2439 2379 try:
2440 2380 fl.rev(n)
2441 2381 except error.LookupError:
2442 2382 raise util.Abort(
2443 2383 _('missing file data for %s:%s - run hg verify') %
2444 2384 (f, hex(n)))
2445 2385
2446 2386 dh = 0
2447 2387 if oldheads:
2448 2388 heads = cl.heads()
2449 2389 dh = len(heads) - len(oldheads)
2450 2390 for h in heads:
2451 2391 if h not in oldheads and self[h].closesbranch():
2452 2392 dh -= 1
2453 2393 htext = ""
2454 2394 if dh:
2455 2395 htext = _(" (%+d heads)") % dh
2456 2396
2457 2397 self.ui.status(_("added %d changesets"
2458 2398 " with %d changes to %d files%s\n")
2459 2399 % (changesets, revisions, files, htext))
2460 2400 self.invalidatevolatilesets()
2461 2401
2462 2402 if changesets > 0:
2463 2403 p = lambda: cl.writepending() and self.root or ""
2464 2404 self.hook('pretxnchangegroup', throw=True,
2465 2405 node=hex(cl.node(clstart)), source=srctype,
2466 2406 url=url, pending=p)
2467 2407
2468 2408 added = [cl.node(r) for r in xrange(clstart, clend)]
2469 2409 publishing = self.ui.configbool('phases', 'publish', True)
2470 2410 if srctype == 'push':
2471 2411 # Old servers cannot push the boundary themselves.
2472 2412 # New servers won't push the boundary if the changeset already
2473 2413 # existed locally as secret
2474 2414 #
2475 2415 # We should not use added here but the list of all changes in
2476 2416 # the bundle
2477 2417 if publishing:
2478 2418 phases.advanceboundary(self, phases.public, srccontent)
2479 2419 else:
2480 2420 phases.advanceboundary(self, phases.draft, srccontent)
2481 2421 phases.retractboundary(self, phases.draft, added)
2482 2422 elif srctype != 'strip':
2483 2423 # publishing only alters behavior during push
2484 2424 #
2485 2425 # strip should not touch the boundary at all
2486 2426 phases.retractboundary(self, phases.draft, added)
2487 2427
2488 2428 # make changelog see real files again
2489 2429 cl.finalize(trp)
2490 2430
2491 2431 tr.close()
2492 2432
2493 2433 if changesets > 0:
2494 2434 self.updatebranchcache()
2495 2435 def runhooks():
2496 2436 # forcefully update the on-disk branch cache
2497 2437 self.ui.debug("updating the branch cache\n")
2498 2438 self.hook("changegroup", node=hex(cl.node(clstart)),
2499 2439 source=srctype, url=url)
2500 2440
2501 2441 for n in added:
2502 2442 self.hook("incoming", node=hex(n), source=srctype,
2503 2443 url=url)
2504 2444 self._afterlock(runhooks)
2505 2445
2506 2446 finally:
2507 2447 tr.release()
2508 2448 # never return 0 here:
2509 2449 if dh < 0:
2510 2450 return dh - 1
2511 2451 else:
2512 2452 return dh + 1
2513 2453
2514 2454 def stream_in(self, remote, requirements):
2515 2455 lock = self.lock()
2516 2456 try:
2517 2457 # Save remote branchmap. We will use it later
2518 2458 # to speed up branchcache creation
2519 2459 rbranchmap = None
2520 2460 if remote.capable("branchmap"):
2521 2461 rbranchmap = remote.branchmap()
2522 2462
2523 2463 fp = remote.stream_out()
2524 2464 l = fp.readline()
2525 2465 try:
2526 2466 resp = int(l)
2527 2467 except ValueError:
2528 2468 raise error.ResponseError(
2529 2469 _('unexpected response from remote server:'), l)
2530 2470 if resp == 1:
2531 2471 raise util.Abort(_('operation forbidden by server'))
2532 2472 elif resp == 2:
2533 2473 raise util.Abort(_('locking the remote repository failed'))
2534 2474 elif resp != 0:
2535 2475 raise util.Abort(_('the server sent an unknown error code'))
2536 2476 self.ui.status(_('streaming all changes\n'))
2537 2477 l = fp.readline()
2538 2478 try:
2539 2479 total_files, total_bytes = map(int, l.split(' ', 1))
2540 2480 except (ValueError, TypeError):
2541 2481 raise error.ResponseError(
2542 2482 _('unexpected response from remote server:'), l)
2543 2483 self.ui.status(_('%d files to transfer, %s of data\n') %
2544 2484 (total_files, util.bytecount(total_bytes)))
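# Wire format sketch, matching the parsing in this loop: the remote sent
# "<total files> <total bytes>" above, then sends for each file a header
# line "<name>\0<size>" followed by exactly <size> bytes of raw store data.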
2545 2485 handled_bytes = 0
2546 2486 self.ui.progress(_('clone'), 0, total=total_bytes)
2547 2487 start = time.time()
2548 2488 for i in xrange(total_files):
2549 2489 # XXX doesn't support '\n' or '\r' in filenames
2550 2490 l = fp.readline()
2551 2491 try:
2552 2492 name, size = l.split('\0', 1)
2553 2493 size = int(size)
2554 2494 except (ValueError, TypeError):
2555 2495 raise error.ResponseError(
2556 2496 _('unexpected response from remote server:'), l)
2557 2497 if self.ui.debugflag:
2558 2498 self.ui.debug('adding %s (%s)\n' %
2559 2499 (name, util.bytecount(size)))
2560 2500 # for backwards compat, name was partially encoded
2561 2501 ofp = self.sopener(store.decodedir(name), 'w')
2562 2502 for chunk in util.filechunkiter(fp, limit=size):
2563 2503 handled_bytes += len(chunk)
2564 2504 self.ui.progress(_('clone'), handled_bytes,
2565 2505 total=total_bytes)
2566 2506 ofp.write(chunk)
2567 2507 ofp.close()
2568 2508 elapsed = time.time() - start
2569 2509 if elapsed <= 0:
2570 2510 elapsed = 0.001
2571 2511 self.ui.progress(_('clone'), None)
2572 2512 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2573 2513 (util.bytecount(total_bytes), elapsed,
2574 2514 util.bytecount(total_bytes / elapsed)))
2575 2515
2576 2516 # new requirements = old non-format requirements +
2577 2517 # new format-related
2578 2518 # requirements from the streamed-in repository
2579 2519 requirements.update(set(self.requirements) - self.supportedformats)
2580 2520 self._applyrequirements(requirements)
2581 2521 self._writerequirements()
2582 2522
2583 2523 if rbranchmap:
2584 2524 rbheads = []
2585 2525 for bheads in rbranchmap.itervalues():
2586 2526 rbheads.extend(bheads)
2587 2527
2588 2528 self.branchcache = rbranchmap
2589 2529 if rbheads:
2590 2530 rtiprev = max((int(self.changelog.rev(node))
2591 2531 for node in rbheads))
2592 2532 branchmap.write(self, self.branchcache,
2593 2533 self[rtiprev].node(), rtiprev)
2594 2534 self.invalidate()
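# Note (added annotation): after a full stream clone the local store is a
# byte-for-byte copy of the remote store, so the branchmap fetched earlier
# can be persisted as-is instead of being recomputed from the changelog.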
2595 2535 return len(self.heads()) + 1
2596 2536 finally:
2597 2537 lock.release()
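# Note (added annotation): as parsed above, the 'stream_out' payload is
# assumed to be framed as: a status line carrying an integer response code,
# a header line "<total_files> <total_bytes>", then, per file, a line of the
# form "<store path>\0<size>" followed by exactly <size> bytes of raw revlog
# data. A minimal sketch of a reader for that framing, under the same
# assumptions (name and structure are illustrative only):
#
#   def read_stream(fp):
#       if int(fp.readline()) != 0:
#           raise IOError('server refused stream clone')
#       nfiles, nbytes = map(int, fp.readline().split(' ', 1))
#       for _ in range(nfiles):
#           name, size = fp.readline().split('\0', 1)
#           yield name, fp.read(int(size))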
2598 2538
2599 2539 def clone(self, remote, heads=[], stream=False):
2600 2540 '''clone remote repository.
2601 2541
2602 2542 keyword arguments:
2603 2543 heads: list of revs to clone (forces use of pull)
2604 2544 stream: use streaming clone if possible'''
2605 2545
2606 2546 # now, all clients that can request uncompressed clones can
2607 2547 # read repo formats supported by all servers that can serve
2608 2548 # them.
2609 2549
2610 2550 # if revlog format changes, client will have to check version
2611 2551 # and format flags on "stream" capability, and use
2612 2552 # uncompressed only if compatible.
2613 2553
2614 2554 if not stream:
2615 2555 # if the server explicitly prefers to stream (for fast LANs)
2616 2556 stream = remote.capable('stream-preferred')
2617 2557
2618 2558 if stream and not heads:
2619 2559 # 'stream' means remote revlog format is revlogv1 only
2620 2560 if remote.capable('stream'):
2621 2561 return self.stream_in(remote, set(('revlogv1',)))
2622 2562 # otherwise, 'streamreqs' contains the remote revlog format
2623 2563 streamreqs = remote.capable('streamreqs')
2624 2564 if streamreqs:
2625 2565 streamreqs = set(streamreqs.split(','))
2626 2566 # if we support it, stream in and adjust our requirements
2627 2567 if not streamreqs - self.supportedformats:
2628 2568 return self.stream_in(remote, streamreqs)
2629 2569 return self.pull(remote, heads)
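# Note (added annotation): the capability strings checked above are assumed
# to look like 'stream-preferred', 'stream' (plain revlogv1 stores) and
# 'streamreqs=revlogv1,generaldelta' (a comma-separated list of store
# requirements); the client falls back to a regular pull whenever specific
# heads were requested or the remote requirements are not all supported
# locally.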
2630 2570
2631 2571 def pushkey(self, namespace, key, old, new):
2632 2572 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2633 2573 old=old, new=new)
2634 2574 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2635 2575 ret = pushkey.push(self, namespace, key, old, new)
2636 2576 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2637 2577 ret=ret)
2638 2578 return ret
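# Note (added annotation): a typical call, assuming the standard 'bookmarks'
# pushkey namespace, would look roughly like
#
#   repo.pushkey('bookmarks', 'feature-x', '', hex(newnode))
#
# where the empty old value requests creation; the 'prepushkey' hook can
# veto the update because it is run with throw=True.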
2639 2579
2640 2580 def listkeys(self, namespace):
2641 2581 self.hook('prelistkeys', throw=True, namespace=namespace)
2642 2582 self.ui.debug('listing keys for "%s"\n' % namespace)
2643 2583 values = pushkey.list(self, namespace)
2644 2584 self.hook('listkeys', namespace=namespace, values=values)
2645 2585 return values
2646 2586
2647 2587 def debugwireargs(self, one, two, three=None, four=None, five=None):
2648 2588 '''used to test argument passing over the wire'''
2649 2589 return "%s %s %s %s %s" % (one, two, three, four, five)
2650 2590
2651 2591 def savecommitmessage(self, text):
2652 2592 fp = self.opener('last-message.txt', 'wb')
2653 2593 try:
2654 2594 fp.write(text)
2655 2595 finally:
2656 2596 fp.close()
2657 2597 return self.pathto(fp.name[len(self.root) + 1:])
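# Note (added annotation): the file written above is assumed to be
# .hg/last-message.txt, which lets a user recover an aborted commit message,
# e.g. with 'hg commit --logfile .hg/last-message.txt'; the method returns
# that path in a form usable from the current working directory.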
2658 2598
2659 2599 # used to avoid circular references so destructors work
2660 2600 def aftertrans(files):
2661 2601 renamefiles = [tuple(t) for t in files]
2662 2602 def a():
2663 2603 for src, dest in renamefiles:
2664 2604 try:
2665 2605 util.rename(src, dest)
2666 2606 except OSError: # journal file does not yet exist
2667 2607 pass
2668 2608 return a
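# Note (added annotation): the closure returned above is assumed to be handed
# to the journalling transaction as its post-close callback, roughly
#
#   tr = transaction.transaction(self.ui.warn, self.sopener,
#                                self.sjoin("journal"), aftertrans(renames))
#
# so journal files are renamed to their undo counterparts only once the
# transaction commits, without the closure holding a reference to the
# repository object.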
2669 2609
2670 2610 def undoname(fn):
2671 2611 base, name = os.path.split(fn)
2672 2612 assert name.startswith('journal')
2673 2613 return os.path.join(base, name.replace('journal', 'undo', 1))
2674 2614
2675 2615 def instance(ui, path, create):
2676 2616 return localrepository(ui, util.urllocalpath(path), create)
2677 2617
2678 2618 def islocal(path):
2679 2619 return True