branchmap: pass revision instead of changectx to the update function...
Pierre-Yves David
r18305:2502a15e default
@@ -1,223 +1,223
1 1 # branchmap.py - logic to compute, maintain and store the branchmap for a local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev
9 9 import encoding
10 10 import util, repoview
11 11
12 12 def _filename(repo):
13 13 """name of a branchcache file for a given repo or repoview"""
14 14 filename = "cache/branchheads"
15 15 if repo.filtername:
16 16 filename = '%s-%s' % (filename, repo.filtername)
17 17 return filename
18 18
19 19 def read(repo):
20 20 try:
21 21 f = repo.opener(_filename(repo))
22 22 lines = f.read().split('\n')
23 23 f.close()
24 24 except (IOError, OSError):
25 25 return None
26 26
27 27 try:
28 28 cachekey = lines.pop(0).split(" ", 2)
29 29 last, lrev = cachekey[:2]
30 30 last, lrev = bin(last), int(lrev)
31 31 filteredhash = None
32 32 if len(cachekey) > 2:
33 33 filteredhash = bin(cachekey[2])
34 34 partial = branchcache(tipnode=last, tiprev=lrev,
35 35 filteredhash=filteredhash)
36 36 if not partial.validfor(repo):
37 37 # invalidate the cache
38 38 raise ValueError('tip differs')
39 39 for l in lines:
40 40 if not l:
41 41 continue
42 42 node, label = l.split(" ", 1)
43 43 label = encoding.tolocal(label.strip())
44 44 if node not in repo:
45 45 raise ValueError('node %s does not exist' % node)
46 46 partial.setdefault(label, []).append(bin(node))
47 47 except KeyboardInterrupt:
48 48 raise
49 49 except Exception, inst:
50 50 if repo.ui.debugflag:
51 51 msg = 'invalid branchheads cache'
52 52 if repo.filtername is not None:
53 53 msg += ' (%s)' % repo.filtername
54 54 msg += ': %s\n'
55 55 repo.ui.warn(msg % inst)
56 56 partial = None
57 57 return partial
58 58
59 59
60 60
61 61 def updatecache(repo):
62 62 cl = repo.changelog
63 63 filtername = repo.filtername
64 64 partial = repo._branchcaches.get(filtername)
65 65
66 66 revs = []
67 67 if partial is None or not partial.validfor(repo):
68 68 partial = read(repo)
69 69 if partial is None:
70 70 subsetname = repoview.subsettable.get(filtername)
71 71 if subsetname is None:
72 72 partial = branchcache()
73 73 else:
74 74 subset = repo.filtered(subsetname)
75 75 partial = subset.branchmap().copy()
76 76 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
77 77 revs.extend(r for r in extrarevs if r <= partial.tiprev)
78 78 revs.extend(cl.revs(start=partial.tiprev + 1))
79 79 if revs:
80 ctxgen = (repo[r] for r in revs)
81 partial.update(repo, ctxgen)
80 partial.update(repo, revs)
82 81 partial.write(repo)
83 82 assert partial.validfor(repo)
84 83 repo._branchcaches[repo.filtername] = partial
85 84
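# updatecache() resolution order, summarized from the function above:
# reuse the in-memory cache for this filter while it is still valid;
# otherwise reload it from disk via read(); otherwise copy the
# branchmap of the nearest broader repoview (repoview.subsettable);
# otherwise start from an empty branchcache. Any revisions not yet
# covered are then handed to partial.update() as plain revision
# numbers.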
86 85 class branchcache(dict):
87 86 """A dict like object that hold branches heads cache"""
88 87
89 88 def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
90 89 filteredhash=None):
91 90 super(branchcache, self).__init__(entries)
92 91 self.tipnode = tipnode
93 92 self.tiprev = tiprev
94 93 self.filteredhash = filteredhash
95 94
96 95 def _hashfiltered(self, repo):
97 96 """build hash of revision filtered in the current cache
98 97
99 98 Tracking tipnode and tiprev is not enough to ensure validaty of the
100 99 cache as they do not help to distinct cache that ignored various
101 100 revision bellow tiprev.
102 101
103 102 To detect such difference, we build a cache of all ignored revisions.
104 103 """
105 104 cl = repo.changelog
106 105 if not cl.filteredrevs:
107 106 return None
108 107 key = None
109 108 revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
110 109 if revs:
111 110 s = util.sha1()
112 111 for rev in revs:
113 112 s.update('%s;' % rev)
114 113 key = s.digest()
115 114 return key
116 115
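# A concrete case the filtered hash guards against: two repoviews can
# share the same tipnode/tiprev while filtering different revisions
# below that tip; hashing the sorted filtered revs ('%s;' per rev, as
# above) keeps their caches distinguishable.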
117 116 def validfor(self, repo):
118 117 """Is the cache content valide regarding a repo
119 118
120 119 - False when cached tipnode are unknown or if we detect a strip.
121 120 - True when cache is up to date or a subset of current repo."""
122 121 try:
123 122 return ((self.tipnode == repo.changelog.node(self.tiprev))
124 123 and (self.filteredhash == self._hashfiltered(repo)))
125 124 except IndexError:
126 125 return False
127 126
128 127 def copy(self):
129 128 """return an deep copy of the branchcache object"""
130 129 return branchcache(self, self.tipnode, self.tiprev, self.filteredhash)
131 130
132 131 def write(self, repo):
133 132 try:
134 133 f = repo.opener(_filename(repo), "w", atomictemp=True)
135 134 cachekey = [hex(self.tipnode), str(self.tiprev)]
136 135 if self.filteredhash is not None:
137 136 cachekey.append(hex(self.filteredhash))
138 137 f.write(" ".join(cachekey) + '\n')
139 138 for label, nodes in self.iteritems():
140 139 for node in nodes:
141 140 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
142 141 f.close()
143 142 except (IOError, OSError, util.Abort):
144 143 # Abort may be raised by a read-only opener
145 144 pass
146 145
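# On-disk layout parsed by read() and produced by write() above
# (field names illustrative):
#
#   <tipnode-hex> <tiprev> [<filteredhash-hex>]    cache key line
#   <head-node-hex> <branch-label>                 one line per head
#   ...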
147 def update(self, repo, ctxgen):
146 def update(self, repo, revgen):
148 147 """Given a branchhead cache, self, that may have extra nodes or be
149 148 missing heads, and a generator of nodes that are at least a superset of
150 149 heads missing, this function updates self to be correct.
151 150 """
152 151 cl = repo.changelog
152 ctxgen = (repo[r] for r in revgen)
153 153 # collect new branch entries
154 154 newbranches = {}
155 155 for c in ctxgen:
156 156 newbranches.setdefault(c.branch(), []).append(c.node())
157 157 # if older branchheads are reachable from new ones, they aren't
158 158 # really branchheads. Note checking parents is insufficient:
159 159 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
160 160 for branch, newnodes in newbranches.iteritems():
161 161 bheads = self.setdefault(branch, [])
162 162 # Remove candidate heads that no longer are in the repo (e.g., as
163 163 # the result of a strip that just happened). Avoid using 'node in
164 164 # self' here because that dives down into branchcache code somewhat
165 165 # recursively.
166 166 bheadrevs = [cl.rev(node) for node in bheads
167 167 if cl.hasnode(node)]
168 168 newheadrevs = [cl.rev(node) for node in newnodes
169 169 if cl.hasnode(node)]
170 170 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
171 171 # Remove duplicates - nodes that are in newheadrevs and are already
172 172 # in bheadrevs. This can happen if you strip a node whose parent
173 173 # was already a head (because they're on different branches).
174 174 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
175 175
176 176 # Starting from tip means fewer passes over reachable. If we know
177 177 # the new candidates are not ancestors of existing heads, we don't
178 178 # have to examine ancestors of existing heads
179 179 if ctxisnew:
180 180 iterrevs = sorted(newheadrevs)
181 181 else:
182 182 iterrevs = list(bheadrevs)
183 183
184 184 # This loop prunes out two kinds of heads - heads that are
185 185 # superseded by a head in newheadrevs, and newheadrevs that are not
186 186 # heads because an existing head is their descendant.
187 187 while iterrevs:
188 188 latest = iterrevs.pop()
189 189 if latest not in bheadrevs:
190 190 continue
191 191 ancestors = set(cl.ancestors([latest],
192 192 bheadrevs[0]))
193 193 if ancestors:
194 194 bheadrevs = [b for b in bheadrevs if b not in ancestors]
195 195 self[branch] = [cl.node(rev) for rev in bheadrevs]
196 196 tiprev = max(bheadrevs)
197 197 if tiprev > self.tiprev:
198 198 self.tipnode = cl.node(tiprev)
199 199 self.tiprev = tiprev
200 200
201 201 # There may be branches that cease to exist when the last commit in the
202 202 # branch was stripped. This code filters them out. Note that the
203 203 # branch that ceased to exist may not be in newbranches because
204 204 # newbranches is the set of candidate heads; after you strip the
205 205 # last commit in a branch, those heads belong to the parent branch.
206 206 droppednodes = []
207 207 for branch in self.keys():
208 208 nodes = [head for head in self[branch]
209 209 if cl.hasnode(head)]
210 210 if not nodes:
211 211 droppednodes.extend(nodes)
212 212 del self[branch]
213 213 if ((not self.validfor(repo)) or (self.tipnode in droppednodes)):
214 214
215 215 # cache keys are not valid anymore
216 216 self.tipnode = nullid
217 217 self.tiprev = nullrev
218 218 for heads in self.values():
219 219 tiprev = max(cl.rev(node) for node in heads)
220 220 if tiprev > self.tiprev:
221 221 self.tipnode = cl.node(tiprev)
222 222 self.tiprev = tiprev
223 223 self.filteredhash = self._hashfiltered(repo)
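The heart of this changeset, condensed from the hunk above: branchcache.update() now takes plain revision numbers and builds the changectx objects itself, so call sites no longer have to. A minimal before/after sketch using the names from the diff:

# before this change: each caller materialized contexts itself
ctxgen = (repo[r] for r in revs)
partial.update(repo, ctxgen)

# after this change: callers pass revision numbers; update()
# converts them internally via repo[r]
partial.update(repo, revs)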
@@ -1,339 +1,339
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, short
9 9 from i18n import _
10 10 import util, setdiscovery, treediscovery, phases, obsolete, bookmarks
11 11 import branchmap
12 12
13 13 def findcommonincoming(repo, remote, heads=None, force=False):
14 14 """Return a tuple (common, anyincoming, heads) used to identify the common
15 15 subset of nodes between repo and remote.
16 16
17 17 "common" is a list of (at least) the heads of the common subset.
18 18 "anyincoming" is testable as a boolean indicating if any nodes are missing
19 19 locally. If remote does not support getbundle, this actually is a list of
20 20 roots of the nodes that would be incoming, to be supplied to
21 21 changegroupsubset. No code except for pull should be relying on this fact
22 22 any longer.
23 23 "heads" is either the supplied heads, or else the remote's heads.
24 24
25 25 If you pass heads and they are all known locally, the response lists just
26 26 these heads in "common" and in "heads".
27 27
28 28 Please use findcommonoutgoing to compute the set of outgoing nodes to give
29 29 extensions a good hook into outgoing.
30 30 """
31 31
32 32 if not remote.capable('getbundle'):
33 33 return treediscovery.findcommonincoming(repo, remote, heads, force)
34 34
35 35 if heads:
36 36 allknown = True
37 37 nm = repo.changelog.nodemap
38 38 for h in heads:
39 39 if nm.get(h) is None:
40 40 allknown = False
41 41 break
42 42 if allknown:
43 43 return (heads, False, heads)
44 44
45 45 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
46 46 abortwhenunrelated=not force)
47 47 common, anyinc, srvheads = res
48 48 return (list(common), anyinc, heads or list(srvheads))
49 49
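# Illustrative call, to make the return shape concrete ('remote' is
# any peer object; names hypothetical):
#
#   common, anyinc, heads = findcommonincoming(repo, remote)
#   # common: heads (at least) of the shared subset
#   # anyinc: boolean-testable "is anything incoming?"
#   # heads: the supplied heads, or else the remote's heads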
50 50 class outgoing(object):
51 51 '''Represents the set of nodes present in a local repo but not in a
52 52 (possibly) remote one.
53 53
54 54 Members:
55 55
56 56 missing is a list of all nodes present in local but not in remote.
57 57 common is a list of all nodes shared between the two repos.
58 58 excluded is the list of missing changesets that shouldn't be sent remotely.
59 59 missingheads is the list of heads of missing.
60 60 commonheads is the list of heads of common.
61 61
62 62 The sets are computed on demand from the heads, unless provided upfront
63 63 by discovery.'''
64 64
65 65 def __init__(self, revlog, commonheads, missingheads):
66 66 self.commonheads = commonheads
67 67 self.missingheads = missingheads
68 68 self._revlog = revlog
69 69 self._common = None
70 70 self._missing = None
71 71 self.excluded = []
72 72
73 73 def _computecommonmissing(self):
74 74 sets = self._revlog.findcommonmissing(self.commonheads,
75 75 self.missingheads)
76 76 self._common, self._missing = sets
77 77
78 78 @util.propertycache
79 79 def common(self):
80 80 if self._common is None:
81 81 self._computecommonmissing()
82 82 return self._common
83 83
84 84 @util.propertycache
85 85 def missing(self):
86 86 if self._missing is None:
87 87 self._computecommonmissing()
88 88 return self._missing
89 89
90 90 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
91 91 commoninc=None, portable=False):
92 92 '''Return an outgoing instance to identify the nodes present in repo but
93 93 not in other.
94 94
95 95 If onlyheads is given, only nodes ancestral to nodes in onlyheads
96 96 (inclusive) are included. If you already know the local repo's heads,
97 97 passing them in onlyheads is faster than letting them be recomputed here.
98 98
99 99 If commoninc is given, it must be the result of a prior call to
100 100 findcommonincoming(repo, other, force) to avoid recomputing it here.
101 101
102 102 If portable is given, compute more conservative common and missingheads,
103 103 to make bundles created from the instance more portable.'''
104 104 # declare an empty outgoing object to be filled later
105 105 og = outgoing(repo.changelog, None, None)
106 106
107 107 # get common set if not provided
108 108 if commoninc is None:
109 109 commoninc = findcommonincoming(repo, other, force=force)
110 110 og.commonheads, _any, _hds = commoninc
111 111
112 112 # compute outgoing
113 113 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
114 114 if not mayexclude:
115 115 og.missingheads = onlyheads or repo.heads()
116 116 elif onlyheads is None:
117 117 # use visible heads as it should be cached
118 118 og.missingheads = repo.filtered("unserved").heads()
119 119 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
120 120 else:
121 121 # compute common, missing and exclude secret stuff
122 122 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
123 123 og._common, allmissing = sets
124 124 og._missing = missing = []
125 125 og.excluded = excluded = []
126 126 for node in allmissing:
127 127 ctx = repo[node]
128 128 if ctx.phase() >= phases.secret or ctx.extinct():
129 129 excluded.append(node)
130 130 else:
131 131 missing.append(node)
132 132 if len(missing) == len(allmissing):
133 133 missingheads = onlyheads
134 134 else: # update missing heads
135 135 missingheads = phases.newheads(repo, onlyheads, excluded)
136 136 og.missingheads = missingheads
137 137 if portable:
138 138 # recompute common and missingheads as if -r<rev> had been given for
139 139 # each head of missing, and --base <rev> for each head of the proper
140 140 # ancestors of missing
141 141 og._computecommonmissing()
142 142 cl = repo.changelog
143 143 missingrevs = set(cl.rev(n) for n in og._missing)
144 144 og._common = set(cl.ancestors(missingrevs)) - missingrevs
145 145 commonheads = set(og.commonheads)
146 146 og.missingheads = [h for h in og.missingheads if h not in commonheads]
147 147
148 148 return og
149 149
150 150 def _headssummary(repo, remote, outgoing):
151 151 """compute a summary of branch and heads status before and after push
152 152
153 153 return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping
154 154
155 155 - branch: the branch name
156 156 - remoteheads: the list of remote heads known locally
157 157 None if the branch is new
158 158 - newheads: the new remote heads (known locally) with outgoing pushed
159 159 - unsyncedheads: the list of remote heads unknown locally.
160 160 """
161 161 cl = repo.changelog
162 162 headssum = {}
163 163 # A. Create set of branches involved in the push.
164 164 branches = set(repo[n].branch() for n in outgoing.missing)
165 165 remotemap = remote.branchmap()
166 166 newbranches = branches - set(remotemap)
167 167 branches.difference_update(newbranches)
168 168
169 169 # B. register remote heads
170 170 remotebranches = set()
171 171 for branch, heads in remote.branchmap().iteritems():
172 172 remotebranches.add(branch)
173 173 known = []
174 174 unsynced = []
175 175 for h in heads:
176 176 if h in cl.nodemap:
177 177 known.append(h)
178 178 else:
179 179 unsynced.append(h)
180 180 headssum[branch] = (known, list(known), unsynced)
181 181 # C. add new branch data
182 182 missingctx = list(repo[n] for n in outgoing.missing)
183 183 touchedbranches = set()
184 184 for ctx in missingctx:
185 185 branch = ctx.branch()
186 186 touchedbranches.add(branch)
187 187 if branch not in headssum:
188 188 headssum[branch] = (None, [], [])
189 189
190 190 # D. drop data about untouched branches:
191 191 for branch in remotebranches - touchedbranches:
192 192 del headssum[branch]
193 193
194 194 # E. Update newmap with outgoing changes.
195 195 # This will possibly add new heads and remove existing ones.
196 196 newmap = branchmap.branchcache((branch, heads[1])
197 197 for branch, heads in headssum.iteritems()
198 198 if heads[0] is not None)
199 newmap.update(repo, missingctx)
199 newmap.update(repo, (ctx.rev() for ctx in missingctx))
200 200 for branch, newheads in newmap.iteritems():
201 201 headssum[branch][1][:] = newheads
202 202 return headssum
203 203
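# Shape of the mapping returned above, per the docstring, e.g.:
#
#   headssum['default'] == ([remoteheads], [newheads], [unsyncedheads])
#   headssum['topic']   == (None, [], [])   # branch new to the remote
#
# ('topic' is a hypothetical branch name used for illustration.)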
204 204 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
205 205 """Compute branchmapsummary for repo without branchmap support"""
206 206
207 207 cl = repo.changelog
208 208 # 1-4b. old servers: Check for new topological heads.
209 209 # Construct {old,new}map with branch = None (topological branch).
210 210 # (code based on update)
211 211 oldheads = set(h for h in remoteheads if h in cl.nodemap)
212 212 # all nodes in outgoing.missing are children of either:
213 213 # - an element of oldheads
214 214 # - another element of outgoing.missing
215 215 # - nullrev
216 216 # This explains why the new heads are very simple to compute.
217 217 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
218 218 newheads = list(c.node() for c in r)
219 219 unsynced = inc and set([None]) or set()
220 220 return {None: (oldheads, newheads, unsynced)}
221 221
222 222 def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
223 223 """Check that a push won't add any outgoing head
224 224
225 225 Raise an Abort error and display a ui message as needed.
226 226 """
227 227 # Check for each named branch if we're creating new remote heads.
228 228 # To be a remote head after push, node must be either:
229 229 # - unknown locally
230 230 # - a local outgoing head descended from update
231 231 # - a remote head that's known locally and not
232 232 # ancestral to an outgoing head
233 233 if remoteheads == [nullid]:
234 234 # remote is empty, nothing to check.
235 235 return
236 236
237 237 if remote.capable('branchmap'):
238 238 headssum = _headssummary(repo, remote, outgoing)
239 239 else:
240 240 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
241 241 newbranches = [branch for branch, heads in headssum.iteritems()
242 242 if heads[0] is None]
243 243 # 1. Check for new branches on the remote.
244 244 if newbranches and not newbranch: # new branch requires --new-branch
245 245 branchnames = ', '.join(sorted(newbranches))
246 246 raise util.Abort(_("push creates new remote branches: %s!")
247 247 % branchnames,
248 248 hint=_("use 'hg push --new-branch' to create"
249 249 " new remote branches"))
250 250
251 251 # 2. compute newly pushed bookmarks. We
252 252 # don't warn about bookmarked heads.
253 253 localbookmarks = repo._bookmarks
254 254 remotebookmarks = remote.listkeys('bookmarks')
255 255 bookmarkedheads = set()
256 256 for bm in localbookmarks:
257 257 rnode = remotebookmarks.get(bm)
258 258 if rnode and rnode in repo:
259 259 lctx, rctx = repo[bm], repo[rnode]
260 260 if bookmarks.validdest(repo, rctx, lctx):
261 261 bookmarkedheads.add(lctx.node())
262 262
263 263 # 3. Check for new heads.
264 264 # If there are more heads after the push than before, a suitable
265 265 # error message, depending on unsynced status, is displayed.
266 266 error = None
267 267 unsynced = False
268 268 allmissing = set(outgoing.missing)
269 269 allfuturecommon = set(c.node() for c in repo.set('%ld', outgoing.common))
270 270 allfuturecommon.update(allmissing)
271 271 for branch, heads in headssum.iteritems():
272 272 if heads[0] is None:
273 273 # Maybe we should abort if we push more than one head
274 274 # for new branches?
275 275 continue
276 276 candidate_newhs = set(heads[1])
277 277 # add unsynced data
278 278 oldhs = set(heads[0])
279 279 oldhs.update(heads[2])
280 280 candidate_newhs.update(heads[2])
281 281 dhs = None
282 282 discardedheads = set()
283 283 if repo.obsstore:
284 284 # remove future heads which are actually obsolete by another
285 285 # pushed element:
286 286 #
287 287 # XXX as above, there are several cases this code does not handle
288 288 # XXX properly
289 289 #
290 290 # (1) if <nh> is public, it won't be affected by obsolete markers
291 291 # and a new head is created
292 292 #
293 293 # (2) if the new heads have ancestors which are not obsolete and
294 294 # not ancestors of any other heads, we will have a new head too.
295 295 #
296 296 # These two cases will be easy to handle for known changesets but much
297 297 # more tricky for unsynced changes.
298 298 newhs = set()
299 299 for nh in candidate_newhs:
300 300 if nh in repo and repo[nh].phase() <= phases.public:
301 301 newhs.add(nh)
302 302 else:
303 303 for suc in obsolete.allsuccessors(repo.obsstore, [nh]):
304 304 if suc != nh and suc in allfuturecommon:
305 305 discardedheads.add(nh)
306 306 break
307 307 else:
308 308 newhs.add(nh)
309 309 else:
310 310 newhs = candidate_newhs
311 311 if [h for h in heads[2] if h not in discardedheads]:
312 312 unsynced = True
313 313 if len(newhs) > len(oldhs):
314 314 # strip updates to existing remote heads from the new heads list
315 315 dhs = list(newhs - bookmarkedheads - oldhs)
316 316 if dhs:
317 317 if error is None:
318 318 if branch not in ('default', None):
319 319 error = _("push creates new remote head %s "
320 320 "on branch '%s'!") % (short(dhs[0]), branch)
321 321 else:
322 322 error = _("push creates new remote head %s!"
323 323 ) % short(dhs[0])
324 324 if heads[2]: # unsynced
325 325 hint = _("you should pull and merge or "
326 326 "use push -f to force")
327 327 else:
328 328 hint = _("did you forget to merge? "
329 329 "use push -f to force")
330 330 if branch is not None:
331 331 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
332 332 for h in dhs:
333 333 repo.ui.note(_("new remote head %s\n") % short(h))
334 334 if error:
335 335 raise util.Abort(error, hint=hint)
336 336
337 337 # 6. Check for unsynced changes on involved branches.
338 338 if unsynced:
339 339 repo.ui.warn(_("note: unsynced remote changes!\n"))
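Seen from the discovery side, the convention change is a one-line adaptation: _headssummary() keeps working with changectx objects for its own bookkeeping and converts them back to revisions at the call site, as the hunk above shows:

# before
newmap.update(repo, missingctx)
# after: hand update() a generator of revision numbers instead
newmap.update(repo, (ctx.rev() for ctx in missingctx))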
@@ -1,2561 +1,2562
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class repofilecache(filecache):
23 23 """All filecache usage on repo are done for logic that should be unfiltered
24 24 """
25 25
26 26 def __get__(self, repo, type=None):
27 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 28 def __set__(self, repo, value):
29 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 30 def __delete__(self, repo):
31 31 return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
33 33 class storecache(repofilecache):
34 34 """filecache for files in the store"""
35 35 def join(self, obj, fname):
36 36 return obj.sjoin(fname)
37 37
38 38 class unfilteredpropertycache(propertycache):
39 39 """propertycache that apply to unfiltered repo only"""
40 40
41 41 def __get__(self, repo, type=None):
42 42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43 43
44 44 class filteredpropertycache(propertycache):
45 45 """propertycache that must take filtering in account"""
46 46
47 47 def cachevalue(self, obj, value):
48 48 object.__setattr__(obj, self.name, value)
49 49
50 50
51 51 def hasunfilteredcache(repo, name):
52 52 """check if an repo and a unfilteredproperty cached value for <name>"""
53 53 return name in vars(repo.unfiltered())
54 54
55 55 def unfilteredmethod(orig):
56 56 """decorate method that always need to be run on unfiltered version"""
57 57 def wrapper(repo, *args, **kwargs):
58 58 return orig(repo.unfiltered(), *args, **kwargs)
59 59 return wrapper
60 60
61 61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
62 62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63 63
64 64 class localpeer(peer.peerrepository):
65 65 '''peer for a local repo; reflects only the most recent API'''
66 66
67 67 def __init__(self, repo, caps=MODERNCAPS):
68 68 peer.peerrepository.__init__(self)
69 69 self._repo = repo.filtered('unserved')
70 70 self.ui = repo.ui
71 71 self._caps = repo._restrictcapabilities(caps)
72 72 self.requirements = repo.requirements
73 73 self.supportedformats = repo.supportedformats
74 74
75 75 def close(self):
76 76 self._repo.close()
77 77
78 78 def _capabilities(self):
79 79 return self._caps
80 80
81 81 def local(self):
82 82 return self._repo
83 83
84 84 def canpush(self):
85 85 return True
86 86
87 87 def url(self):
88 88 return self._repo.url()
89 89
90 90 def lookup(self, key):
91 91 return self._repo.lookup(key)
92 92
93 93 def branchmap(self):
94 94 return self._repo.branchmap()
95 95
96 96 def heads(self):
97 97 return self._repo.heads()
98 98
99 99 def known(self, nodes):
100 100 return self._repo.known(nodes)
101 101
102 102 def getbundle(self, source, heads=None, common=None):
103 103 return self._repo.getbundle(source, heads=heads, common=common)
104 104
105 105 # TODO We might want to move the next two calls into legacypeer and add
106 106 # unbundle instead.
107 107
108 108 def lock(self):
109 109 return self._repo.lock()
110 110
111 111 def addchangegroup(self, cg, source, url):
112 112 return self._repo.addchangegroup(cg, source, url)
113 113
114 114 def pushkey(self, namespace, key, old, new):
115 115 return self._repo.pushkey(namespace, key, old, new)
116 116
117 117 def listkeys(self, namespace):
118 118 return self._repo.listkeys(namespace)
119 119
120 120 def debugwireargs(self, one, two, three=None, four=None, five=None):
121 121 '''used to test argument passing over the wire'''
122 122 return "%s %s %s %s %s" % (one, two, three, four, five)
123 123
124 124 class locallegacypeer(localpeer):
125 125 '''peer extension which implements legacy methods too; used for tests with
126 126 restricted capabilities'''
127 127
128 128 def __init__(self, repo):
129 129 localpeer.__init__(self, repo, caps=LEGACYCAPS)
130 130
131 131 def branches(self, nodes):
132 132 return self._repo.branches(nodes)
133 133
134 134 def between(self, pairs):
135 135 return self._repo.between(pairs)
136 136
137 137 def changegroup(self, basenodes, source):
138 138 return self._repo.changegroup(basenodes, source)
139 139
140 140 def changegroupsubset(self, bases, heads, source):
141 141 return self._repo.changegroupsubset(bases, heads, source)
142 142
143 143 class localrepository(object):
144 144
145 145 supportedformats = set(('revlogv1', 'generaldelta'))
146 146 supported = supportedformats | set(('store', 'fncache', 'shared',
147 147 'dotencode'))
148 148 openerreqs = set(('revlogv1', 'generaldelta'))
149 149 requirements = ['revlogv1']
150 150 filtername = None
151 151
152 152 def _baserequirements(self, create):
153 153 return self.requirements[:]
154 154
155 155 def __init__(self, baseui, path=None, create=False):
156 156 self.wvfs = scmutil.vfs(path, expand=True)
157 157 self.wopener = self.wvfs
158 158 self.root = self.wvfs.base
159 159 self.path = self.wvfs.join(".hg")
160 160 self.origroot = path
161 161 self.auditor = scmutil.pathauditor(self.root, self._checknested)
162 162 self.vfs = scmutil.vfs(self.path)
163 163 self.opener = self.vfs
164 164 self.baseui = baseui
165 165 self.ui = baseui.copy()
166 166 # A list of callbacks to shape the phase if no data were found.
167 167 # Callbacks are in the form: func(repo, roots) --> processed root.
168 168 # This list is to be filled by extensions during repo setup
169 169 self._phasedefaults = []
170 170 try:
171 171 self.ui.readconfig(self.join("hgrc"), self.root)
172 172 extensions.loadall(self.ui)
173 173 except IOError:
174 174 pass
175 175
176 176 if not self.vfs.isdir():
177 177 if create:
178 178 if not self.wvfs.exists():
179 179 self.wvfs.makedirs()
180 180 self.vfs.makedir(notindexed=True)
181 181 requirements = self._baserequirements(create)
182 182 if self.ui.configbool('format', 'usestore', True):
183 183 self.vfs.mkdir("store")
184 184 requirements.append("store")
185 185 if self.ui.configbool('format', 'usefncache', True):
186 186 requirements.append("fncache")
187 187 if self.ui.configbool('format', 'dotencode', True):
188 188 requirements.append('dotencode')
189 189 # create an invalid changelog
190 190 self.vfs.append(
191 191 "00changelog.i",
192 192 '\0\0\0\2' # represents revlogv2
193 193 ' dummy changelog to prevent using the old repo layout'
194 194 )
195 195 if self.ui.configbool('format', 'generaldelta', False):
196 196 requirements.append("generaldelta")
197 197 requirements = set(requirements)
198 198 else:
199 199 raise error.RepoError(_("repository %s not found") % path)
200 200 elif create:
201 201 raise error.RepoError(_("repository %s already exists") % path)
202 202 else:
203 203 try:
204 204 requirements = scmutil.readrequires(self.vfs, self.supported)
205 205 except IOError, inst:
206 206 if inst.errno != errno.ENOENT:
207 207 raise
208 208 requirements = set()
209 209
210 210 self.sharedpath = self.path
211 211 try:
212 212 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
213 213 if not os.path.exists(s):
214 214 raise error.RepoError(
215 215 _('.hg/sharedpath points to nonexistent directory %s') % s)
216 216 self.sharedpath = s
217 217 except IOError, inst:
218 218 if inst.errno != errno.ENOENT:
219 219 raise
220 220
221 221 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
222 222 self.spath = self.store.path
223 223 self.svfs = self.store.vfs
224 224 self.sopener = self.svfs
225 225 self.sjoin = self.store.join
226 226 self.vfs.createmode = self.store.createmode
227 227 self._applyrequirements(requirements)
228 228 if create:
229 229 self._writerequirements()
230 230
231 231
232 232 self._branchcaches = {}
233 233 self.filterpats = {}
234 234 self._datafilters = {}
235 235 self._transref = self._lockref = self._wlockref = None
236 236
237 237 # A cache for various files under .hg/ that tracks file changes,
238 238 # (used by the filecache decorator)
239 239 #
240 240 # Maps a property name to its util.filecacheentry
241 241 self._filecache = {}
242 242
243 243 # hold sets of revision to be filtered
244 244 # should be cleared when something might have changed the filter value:
245 245 # - new changesets,
246 246 # - phase change,
247 247 # - new obsolescence marker,
248 248 # - working directory parent change,
249 249 # - bookmark changes
250 250 self.filteredrevcache = {}
251 251
252 252 def close(self):
253 253 pass
254 254
255 255 def _restrictcapabilities(self, caps):
256 256 return caps
257 257
258 258 def _applyrequirements(self, requirements):
259 259 self.requirements = requirements
260 260 self.sopener.options = dict((r, 1) for r in requirements
261 261 if r in self.openerreqs)
262 262
263 263 def _writerequirements(self):
264 264 reqfile = self.opener("requires", "w")
265 265 for r in self.requirements:
266 266 reqfile.write("%s\n" % r)
267 267 reqfile.close()
268 268
269 269 def _checknested(self, path):
270 270 """Determine if path is a legal nested repository."""
271 271 if not path.startswith(self.root):
272 272 return False
273 273 subpath = path[len(self.root) + 1:]
274 274 normsubpath = util.pconvert(subpath)
275 275
276 276 # XXX: Checking against the current working copy is wrong in
277 277 # the sense that it can reject things like
278 278 #
279 279 # $ hg cat -r 10 sub/x.txt
280 280 #
281 281 # if sub/ is no longer a subrepository in the working copy
282 282 # parent revision.
283 283 #
284 284 # However, it can of course also allow things that would have
285 285 # been rejected before, such as the above cat command if sub/
286 286 # is a subrepository now, but was a normal directory before.
287 287 # The old path auditor would have rejected by mistake since it
288 288 # panics when it sees sub/.hg/.
289 289 #
290 290 # All in all, checking against the working copy seems sensible
291 291 # since we want to prevent access to nested repositories on
292 292 # the filesystem *now*.
293 293 ctx = self[None]
294 294 parts = util.splitpath(subpath)
295 295 while parts:
296 296 prefix = '/'.join(parts)
297 297 if prefix in ctx.substate:
298 298 if prefix == normsubpath:
299 299 return True
300 300 else:
301 301 sub = ctx.sub(prefix)
302 302 return sub.checknested(subpath[len(prefix) + 1:])
303 303 else:
304 304 parts.pop()
305 305 return False
306 306
307 307 def peer(self):
308 308 return localpeer(self) # not cached to avoid reference cycle
309 309
310 310 def unfiltered(self):
311 311 """Return unfiltered version of the repository
312 312
313 313 Intended to be overwritten by filtered repo."""
314 314 return self
315 315
316 316 def filtered(self, name):
317 317 """Return a filtered version of a repository"""
318 318 # build a new class with the mixin and the current class
319 319 # (possibly a subclass of the repo)
320 320 class proxycls(repoview.repoview, self.unfiltered().__class__):
321 321 pass
322 322 return proxycls(self, name)
323 323
324 324 @repofilecache('bookmarks')
325 325 def _bookmarks(self):
326 326 return bookmarks.bmstore(self)
327 327
328 328 @repofilecache('bookmarks.current')
329 329 def _bookmarkcurrent(self):
330 330 return bookmarks.readcurrent(self)
331 331
332 332 def bookmarkheads(self, bookmark):
333 333 name = bookmark.split('@', 1)[0]
334 334 heads = []
335 335 for mark, n in self._bookmarks.iteritems():
336 336 if mark.split('@', 1)[0] == name:
337 337 heads.append(n)
338 338 return heads
339 339
340 340 @storecache('phaseroots')
341 341 def _phasecache(self):
342 342 return phases.phasecache(self, self._phasedefaults)
343 343
344 344 @storecache('obsstore')
345 345 def obsstore(self):
346 346 store = obsolete.obsstore(self.sopener)
347 347 if store and not obsolete._enabled:
348 348 # message is rare enough to not be translated
349 349 msg = 'obsolete feature not enabled but %i markers found!\n'
350 350 self.ui.warn(msg % len(list(store)))
351 351 return store
352 352
353 353 @storecache('00changelog.i')
354 354 def changelog(self):
355 355 c = changelog.changelog(self.sopener)
356 356 if 'HG_PENDING' in os.environ:
357 357 p = os.environ['HG_PENDING']
358 358 if p.startswith(self.root):
359 359 c.readpending('00changelog.i.a')
360 360 return c
361 361
362 362 @storecache('00manifest.i')
363 363 def manifest(self):
364 364 return manifest.manifest(self.sopener)
365 365
366 366 @repofilecache('dirstate')
367 367 def dirstate(self):
368 368 warned = [0]
369 369 def validate(node):
370 370 try:
371 371 self.changelog.rev(node)
372 372 return node
373 373 except error.LookupError:
374 374 if not warned[0]:
375 375 warned[0] = True
376 376 self.ui.warn(_("warning: ignoring unknown"
377 377 " working parent %s!\n") % short(node))
378 378 return nullid
379 379
380 380 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
381 381
382 382 def __getitem__(self, changeid):
383 383 if changeid is None:
384 384 return context.workingctx(self)
385 385 return context.changectx(self, changeid)
386 386
387 387 def __contains__(self, changeid):
388 388 try:
389 389 return bool(self.lookup(changeid))
390 390 except error.RepoLookupError:
391 391 return False
392 392
393 393 def __nonzero__(self):
394 394 return True
395 395
396 396 def __len__(self):
397 397 return len(self.changelog)
398 398
399 399 def __iter__(self):
400 400 return iter(self.changelog)
401 401
402 402 def revs(self, expr, *args):
403 403 '''Return a list of revisions matching the given revset'''
404 404 expr = revset.formatspec(expr, *args)
405 405 m = revset.match(None, expr)
406 406 return [r for r in m(self, list(self))]
407 407
408 408 def set(self, expr, *args):
409 409 '''
410 410 Yield a context for each matching revision, after doing arg
411 411 replacement via revset.formatspec
412 412 '''
413 413 for r in self.revs(expr, *args):
414 414 yield self[r]
415 415
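# Usage sketch for revs()/set() (the '%ln' node-list form also appears
# in the discovery.py hunk above):
#
#   r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
#   newheads = list(c.node() for c in r)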
416 416 def url(self):
417 417 return 'file:' + self.root
418 418
419 419 def hook(self, name, throw=False, **args):
420 420 return hook.hook(self.ui, self, name, throw, **args)
421 421
422 422 @unfilteredmethod
423 423 def _tag(self, names, node, message, local, user, date, extra={}):
424 424 if isinstance(names, str):
425 425 names = (names,)
426 426
427 427 branches = self.branchmap()
428 428 for name in names:
429 429 self.hook('pretag', throw=True, node=hex(node), tag=name,
430 430 local=local)
431 431 if name in branches:
432 432 self.ui.warn(_("warning: tag %s conflicts with existing"
433 433 " branch name\n") % name)
434 434
435 435 def writetags(fp, names, munge, prevtags):
436 436 fp.seek(0, 2)
437 437 if prevtags and prevtags[-1] != '\n':
438 438 fp.write('\n')
439 439 for name in names:
440 440 m = munge and munge(name) or name
441 441 if (self._tagscache.tagtypes and
442 442 name in self._tagscache.tagtypes):
443 443 old = self.tags().get(name, nullid)
444 444 fp.write('%s %s\n' % (hex(old), m))
445 445 fp.write('%s %s\n' % (hex(node), m))
446 446 fp.close()
447 447
448 448 prevtags = ''
449 449 if local:
450 450 try:
451 451 fp = self.opener('localtags', 'r+')
452 452 except IOError:
453 453 fp = self.opener('localtags', 'a')
454 454 else:
455 455 prevtags = fp.read()
456 456
457 457 # local tags are stored in the current charset
458 458 writetags(fp, names, None, prevtags)
459 459 for name in names:
460 460 self.hook('tag', node=hex(node), tag=name, local=local)
461 461 return
462 462
463 463 try:
464 464 fp = self.wfile('.hgtags', 'rb+')
465 465 except IOError, e:
466 466 if e.errno != errno.ENOENT:
467 467 raise
468 468 fp = self.wfile('.hgtags', 'ab')
469 469 else:
470 470 prevtags = fp.read()
471 471
472 472 # committed tags are stored in UTF-8
473 473 writetags(fp, names, encoding.fromlocal, prevtags)
474 474
475 475 fp.close()
476 476
477 477 self.invalidatecaches()
478 478
479 479 if '.hgtags' not in self.dirstate:
480 480 self[None].add(['.hgtags'])
481 481
482 482 m = matchmod.exact(self.root, '', ['.hgtags'])
483 483 tagnode = self.commit(message, user, date, extra=extra, match=m)
484 484
485 485 for name in names:
486 486 self.hook('tag', node=hex(node), tag=name, local=local)
487 487
488 488 return tagnode
489 489
490 490 def tag(self, names, node, message, local, user, date):
491 491 '''tag a revision with one or more symbolic names.
492 492
493 493 names is a list of strings or, when adding a single tag, names may be a
494 494 string.
495 495
496 496 if local is True, the tags are stored in a per-repository file.
497 497 otherwise, they are stored in the .hgtags file, and a new
498 498 changeset is committed with the change.
499 499
500 500 keyword arguments:
501 501
502 502 local: whether to store tags in non-version-controlled file
503 503 (default False)
504 504
505 505 message: commit message to use if committing
506 506
507 507 user: name of user to use if committing
508 508
509 509 date: date tuple to use if committing'''
510 510
511 511 if not local:
512 512 for x in self.status()[:5]:
513 513 if '.hgtags' in x:
514 514 raise util.Abort(_('working copy of .hgtags is changed '
515 515 '(please commit .hgtags manually)'))
516 516
517 517 self.tags() # instantiate the cache
518 518 self._tag(names, node, message, local, user, date)
519 519
520 520 @filteredpropertycache
521 521 def _tagscache(self):
522 522 '''Returns a tagscache object that contains various tags related
523 523 caches.'''
524 524
525 525 # This simplifies its cache management by having one decorated
526 526 # function (this one) and the rest simply fetch things from it.
527 527 class tagscache(object):
528 528 def __init__(self):
529 529 # These two define the set of tags for this repository. tags
530 530 # maps tag name to node; tagtypes maps tag name to 'global' or
531 531 # 'local'. (Global tags are defined by .hgtags across all
532 532 # heads, and local tags are defined in .hg/localtags.)
533 533 # They constitute the in-memory cache of tags.
534 534 self.tags = self.tagtypes = None
535 535
536 536 self.nodetagscache = self.tagslist = None
537 537
538 538 cache = tagscache()
539 539 cache.tags, cache.tagtypes = self._findtags()
540 540
541 541 return cache
542 542
543 543 def tags(self):
544 544 '''return a mapping of tag to node'''
545 545 t = {}
546 546 if self.changelog.filteredrevs:
547 547 tags, tt = self._findtags()
548 548 else:
549 549 tags = self._tagscache.tags
550 550 for k, v in tags.iteritems():
551 551 try:
552 552 # ignore tags to unknown nodes
553 553 self.changelog.rev(v)
554 554 t[k] = v
555 555 except (error.LookupError, ValueError):
556 556 pass
557 557 return t
558 558
559 559 def _findtags(self):
560 560 '''Do the hard work of finding tags. Return a pair of dicts
561 561 (tags, tagtypes) where tags maps tag name to node, and tagtypes
562 562 maps tag name to a string like \'global\' or \'local\'.
563 563 Subclasses or extensions are free to add their own tags, but
564 564 should be aware that the returned dicts will be retained for the
565 565 duration of the localrepo object.'''
566 566
567 567 # XXX what tagtype should subclasses/extensions use? Currently
568 568 # mq and bookmarks add tags, but do not set the tagtype at all.
569 569 # Should each extension invent its own tag type? Should there
570 570 # be one tagtype for all such "virtual" tags? Or is the status
571 571 # quo fine?
572 572
573 573 alltags = {} # map tag name to (node, hist)
574 574 tagtypes = {}
575 575
576 576 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
577 577 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
578 578
579 579 # Build the return dicts. Have to re-encode tag names because
580 580 # the tags module always uses UTF-8 (in order not to lose info
581 581 # writing to the cache), but the rest of Mercurial wants them in
582 582 # local encoding.
583 583 tags = {}
584 584 for (name, (node, hist)) in alltags.iteritems():
585 585 if node != nullid:
586 586 tags[encoding.tolocal(name)] = node
587 587 tags['tip'] = self.changelog.tip()
588 588 tagtypes = dict([(encoding.tolocal(name), value)
589 589 for (name, value) in tagtypes.iteritems()])
590 590 return (tags, tagtypes)
591 591
592 592 def tagtype(self, tagname):
593 593 '''
594 594 return the type of the given tag. result can be:
595 595
596 596 'local' : a local tag
597 597 'global' : a global tag
598 598 None : tag does not exist
599 599 '''
600 600
601 601 return self._tagscache.tagtypes.get(tagname)
602 602
603 603 def tagslist(self):
604 604 '''return a list of tags ordered by revision'''
605 605 if not self._tagscache.tagslist:
606 606 l = []
607 607 for t, n in self.tags().iteritems():
608 608 r = self.changelog.rev(n)
609 609 l.append((r, t, n))
610 610 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
611 611
612 612 return self._tagscache.tagslist
613 613
614 614 def nodetags(self, node):
615 615 '''return the tags associated with a node'''
616 616 if not self._tagscache.nodetagscache:
617 617 nodetagscache = {}
618 618 for t, n in self._tagscache.tags.iteritems():
619 619 nodetagscache.setdefault(n, []).append(t)
620 620 for tags in nodetagscache.itervalues():
621 621 tags.sort()
622 622 self._tagscache.nodetagscache = nodetagscache
623 623 return self._tagscache.nodetagscache.get(node, [])
624 624
625 625 def nodebookmarks(self, node):
626 626 marks = []
627 627 for bookmark, n in self._bookmarks.iteritems():
628 628 if n == node:
629 629 marks.append(bookmark)
630 630 return sorted(marks)
631 631
632 632 def branchmap(self):
633 633 '''returns a dictionary {branch: [branchheads]}'''
634 634 branchmap.updatecache(self)
635 635 return self._branchcaches[self.filtername]
636 636
637 637
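# Consumers read branch heads through this accessor; as _branchtip()
# below assumes, each self.branchmap()[branch] list keeps its tipmost
# head last.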
638 638 def _branchtip(self, heads):
639 639 '''return the tipmost branch head in heads'''
640 640 tip = heads[-1]
641 641 for h in reversed(heads):
642 642 if not self[h].closesbranch():
643 643 tip = h
644 644 break
645 645 return tip
646 646
647 647 def branchtip(self, branch):
648 648 '''return the tip node for a given branch'''
649 649 if branch not in self.branchmap():
650 650 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
651 651 return self._branchtip(self.branchmap()[branch])
652 652
653 653 def branchtags(self):
654 654 '''return a dict where branch names map to the tipmost head of
655 655 the branch, open heads come before closed'''
656 656 bt = {}
657 657 for bn, heads in self.branchmap().iteritems():
658 658 bt[bn] = self._branchtip(heads)
659 659 return bt
660 660
661 661 def lookup(self, key):
662 662 return self[key].node()
663 663
664 664 def lookupbranch(self, key, remote=None):
665 665 repo = remote or self
666 666 if key in repo.branchmap():
667 667 return key
668 668
669 669 repo = (remote and remote.local()) and remote or self
670 670 return repo[key].branch()
671 671
672 672 def known(self, nodes):
673 673 nm = self.changelog.nodemap
674 674 pc = self._phasecache
675 675 result = []
676 676 for n in nodes:
677 677 r = nm.get(n)
678 678 resp = not (r is None or pc.phase(self, r) >= phases.secret)
679 679 result.append(resp)
680 680 return result
681 681
682 682 def local(self):
683 683 return self
684 684
685 685 def cancopy(self):
686 686 return self.local() # so statichttprepo's override of local() works
687 687
688 688 def join(self, f):
689 689 return os.path.join(self.path, f)
690 690
691 691 def wjoin(self, f):
692 692 return os.path.join(self.root, f)
693 693
694 694 def file(self, f):
695 695 if f[0] == '/':
696 696 f = f[1:]
697 697 return filelog.filelog(self.sopener, f)
698 698
699 699 def changectx(self, changeid):
700 700 return self[changeid]
701 701
702 702 def parents(self, changeid=None):
703 703 '''get list of changectxs for parents of changeid'''
704 704 return self[changeid].parents()
705 705
706 706 def setparents(self, p1, p2=nullid):
707 707 copies = self.dirstate.setparents(p1, p2)
708 708 if copies:
709 709 # Adjust copy records, the dirstate cannot do it, it
710 710 # requires access to parents manifests. Preserve them
711 711 # only for entries added to first parent.
712 712 pctx = self[p1]
713 713 for f in copies:
714 714 if f not in pctx and copies[f] in pctx:
715 715 self.dirstate.copy(copies[f], f)
716 716
717 717 def filectx(self, path, changeid=None, fileid=None):
718 718 """changeid can be a changeset revision, node, or tag.
719 719 fileid can be a file revision or node."""
720 720 return context.filectx(self, path, changeid, fileid)
721 721
722 722 def getcwd(self):
723 723 return self.dirstate.getcwd()
724 724
725 725 def pathto(self, f, cwd=None):
726 726 return self.dirstate.pathto(f, cwd)
727 727
728 728 def wfile(self, f, mode='r'):
729 729 return self.wopener(f, mode)
730 730
731 731 def _link(self, f):
732 732 return os.path.islink(self.wjoin(f))
733 733
734 734 def _loadfilter(self, filter):
735 735 if filter not in self.filterpats:
736 736 l = []
737 737 for pat, cmd in self.ui.configitems(filter):
738 738 if cmd == '!':
739 739 continue
740 740 mf = matchmod.match(self.root, '', [pat])
741 741 fn = None
742 742 params = cmd
743 743 for name, filterfn in self._datafilters.iteritems():
744 744 if cmd.startswith(name):
745 745 fn = filterfn
746 746 params = cmd[len(name):].lstrip()
747 747 break
748 748 if not fn:
749 749 fn = lambda s, c, **kwargs: util.filter(s, c)
750 750 # Wrap old filters not supporting keyword arguments
751 751 if not inspect.getargspec(fn)[2]:
752 752 oldfn = fn
753 753 fn = lambda s, c, **kwargs: oldfn(s, c)
754 754 l.append((mf, fn, params))
755 755 self.filterpats[filter] = l
756 756 return self.filterpats[filter]
757 757
758 758 def _filter(self, filterpats, filename, data):
759 759 for mf, fn, cmd in filterpats:
760 760 if mf(filename):
761 761 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
762 762 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
763 763 break
764 764
765 765 return data
766 766
767 767 @unfilteredpropertycache
768 768 def _encodefilterpats(self):
769 769 return self._loadfilter('encode')
770 770
771 771 @unfilteredpropertycache
772 772 def _decodefilterpats(self):
773 773 return self._loadfilter('decode')
774 774
775 775 def adddatafilter(self, name, filter):
776 776 self._datafilters[name] = filter
777 777
778 778 def wread(self, filename):
779 779 if self._link(filename):
780 780 data = os.readlink(self.wjoin(filename))
781 781 else:
782 782 data = self.wopener.read(filename)
783 783 return self._filter(self._encodefilterpats, filename, data)
784 784
785 785 def wwrite(self, filename, data, flags):
786 786 data = self._filter(self._decodefilterpats, filename, data)
787 787 if 'l' in flags:
788 788 self.wopener.symlink(data, filename)
789 789 else:
790 790 self.wopener.write(filename, data)
791 791 if 'x' in flags:
792 792 util.setflags(self.wjoin(filename), False, True)
793 793
794 794 def wwritedata(self, filename, data):
795 795 return self._filter(self._decodefilterpats, filename, data)
796 796
797 797 def transaction(self, desc):
798 798 tr = self._transref and self._transref() or None
799 799 if tr and tr.running():
800 800 return tr.nest()
801 801
802 802 # abort here if the journal already exists
803 803 if os.path.exists(self.sjoin("journal")):
804 804 raise error.RepoError(
805 805 _("abandoned transaction found - run hg recover"))
806 806
807 807 self._writejournal(desc)
808 808 renames = [(x, undoname(x)) for x in self._journalfiles()]
809 809
810 810 tr = transaction.transaction(self.ui.warn, self.sopener,
811 811 self.sjoin("journal"),
812 812 aftertrans(renames),
813 813 self.store.createmode)
814 814 self._transref = weakref.ref(tr)
815 815 return tr
816 816
817 817 def _journalfiles(self):
818 818 return (self.sjoin('journal'), self.join('journal.dirstate'),
819 819 self.join('journal.branch'), self.join('journal.desc'),
820 820 self.join('journal.bookmarks'),
821 821 self.sjoin('journal.phaseroots'))
822 822
823 823 def undofiles(self):
824 824 return [undoname(x) for x in self._journalfiles()]
825 825
826 826 def _writejournal(self, desc):
827 827 self.opener.write("journal.dirstate",
828 828 self.opener.tryread("dirstate"))
829 829 self.opener.write("journal.branch",
830 830 encoding.fromlocal(self.dirstate.branch()))
831 831 self.opener.write("journal.desc",
832 832 "%d\n%s\n" % (len(self), desc))
833 833 self.opener.write("journal.bookmarks",
834 834 self.opener.tryread("bookmarks"))
835 835 self.sopener.write("journal.phaseroots",
836 836 self.sopener.tryread("phaseroots"))
837 837
838 838 def recover(self):
839 839 lock = self.lock()
840 840 try:
841 841 if os.path.exists(self.sjoin("journal")):
842 842 self.ui.status(_("rolling back interrupted transaction\n"))
843 843 transaction.rollback(self.sopener, self.sjoin("journal"),
844 844 self.ui.warn)
845 845 self.invalidate()
846 846 return True
847 847 else:
848 848 self.ui.warn(_("no interrupted transaction available\n"))
849 849 return False
850 850 finally:
851 851 lock.release()
852 852
853 853 def rollback(self, dryrun=False, force=False):
854 854 wlock = lock = None
855 855 try:
856 856 wlock = self.wlock()
857 857 lock = self.lock()
858 858 if os.path.exists(self.sjoin("undo")):
859 859 return self._rollback(dryrun, force)
860 860 else:
861 861 self.ui.warn(_("no rollback information available\n"))
862 862 return 1
863 863 finally:
864 864 release(lock, wlock)
865 865
866 866 @unfilteredmethod # Until we get smarter cache management
867 867 def _rollback(self, dryrun, force):
868 868 ui = self.ui
869 869 try:
870 870 args = self.opener.read('undo.desc').splitlines()
871 871 (oldlen, desc, detail) = (int(args[0]), args[1], None)
872 872 if len(args) >= 3:
873 873 detail = args[2]
874 874 oldtip = oldlen - 1
875 875
876 876 if detail and ui.verbose:
877 877 msg = (_('repository tip rolled back to revision %s'
878 878 ' (undo %s: %s)\n')
879 879 % (oldtip, desc, detail))
880 880 else:
881 881 msg = (_('repository tip rolled back to revision %s'
882 882 ' (undo %s)\n')
883 883 % (oldtip, desc))
884 884 except IOError:
885 885 msg = _('rolling back unknown transaction\n')
886 886 desc = None
887 887
888 888 if not force and self['.'] != self['tip'] and desc == 'commit':
889 889 raise util.Abort(
890 890 _('rollback of last commit while not checked out '
891 891 'may lose data'), hint=_('use -f to force'))
892 892
893 893 ui.status(msg)
894 894 if dryrun:
895 895 return 0
896 896
897 897 parents = self.dirstate.parents()
898 898 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
899 899 if os.path.exists(self.join('undo.bookmarks')):
900 900 util.rename(self.join('undo.bookmarks'),
901 901 self.join('bookmarks'))
902 902 if os.path.exists(self.sjoin('undo.phaseroots')):
903 903 util.rename(self.sjoin('undo.phaseroots'),
904 904 self.sjoin('phaseroots'))
905 905 self.invalidate()
906 906
907 907 # Discard all cache entries to force reloading everything.
908 908 self._filecache.clear()
909 909
910 910 parentgone = (parents[0] not in self.changelog.nodemap or
911 911 parents[1] not in self.changelog.nodemap)
912 912 if parentgone:
913 913 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
914 914 try:
915 915 branch = self.opener.read('undo.branch')
916 916 self.dirstate.setbranch(encoding.tolocal(branch))
917 917 except IOError:
918 918 ui.warn(_('named branch could not be reset: '
919 919 'current branch is still \'%s\'\n')
920 920 % self.dirstate.branch())
921 921
922 922 self.dirstate.invalidate()
923 923 parents = tuple([p.rev() for p in self.parents()])
924 924 if len(parents) > 1:
925 925 ui.status(_('working directory now based on '
926 926 'revisions %d and %d\n') % parents)
927 927 else:
928 928 ui.status(_('working directory now based on '
929 929 'revision %d\n') % parents)
930 930 # TODO: if we know which new heads may result from this rollback, pass
931 931 # them to destroy(), which will prevent the branchhead cache from being
932 932 # invalidated.
933 933 self.destroyed()
934 934 return 0
935 935
936 936 def invalidatecaches(self):
937 937
938 938 if '_tagscache' in vars(self):
939 939 # can't use delattr on proxy
940 940 del self.__dict__['_tagscache']
941 941
942 942 self.unfiltered()._branchcaches.clear()
943 943 self.invalidatevolatilesets()
944 944
945 945 def invalidatevolatilesets(self):
946 946 self.filteredrevcache.clear()
947 947 obsolete.clearobscaches(self)
948 948
949 949 def invalidatedirstate(self):
950 950 '''Invalidates the dirstate, causing the next call to dirstate
951 951 to check if it was modified since the last time it was read,
952 952 rereading it if it has.
953 953
954 954 This is different from dirstate.invalidate() in that it doesn't always
955 955 reread the dirstate. Use dirstate.invalidate() if you want to
956 956 explicitly read the dirstate again (i.e. restoring it to a previous
957 957 known good state).'''
958 958 if hasunfilteredcache(self, 'dirstate'):
959 959 for k in self.dirstate._filecache:
960 960 try:
961 961 delattr(self.dirstate, k)
962 962 except AttributeError:
963 963 pass
964 964 delattr(self.unfiltered(), 'dirstate')
965 965
966 966 def invalidate(self):
967 967 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
968 968 for k in self._filecache:
969 969 # dirstate is invalidated separately in invalidatedirstate()
970 970 if k == 'dirstate':
971 971 continue
972 972
973 973 try:
974 974 delattr(unfiltered, k)
975 975 except AttributeError:
976 976 pass
977 977 self.invalidatecaches()
978 978
979 979 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
980 980 try:
981 981 l = lock.lock(lockname, 0, releasefn, desc=desc)
982 982 except error.LockHeld, inst:
983 983 if not wait:
984 984 raise
985 985 self.ui.warn(_("waiting for lock on %s held by %r\n") %
986 986 (desc, inst.locker))
987 987 # default to 600 seconds timeout
988 988 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
989 989 releasefn, desc=desc)
990 990 if acquirefn:
991 991 acquirefn()
992 992 return l
993 993
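# The 600-second fallback above is the ui.timeout configuration knob;
# a minimal hgrc sketch (the value is illustrative):
#
#   [ui]
#   timeout = 300    # seconds to wait for a held lock before aborting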
994 994 def _afterlock(self, callback):
995 995 """add a callback to the current repository lock.
996 996
997 997 The callback will be executed on lock release."""
998 998 l = self._lockref and self._lockref()
999 999 if l:
1000 1000 l.postrelease.append(callback)
1001 1001 else:
1002 1002 callback()
1003 1003
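# A minimal usage sketch for _afterlock, assuming `repo` is a
# localrepository and `notify` is a hypothetical callback:
#
#   def notify():
#       repo.ui.status("store lock released\n")
#   repo._afterlock(notify)  # deferred if a lock is held, else run now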
1004 1004 def lock(self, wait=True):
1005 1005 '''Lock the repository store (.hg/store) and return a weak reference
1006 1006 to the lock. Use this before modifying the store (e.g. committing or
1007 1007 stripping). If you are opening a transaction, get a lock as well.'''
1008 1008 l = self._lockref and self._lockref()
1009 1009 if l is not None and l.held:
1010 1010 l.lock()
1011 1011 return l
1012 1012
1013 1013 def unlock():
1014 1014 self.store.write()
1015 1015 if hasunfilteredcache(self, '_phasecache'):
1016 1016 self._phasecache.write()
1017 1017 for k, ce in self._filecache.items():
1018 1018 if k == 'dirstate':
1019 1019 continue
1020 1020 ce.refresh()
1021 1021
1022 1022 l = self._lock(self.sjoin("lock"), wait, unlock,
1023 1023 self.invalidate, _('repository %s') % self.origroot)
1024 1024 self._lockref = weakref.ref(l)
1025 1025 return l
1026 1026
1027 1027 def wlock(self, wait=True):
1028 1028 '''Lock the non-store parts of the repository (everything under
1029 1029 .hg except .hg/store) and return a weak reference to the lock.
1030 1030 Use this before modifying files in .hg.'''
1031 1031 l = self._wlockref and self._wlockref()
1032 1032 if l is not None and l.held:
1033 1033 l.lock()
1034 1034 return l
1035 1035
1036 1036 def unlock():
1037 1037 self.dirstate.write()
1038 1038 ce = self._filecache.get('dirstate')
1039 1039 if ce:
1040 1040 ce.refresh()
1041 1041
1042 1042 l = self._lock(self.join("wlock"), wait, unlock,
1043 1043 self.invalidatedirstate, _('working directory of %s') %
1044 1044 self.origroot)
1045 1045 self._wlockref = weakref.ref(l)
1046 1046 return l
1047 1047
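# When both locks are needed, the order used in this file (commit()
# takes wlock, then commitctx() takes lock) is working-copy lock
# first, store lock second, released in reverse. Sketch:
#
#   wlock = repo.wlock()
#   lock = repo.lock()
#   try:
#       pass  # modify the working copy and the store
#   finally:
#       lock.release()
#       wlock.release()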
1048 1048 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1049 1049 """
1050 1050 commit an individual file as part of a larger transaction
1051 1051 """
1052 1052
1053 1053 fname = fctx.path()
1054 1054 text = fctx.data()
1055 1055 flog = self.file(fname)
1056 1056 fparent1 = manifest1.get(fname, nullid)
1057 1057 fparent2 = fparent2o = manifest2.get(fname, nullid)
1058 1058
1059 1059 meta = {}
1060 1060 copy = fctx.renamed()
1061 1061 if copy and copy[0] != fname:
1062 1062 # Mark the new revision of this file as a copy of another
1063 1063 # file. This copy data will effectively act as a parent
1064 1064 # of this new revision. If this is a merge, the first
1065 1065 # parent will be the nullid (meaning "look up the copy data")
1066 1066 # and the second one will be the other parent. For example:
1067 1067 #
1068 1068 # 0 --- 1 --- 3 rev1 changes file foo
1069 1069 # \ / rev2 renames foo to bar and changes it
1070 1070 # \- 2 -/ rev3 should have bar with all changes and
1071 1071 # should record that bar descends from
1072 1072 # bar in rev2 and foo in rev1
1073 1073 #
1074 1074 # this allows this merge to succeed:
1075 1075 #
1076 1076 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1077 1077 # \ / merging rev3 and rev4 should use bar@rev2
1078 1078 # \- 2 --- 4 as the merge base
1079 1079 #
1080 1080
1081 1081 cfname = copy[0]
1082 1082 crev = manifest1.get(cfname)
1083 1083 newfparent = fparent2
1084 1084
1085 1085 if manifest2: # branch merge
1086 1086 if fparent2 == nullid or crev is None: # copied on remote side
1087 1087 if cfname in manifest2:
1088 1088 crev = manifest2[cfname]
1089 1089 newfparent = fparent1
1090 1090
1091 1091 # find source in nearest ancestor if we've lost track
1092 1092 if not crev:
1093 1093 self.ui.debug(" %s: searching for copy revision for %s\n" %
1094 1094 (fname, cfname))
1095 1095 for ancestor in self[None].ancestors():
1096 1096 if cfname in ancestor:
1097 1097 crev = ancestor[cfname].filenode()
1098 1098 break
1099 1099
1100 1100 if crev:
1101 1101 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1102 1102 meta["copy"] = cfname
1103 1103 meta["copyrev"] = hex(crev)
1104 1104 fparent1, fparent2 = nullid, newfparent
1105 1105 else:
1106 1106 self.ui.warn(_("warning: can't find ancestor for '%s' "
1107 1107 "copied from '%s'!\n") % (fname, cfname))
1108 1108
1109 1109 elif fparent2 != nullid:
1110 1110 # is one parent an ancestor of the other?
1111 1111 fparentancestor = flog.ancestor(fparent1, fparent2)
1112 1112 if fparentancestor == fparent1:
1113 1113 fparent1, fparent2 = fparent2, nullid
1114 1114 elif fparentancestor == fparent2:
1115 1115 fparent2 = nullid
1116 1116
1117 1117 # is the file changed?
1118 1118 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1119 1119 changelist.append(fname)
1120 1120 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1121 1121
1122 1122 # are just the flags changed during merge?
1123 1123 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1124 1124 changelist.append(fname)
1125 1125
1126 1126 return fparent1
1127 1127
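# Sketch of the rename metadata recorded above: a file copied from
# `cfname` at filenode `crev` is committed with
#
#   meta = {"copy": cfname, "copyrev": hex(crev)}
#
# and fparent1 forced to nullid, signalling readers to follow the
# copy data rather than the first filelog parent.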
1128 1128 @unfilteredmethod
1129 1129 def commit(self, text="", user=None, date=None, match=None, force=False,
1130 1130 editor=False, extra={}):
1131 1131 """Add a new revision to current repository.
1132 1132
1133 1133 Revision information is gathered from the working directory,
1134 1134 match can be used to filter the committed files. If editor is
1135 1135 supplied, it is called to get a commit message.
1136 1136 """
1137 1137
1138 1138 def fail(f, msg):
1139 1139 raise util.Abort('%s: %s' % (f, msg))
1140 1140
1141 1141 if not match:
1142 1142 match = matchmod.always(self.root, '')
1143 1143
1144 1144 if not force:
1145 1145 vdirs = []
1146 1146 match.dir = vdirs.append
1147 1147 match.bad = fail
1148 1148
1149 1149 wlock = self.wlock()
1150 1150 try:
1151 1151 wctx = self[None]
1152 1152 merge = len(wctx.parents()) > 1
1153 1153
1154 1154 if (not force and merge and match and
1155 1155 (match.files() or match.anypats())):
1156 1156 raise util.Abort(_('cannot partially commit a merge '
1157 1157 '(do not specify files or patterns)'))
1158 1158
1159 1159 changes = self.status(match=match, clean=force)
1160 1160 if force:
1161 1161 changes[0].extend(changes[6]) # mq may commit unchanged files
1162 1162
1163 1163 # check subrepos
1164 1164 subs = []
1165 1165 commitsubs = set()
1166 1166 newstate = wctx.substate.copy()
1167 1167 # only manage subrepos and .hgsubstate if .hgsub is present
1168 1168 if '.hgsub' in wctx:
1169 1169 # we'll decide whether to track this ourselves, thanks
1170 1170 if '.hgsubstate' in changes[0]:
1171 1171 changes[0].remove('.hgsubstate')
1172 1172 if '.hgsubstate' in changes[2]:
1173 1173 changes[2].remove('.hgsubstate')
1174 1174
1175 1175 # compare current state to last committed state
1176 1176 # build new substate based on last committed state
1177 1177 oldstate = wctx.p1().substate
1178 1178 for s in sorted(newstate.keys()):
1179 1179 if not match(s):
1180 1180 # ignore working copy, use old state if present
1181 1181 if s in oldstate:
1182 1182 newstate[s] = oldstate[s]
1183 1183 continue
1184 1184 if not force:
1185 1185 raise util.Abort(
1186 1186 _("commit with new subrepo %s excluded") % s)
1187 1187 if wctx.sub(s).dirty(True):
1188 1188 if not self.ui.configbool('ui', 'commitsubrepos'):
1189 1189 raise util.Abort(
1190 1190 _("uncommitted changes in subrepo %s") % s,
1191 1191 hint=_("use --subrepos for recursive commit"))
1192 1192 subs.append(s)
1193 1193 commitsubs.add(s)
1194 1194 else:
1195 1195 bs = wctx.sub(s).basestate()
1196 1196 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1197 1197 if oldstate.get(s, (None, None, None))[1] != bs:
1198 1198 subs.append(s)
1199 1199
1200 1200 # check for removed subrepos
1201 1201 for p in wctx.parents():
1202 1202 r = [s for s in p.substate if s not in newstate]
1203 1203 subs += [s for s in r if match(s)]
1204 1204 if subs:
1205 1205 if (not match('.hgsub') and
1206 1206 '.hgsub' in (wctx.modified() + wctx.added())):
1207 1207 raise util.Abort(
1208 1208 _("can't commit subrepos without .hgsub"))
1209 1209 changes[0].insert(0, '.hgsubstate')
1210 1210
1211 1211 elif '.hgsub' in changes[2]:
1212 1212 # clean up .hgsubstate when .hgsub is removed
1213 1213 if ('.hgsubstate' in wctx and
1214 1214 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1215 1215 changes[2].insert(0, '.hgsubstate')
1216 1216
1217 1217 # make sure all explicit patterns are matched
1218 1218 if not force and match.files():
1219 1219 matched = set(changes[0] + changes[1] + changes[2])
1220 1220
1221 1221 for f in match.files():
1222 1222 f = self.dirstate.normalize(f)
1223 1223 if f == '.' or f in matched or f in wctx.substate:
1224 1224 continue
1225 1225 if f in changes[3]: # missing
1226 1226 fail(f, _('file not found!'))
1227 1227 if f in vdirs: # visited directory
1228 1228 d = f + '/'
1229 1229 for mf in matched:
1230 1230 if mf.startswith(d):
1231 1231 break
1232 1232 else:
1233 1233 fail(f, _("no match under directory!"))
1234 1234 elif f not in self.dirstate:
1235 1235 fail(f, _("file not tracked!"))
1236 1236
1237 1237 if (not force and not extra.get("close") and not merge
1238 1238 and not (changes[0] or changes[1] or changes[2])
1239 1239 and wctx.branch() == wctx.p1().branch()):
1240 1240 return None
1241 1241
1242 1242 if merge and changes[3]:
1243 1243 raise util.Abort(_("cannot commit merge with missing files"))
1244 1244
1245 1245 ms = mergemod.mergestate(self)
1246 1246 for f in changes[0]:
1247 1247 if f in ms and ms[f] == 'u':
1248 1248 raise util.Abort(_("unresolved merge conflicts "
1249 1249 "(see hg help resolve)"))
1250 1250
1251 1251 cctx = context.workingctx(self, text, user, date, extra, changes)
1252 1252 if editor:
1253 1253 cctx._text = editor(self, cctx, subs)
1254 1254 edited = (text != cctx._text)
1255 1255
1256 1256 # commit subs and write new state
1257 1257 if subs:
1258 1258 for s in sorted(commitsubs):
1259 1259 sub = wctx.sub(s)
1260 1260 self.ui.status(_('committing subrepository %s\n') %
1261 1261 subrepo.subrelpath(sub))
1262 1262 sr = sub.commit(cctx._text, user, date)
1263 1263 newstate[s] = (newstate[s][0], sr)
1264 1264 subrepo.writestate(self, newstate)
1265 1265
1266 1266 # Save commit message in case this transaction gets rolled back
1267 1267 # (e.g. by a pretxncommit hook). Leave the content alone on
1268 1268 # the assumption that the user will use the same editor again.
1269 1269 msgfn = self.savecommitmessage(cctx._text)
1270 1270
1271 1271 p1, p2 = self.dirstate.parents()
1272 1272 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1273 1273 try:
1274 1274 self.hook("precommit", throw=True, parent1=hookp1,
1275 1275 parent2=hookp2)
1276 1276 ret = self.commitctx(cctx, True)
1277 1277 except: # re-raises
1278 1278 if edited:
1279 1279 self.ui.write(
1280 1280 _('note: commit message saved in %s\n') % msgfn)
1281 1281 raise
1282 1282
1283 1283 # update bookmarks, dirstate and mergestate
1284 1284 bookmarks.update(self, [p1, p2], ret)
1285 1285 for f in changes[0] + changes[1]:
1286 1286 self.dirstate.normal(f)
1287 1287 for f in changes[2]:
1288 1288 self.dirstate.drop(f)
1289 1289 self.dirstate.setparents(ret)
1290 1290 ms.reset()
1291 1291 finally:
1292 1292 wlock.release()
1293 1293
1294 1294 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1295 1295 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1296 1296 self._afterlock(commithook)
1297 1297 return ret
1298 1298
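# A minimal usage sketch, assuming `repo` has uncommitted changes:
#
#   node = repo.commit(text="fix parser", user="alice")
#   if node is None:
#       repo.ui.status("nothing changed\n")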
1299 1299 @unfilteredmethod
1300 1300 def commitctx(self, ctx, error=False):
1301 1301 """Add a new revision to current repository.
1302 1302 Revision information is passed via the context argument.
1303 1303 """
1304 1304
1305 1305 tr = lock = None
1306 1306 removed = list(ctx.removed())
1307 1307 p1, p2 = ctx.p1(), ctx.p2()
1308 1308 user = ctx.user()
1309 1309
1310 1310 lock = self.lock()
1311 1311 try:
1312 1312 tr = self.transaction("commit")
1313 1313 trp = weakref.proxy(tr)
1314 1314
1315 1315 if ctx.files():
1316 1316 m1 = p1.manifest().copy()
1317 1317 m2 = p2.manifest()
1318 1318
1319 1319 # check in files
1320 1320 new = {}
1321 1321 changed = []
1322 1322 linkrev = len(self)
1323 1323 for f in sorted(ctx.modified() + ctx.added()):
1324 1324 self.ui.note(f + "\n")
1325 1325 try:
1326 1326 fctx = ctx[f]
1327 1327 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1328 1328 changed)
1329 1329 m1.set(f, fctx.flags())
1330 1330 except OSError, inst:
1331 1331 self.ui.warn(_("trouble committing %s!\n") % f)
1332 1332 raise
1333 1333 except IOError, inst:
1334 1334 errcode = getattr(inst, 'errno', errno.ENOENT)
1335 1335 if error or errcode and errcode != errno.ENOENT:
1336 1336 self.ui.warn(_("trouble committing %s!\n") % f)
1337 1337 raise
1338 1338 else:
1339 1339 removed.append(f)
1340 1340
1341 1341 # update manifest
1342 1342 m1.update(new)
1343 1343 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1344 1344 drop = [f for f in removed if f in m1]
1345 1345 for f in drop:
1346 1346 del m1[f]
1347 1347 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1348 1348 p2.manifestnode(), (new, drop))
1349 1349 files = changed + removed
1350 1350 else:
1351 1351 mn = p1.manifestnode()
1352 1352 files = []
1353 1353
1354 1354 # update changelog
1355 1355 self.changelog.delayupdate()
1356 1356 n = self.changelog.add(mn, files, ctx.description(),
1357 1357 trp, p1.node(), p2.node(),
1358 1358 user, ctx.date(), ctx.extra().copy())
1359 1359 p = lambda: self.changelog.writepending() and self.root or ""
1360 1360 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1361 1361 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1362 1362 parent2=xp2, pending=p)
1363 1363 self.changelog.finalize(trp)
1364 1364 # set the new commit in its proper phase
1365 1365 targetphase = phases.newcommitphase(self.ui)
1366 1366 if targetphase:
1367 1367 # retracting the boundary does not alter parent changesets.
1368 1368 # if a parent has a higher phase, the resulting phase will
1369 1369 # be compliant anyway
1370 1370 #
1371 1371 # if minimal phase was 0 we don't need to retract anything
1372 1372 phases.retractboundary(self, targetphase, [n])
1373 1373 tr.close()
1374 1374 branchmap.updatecache(self)
1375 1375 return n
1376 1376 finally:
1377 1377 if tr:
1378 1378 tr.release()
1379 1379 lock.release()
1380 1380
1381 1381 @unfilteredmethod
1382 1382 def destroyed(self, newheadnodes=None):
1383 1383 '''Inform the repository that nodes have been destroyed.
1384 1384 Intended for use by strip and rollback, so there's a common
1385 1385 place for anything that has to be done after destroying history.
1386 1386
1387 1387 If you know the branchheads cache was up to date before nodes were removed
1388 1388 and you also know the set of candidate new heads that may have resulted
1389 1389 from the destruction, you can set newheadnodes. This will enable the
1390 1390 code to update the branchheads cache, rather than having future code
1391 1391 decide it's invalid and regenerate it from scratch.
1392 1392 '''
1393 1393 # When one tries to:
1394 1394 # 1) destroy nodes thus calling this method (e.g. strip)
1395 1395 # 2) use phasecache somewhere (e.g. commit)
1396 1396 #
1397 1397 # then 2) will fail because the phasecache contains nodes that were
1398 1398 # removed. We can either remove phasecache from the filecache,
1399 1399 # causing it to reload next time it is accessed, or simply filter
1400 1400 # the removed nodes now and write the updated cache.
1401 1401 if '_phasecache' in self._filecache:
1402 1402 self._phasecache.filterunknown(self)
1403 1403 self._phasecache.write()
1404 1404
1405 1405 # If we have info (newheadnodes) on how to update the branch cache, do
1406 1406 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1407 1407 # will be caught the next time it is read.
1408 1408 if newheadnodes:
1409 ctxgen = (self[node] for node in newheadnodes
1410 if self.changelog.hasnode(node))
1409 cl = self.changelog
1410 revgen = (cl.rev(node) for node in newheadnodes
1411 if cl.hasnode(node))
1411 1412 cache = self._branchcaches[None]
1412 cache.update(self, ctxgen)
1413 cache.update(self, revgen)
1413 1414 cache.write(self)
1414 1415
1415 1416 # Ensure the persistent tag cache is updated. Doing it now
1416 1417 # means that the tag cache only has to worry about destroyed
1417 1418 # heads immediately after a strip/rollback. That in turn
1418 1419 # guarantees that "cachetip == currenttip" (comparing both rev
1419 1420 # and node) always means no nodes have been added or destroyed.
1420 1421
1421 1422 # XXX this is suboptimal when qrefresh'ing: we strip the current
1422 1423 # head, refresh the tag cache, then immediately add a new head.
1423 1424 # But I think doing it this way is necessary for the "instant
1424 1425 # tag cache retrieval" case to work.
1425 1426 self.invalidatecaches()
1426 1427
1427 1428 # Discard all cache entries to force reloading everything.
1428 1429 self._filecache.clear()
1429 1430
1430 1431 def walk(self, match, node=None):
1431 1432 '''
1432 1433 walk recursively through the directory tree or a given
1433 1434 changeset, finding all files matched by the match
1434 1435 function
1435 1436 '''
1436 1437 return self[node].walk(match)
1437 1438
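# Usage sketch, using the matchmod imported by this module:
#
#   m = matchmod.match(repo.root, '', ['glob:*.py'])
#   for f in repo.walk(m, node='tip'):
#       repo.ui.write(f + '\n')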
1438 1439 def status(self, node1='.', node2=None, match=None,
1439 1440 ignored=False, clean=False, unknown=False,
1440 1441 listsubrepos=False):
1441 1442 """return status of files between two nodes or node and working
1442 1443 directory.
1443 1444
1444 1445 If node1 is None, use the first dirstate parent instead.
1445 1446 If node2 is None, compare node1 with working directory.
1446 1447 """
1447 1448
1448 1449 def mfmatches(ctx):
1449 1450 mf = ctx.manifest().copy()
1450 1451 if match.always():
1451 1452 return mf
1452 1453 for fn in mf.keys():
1453 1454 if not match(fn):
1454 1455 del mf[fn]
1455 1456 return mf
1456 1457
1457 1458 if isinstance(node1, context.changectx):
1458 1459 ctx1 = node1
1459 1460 else:
1460 1461 ctx1 = self[node1]
1461 1462 if isinstance(node2, context.changectx):
1462 1463 ctx2 = node2
1463 1464 else:
1464 1465 ctx2 = self[node2]
1465 1466
1466 1467 working = ctx2.rev() is None
1467 1468 parentworking = working and ctx1 == self['.']
1468 1469 match = match or matchmod.always(self.root, self.getcwd())
1469 1470 listignored, listclean, listunknown = ignored, clean, unknown
1470 1471
1471 1472 # load earliest manifest first for caching reasons
1472 1473 if not working and ctx2.rev() < ctx1.rev():
1473 1474 ctx2.manifest()
1474 1475
1475 1476 if not parentworking:
1476 1477 def bad(f, msg):
1477 1478 # 'f' may be a directory pattern from 'match.files()',
1478 1479 # so 'f not in ctx1' is not enough
1479 1480 if f not in ctx1 and f not in ctx1.dirs():
1480 1481 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1481 1482 match.bad = bad
1482 1483
1483 1484 if working: # we need to scan the working dir
1484 1485 subrepos = []
1485 1486 if '.hgsub' in self.dirstate:
1486 1487 subrepos = ctx2.substate.keys()
1487 1488 s = self.dirstate.status(match, subrepos, listignored,
1488 1489 listclean, listunknown)
1489 1490 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1490 1491
1491 1492 # check for any possibly clean files
1492 1493 if parentworking and cmp:
1493 1494 fixup = []
1494 1495 # do a full compare of any files that might have changed
1495 1496 for f in sorted(cmp):
1496 1497 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1497 1498 or ctx1[f].cmp(ctx2[f])):
1498 1499 modified.append(f)
1499 1500 else:
1500 1501 fixup.append(f)
1501 1502
1502 1503 # update dirstate for files that are actually clean
1503 1504 if fixup:
1504 1505 if listclean:
1505 1506 clean += fixup
1506 1507
1507 1508 try:
1508 1509 # updating the dirstate is optional
1509 1510 # so we don't wait on the lock
1510 1511 wlock = self.wlock(False)
1511 1512 try:
1512 1513 for f in fixup:
1513 1514 self.dirstate.normal(f)
1514 1515 finally:
1515 1516 wlock.release()
1516 1517 except error.LockError:
1517 1518 pass
1518 1519
1519 1520 if not parentworking:
1520 1521 mf1 = mfmatches(ctx1)
1521 1522 if working:
1522 1523 # we are comparing working dir against non-parent
1523 1524 # generate a pseudo-manifest for the working dir
1524 1525 mf2 = mfmatches(self['.'])
1525 1526 for f in cmp + modified + added:
1526 1527 mf2[f] = None
1527 1528 mf2.set(f, ctx2.flags(f))
1528 1529 for f in removed:
1529 1530 if f in mf2:
1530 1531 del mf2[f]
1531 1532 else:
1532 1533 # we are comparing two revisions
1533 1534 deleted, unknown, ignored = [], [], []
1534 1535 mf2 = mfmatches(ctx2)
1535 1536
1536 1537 modified, added, clean = [], [], []
1537 1538 withflags = mf1.withflags() | mf2.withflags()
1538 1539 for fn in mf2:
1539 1540 if fn in mf1:
1540 1541 if (fn not in deleted and
1541 1542 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1542 1543 (mf1[fn] != mf2[fn] and
1543 1544 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1544 1545 modified.append(fn)
1545 1546 elif listclean:
1546 1547 clean.append(fn)
1547 1548 del mf1[fn]
1548 1549 elif fn not in deleted:
1549 1550 added.append(fn)
1550 1551 removed = mf1.keys()
1551 1552
1552 1553 if working and modified and not self.dirstate._checklink:
1553 1554 # Symlink placeholders may get non-symlink-like contents
1554 1555 # via user error or dereferencing by NFS or Samba servers,
1555 1556 # so we filter out any placeholders that don't look like a
1556 1557 # symlink
1557 1558 sane = []
1558 1559 for f in modified:
1559 1560 if ctx2.flags(f) == 'l':
1560 1561 d = ctx2[f].data()
1561 1562 if len(d) >= 1024 or '\n' in d or util.binary(d):
1562 1563 self.ui.debug('ignoring suspect symlink placeholder'
1563 1564 ' "%s"\n' % f)
1564 1565 continue
1565 1566 sane.append(f)
1566 1567 modified = sane
1567 1568
1568 1569 r = modified, added, removed, deleted, unknown, ignored, clean
1569 1570
1570 1571 if listsubrepos:
1571 1572 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1572 1573 if working:
1573 1574 rev2 = None
1574 1575 else:
1575 1576 rev2 = ctx2.substate[subpath][1]
1576 1577 try:
1577 1578 submatch = matchmod.narrowmatcher(subpath, match)
1578 1579 s = sub.status(rev2, match=submatch, ignored=listignored,
1579 1580 clean=listclean, unknown=listunknown,
1580 1581 listsubrepos=True)
1581 1582 for rfiles, sfiles in zip(r, s):
1582 1583 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1583 1584 except error.LookupError:
1584 1585 self.ui.status(_("skipping missing subrepository: %s\n")
1585 1586 % subpath)
1586 1587
1587 1588 for l in r:
1588 1589 l.sort()
1589 1590 return r
1590 1591
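# The seven lists come back in the order assembled above; a sketch:
#
#   modified, added, removed, deleted, unknown, ignored, clean = \
#       repo.status(unknown=True, clean=True)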
1591 1592 def heads(self, start=None):
1592 1593 heads = self.changelog.heads(start)
1593 1594 # sort the output in rev descending order
1594 1595 return sorted(heads, key=self.changelog.rev, reverse=True)
1595 1596
1596 1597 def branchheads(self, branch=None, start=None, closed=False):
1597 1598 '''return a (possibly filtered) list of heads for the given branch
1598 1599
1599 1600 Heads are returned in topological order, from newest to oldest.
1600 1601 If branch is None, use the dirstate branch.
1601 1602 If start is not None, return only heads reachable from start.
1602 1603 If closed is True, return heads that are marked as closed as well.
1603 1604 '''
1604 1605 if branch is None:
1605 1606 branch = self[None].branch()
1606 1607 branches = self.branchmap()
1607 1608 if branch not in branches:
1608 1609 return []
1609 1610 # the cache returns heads ordered lowest to highest
1610 1611 bheads = list(reversed(branches[branch]))
1611 1612 if start is not None:
1612 1613 # filter out the heads that cannot be reached from startrev
1613 1614 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1614 1615 bheads = [h for h in bheads if h in fbheads]
1615 1616 if not closed:
1616 1617 bheads = [h for h in bheads if not self[h].closesbranch()]
1617 1618 return bheads
1618 1619
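# Usage sketch: newest-first heads of the default branch, keeping
# heads that close the branch:
#
#   heads = repo.branchheads('default', closed=True)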
1619 1620 def branches(self, nodes):
1620 1621 if not nodes:
1621 1622 nodes = [self.changelog.tip()]
1622 1623 b = []
1623 1624 for n in nodes:
1624 1625 t = n
1625 1626 while True:
1626 1627 p = self.changelog.parents(n)
1627 1628 if p[1] != nullid or p[0] == nullid:
1628 1629 b.append((t, n, p[0], p[1]))
1629 1630 break
1630 1631 n = p[0]
1631 1632 return b
1632 1633
1633 1634 def between(self, pairs):
1634 1635 r = []
1635 1636
1636 1637 for top, bottom in pairs:
1637 1638 n, l, i = top, [], 0
1638 1639 f = 1
1639 1640
1640 1641 while n != bottom and n != nullid:
1641 1642 p = self.changelog.parents(n)[0]
1642 1643 if i == f:
1643 1644 l.append(n)
1644 1645 f = f * 2
1645 1646 n = p
1646 1647 i += 1
1647 1648
1648 1649 r.append(l)
1649 1650
1650 1651 return r
1651 1652
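# Worked example: walking a linear chain from top towards bottom, the
# i == f test (with f doubling) keeps the nodes at distances 1, 2, 4,
# 8, ... from top, so each returned list has logarithmic length.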
1652 1653 def pull(self, remote, heads=None, force=False):
1653 1654 # don't open a transaction for nothing or you break future useful
1654 1655 # rollback calls
1655 1656 tr = None
1656 1657 trname = 'pull\n' + util.hidepassword(remote.url())
1657 1658 lock = self.lock()
1658 1659 try:
1659 1660 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1660 1661 force=force)
1661 1662 common, fetch, rheads = tmp
1662 1663 if not fetch:
1663 1664 self.ui.status(_("no changes found\n"))
1664 1665 added = []
1665 1666 result = 0
1666 1667 else:
1667 1668 tr = self.transaction(trname)
1668 1669 if heads is None and list(common) == [nullid]:
1669 1670 self.ui.status(_("requesting all changes\n"))
1670 1671 elif heads is None and remote.capable('changegroupsubset'):
1671 1672 # issue1320, avoid a race if remote changed after discovery
1672 1673 heads = rheads
1673 1674
1674 1675 if remote.capable('getbundle'):
1675 1676 cg = remote.getbundle('pull', common=common,
1676 1677 heads=heads or rheads)
1677 1678 elif heads is None:
1678 1679 cg = remote.changegroup(fetch, 'pull')
1679 1680 elif not remote.capable('changegroupsubset'):
1680 1681 raise util.Abort(_("partial pull cannot be done because "
1681 1682 "other repository doesn't support "
1682 1683 "changegroupsubset."))
1683 1684 else:
1684 1685 cg = remote.changegroupsubset(fetch, heads, 'pull')
1685 1686 clstart = len(self.changelog)
1686 1687 result = self.addchangegroup(cg, 'pull', remote.url())
1687 1688 clend = len(self.changelog)
1688 1689 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1689 1690
1690 1691 # compute target subset
1691 1692 if heads is None:
1692 1693 # We pulled everything possible
1693 1694 # sync on everything common
1694 1695 subset = common + added
1695 1696 else:
1696 1697 # We pulled a specific subset
1697 1698 # sync on this subset
1698 1699 subset = heads
1699 1700
1700 1701 # Get remote phases data from remote
1701 1702 remotephases = remote.listkeys('phases')
1702 1703 publishing = bool(remotephases.get('publishing', False))
1703 1704 if remotephases and not publishing:
1704 1705 # remote is new and non-publishing
1705 1706 pheads, _dr = phases.analyzeremotephases(self, subset,
1706 1707 remotephases)
1707 1708 phases.advanceboundary(self, phases.public, pheads)
1708 1709 phases.advanceboundary(self, phases.draft, subset)
1709 1710 else:
1710 1711 # Remote is old or publishing; all common changesets
1711 1712 # should be seen as public
1712 1713 phases.advanceboundary(self, phases.public, subset)
1713 1714
1714 1715 if obsolete._enabled:
1715 1716 self.ui.debug('fetching remote obsolete markers\n')
1716 1717 remoteobs = remote.listkeys('obsolete')
1717 1718 if 'dump0' in remoteobs:
1718 1719 if tr is None:
1719 1720 tr = self.transaction(trname)
1720 1721 for key in sorted(remoteobs, reverse=True):
1721 1722 if key.startswith('dump'):
1722 1723 data = base85.b85decode(remoteobs[key])
1723 1724 self.obsstore.mergemarkers(tr, data)
1724 1725 self.invalidatevolatilesets()
1725 1726 if tr is not None:
1726 1727 tr.close()
1727 1728 finally:
1728 1729 if tr is not None:
1729 1730 tr.release()
1730 1731 lock.release()
1731 1732
1732 1733 return result
1733 1734
1734 1735 def checkpush(self, force, revs):
1735 1736 """Extensions can override this function if additional checks have
1736 1737 to be performed before pushing, or call it if they override push
1737 1738 command.
1738 1739 """
1739 1740 pass
1740 1741
1741 1742 def push(self, remote, force=False, revs=None, newbranch=False):
1742 1743 '''Push outgoing changesets (limited by revs) from the current
1743 1744 repository to remote. Return an integer:
1744 1745 - None means nothing to push
1745 1746 - 0 means HTTP error
1746 1747 - 1 means we pushed and remote head count is unchanged *or*
1747 1748 we have outgoing changesets but refused to push
1748 1749 - other values as described by addchangegroup()
1749 1750 '''
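# Illustrative readings of the return value (see addchangegroup()
# below for the head-count arithmetic): None -> nothing to push,
# 1 -> pushed with the remote head count unchanged, 2 -> pushed and
# one new remote head was created.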
1750 1751 # there are two ways to push to remote repo:
1751 1752 #
1752 1753 # addchangegroup assumes local user can lock remote
1753 1754 # repo (local filesystem, old ssh servers).
1754 1755 #
1755 1756 # unbundle assumes local user cannot lock remote repo (new ssh
1756 1757 # servers, http servers).
1757 1758
1758 1759 if not remote.canpush():
1759 1760 raise util.Abort(_("destination does not support push"))
1760 1761 unfi = self.unfiltered()
1761 1762 # get local lock as we might write phase data
1762 1763 locallock = self.lock()
1763 1764 try:
1764 1765 self.checkpush(force, revs)
1765 1766 lock = None
1766 1767 unbundle = remote.capable('unbundle')
1767 1768 if not unbundle:
1768 1769 lock = remote.lock()
1769 1770 try:
1770 1771 # discovery
1771 1772 fci = discovery.findcommonincoming
1772 1773 commoninc = fci(unfi, remote, force=force)
1773 1774 common, inc, remoteheads = commoninc
1774 1775 fco = discovery.findcommonoutgoing
1775 1776 outgoing = fco(unfi, remote, onlyheads=revs,
1776 1777 commoninc=commoninc, force=force)
1777 1778
1778 1779
1779 1780 if not outgoing.missing:
1780 1781 # nothing to push
1781 1782 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1782 1783 ret = None
1783 1784 else:
1784 1785 # something to push
1785 1786 if not force:
1786 1787 # if self.obsstore is empty --> no obsolete markers;
1787 1788 # then we can skip the iteration
1788 1789 if unfi.obsstore:
1789 1790 # these messages are defined here for the 80-char limit
1790 1791 mso = _("push includes obsolete changeset: %s!")
1791 1792 mst = "push includes %s changeset: %s!"
1792 1793 # plain versions for i18n tool to detect them
1793 1794 _("push includes unstable changeset: %s!")
1794 1795 _("push includes bumped changeset: %s!")
1795 1796 _("push includes divergent changeset: %s!")
1796 1797 # If we are to push and there is at least one
1797 1798 # obsolete or unstable changeset in missing, at
1798 1799 # least one of the missing heads will be obsolete or
1799 1800 # unstable. So checking heads only is ok
1800 1801 for node in outgoing.missingheads:
1801 1802 ctx = unfi[node]
1802 1803 if ctx.obsolete():
1803 1804 raise util.Abort(mso % ctx)
1804 1805 elif ctx.troubled():
1805 1806 raise util.Abort(_(mst)
1806 1807 % (ctx.troubles()[0],
1807 1808 ctx))
1808 1809 discovery.checkheads(unfi, remote, outgoing,
1809 1810 remoteheads, newbranch,
1810 1811 bool(inc))
1811 1812
1812 1813 # create a changegroup from local
1813 1814 if revs is None and not outgoing.excluded:
1814 1815 # push everything,
1815 1816 # use the fast path, no race possible on push
1816 1817 cg = self._changegroup(outgoing.missing, 'push')
1817 1818 else:
1818 1819 cg = self.getlocalbundle('push', outgoing)
1819 1820
1820 1821 # apply changegroup to remote
1821 1822 if unbundle:
1822 1823 # local repo finds heads on server, finds out what
1823 1824 # revs it must push. once revs transferred, if server
1824 1825 # finds it has different heads (someone else won
1825 1826 # commit/push race), server aborts.
1826 1827 if force:
1827 1828 remoteheads = ['force']
1828 1829 # ssh: return remote's addchangegroup()
1829 1830 # http: return remote's addchangegroup() or 0 for error
1830 1831 ret = remote.unbundle(cg, remoteheads, 'push')
1831 1832 else:
1832 1833 # we return an integer indicating remote head count
1833 1834 # change
1834 1835 ret = remote.addchangegroup(cg, 'push', self.url())
1835 1836
1836 1837 if ret:
1837 1838 # push succeeded, synchronize the target of the push
1838 1839 cheads = outgoing.missingheads
1839 1840 elif revs is None:
1840 1841 # An all-out push failed. synchronize on all common
1841 1842 cheads = outgoing.commonheads
1842 1843 else:
1843 1844 # I want cheads = heads(::missingheads and ::commonheads)
1844 1845 # (missingheads is revs with secret changeset filtered out)
1845 1846 #
1846 1847 # This can be expressed as:
1847 1848 # cheads = ( (missingheads and ::commonheads)
1848 1849 # + (commonheads and ::missingheads)
1849 1850 # )
1850 1851 #
1851 1852 # while trying to push we already computed the following:
1852 1853 # common = (::commonheads)
1853 1854 # missing = ((commonheads::missingheads) - commonheads)
1854 1855 #
1855 1856 # We can pick:
1856 1857 # * missingheads part of common (::commonheads)
1857 1858 common = set(outgoing.common)
1858 1859 cheads = [node for node in revs if node in common]
1859 1860 # and
1860 1861 # * commonheads parents on missing
1861 1862 revset = unfi.set('%ln and parents(roots(%ln))',
1862 1863 outgoing.commonheads,
1863 1864 outgoing.missing)
1864 1865 cheads.extend(c.node() for c in revset)
1865 1866 # even when we don't push, exchanging phase data is useful
1866 1867 remotephases = remote.listkeys('phases')
1867 1868 if not remotephases: # old server or public only repo
1868 1869 phases.advanceboundary(self, phases.public, cheads)
1869 1870 # don't push any phase data as there is nothing to push
1870 1871 else:
1871 1872 ana = phases.analyzeremotephases(self, cheads, remotephases)
1872 1873 pheads, droots = ana
1873 1874 ### Apply remote phase on local
1874 1875 if remotephases.get('publishing', False):
1875 1876 phases.advanceboundary(self, phases.public, cheads)
1876 1877 else: # publish = False
1877 1878 phases.advanceboundary(self, phases.public, pheads)
1878 1879 phases.advanceboundary(self, phases.draft, cheads)
1879 1880 ### Apply local phase on remote
1880 1881
1881 1882 # Get the list of all revs draft on remote but public here.
1882 1883 # XXX Beware that the revset breaks if droots is not strictly
1883 1884 # XXX roots; we may want to ensure it is, but that is costly
1884 1885 outdated = unfi.set('heads((%ln::%ln) and public())',
1885 1886 droots, cheads)
1886 1887 for newremotehead in outdated:
1887 1888 r = remote.pushkey('phases',
1888 1889 newremotehead.hex(),
1889 1890 str(phases.draft),
1890 1891 str(phases.public))
1891 1892 if not r:
1892 1893 self.ui.warn(_('updating %s to public failed!\n')
1893 1894 % newremotehead)
1894 1895 self.ui.debug('try to push obsolete markers to remote\n')
1895 1896 if (obsolete._enabled and self.obsstore and
1896 1897 'obsolete' in remote.listkeys('namespaces')):
1897 1898 rslts = []
1898 1899 remotedata = self.listkeys('obsolete')
1899 1900 for key in sorted(remotedata, reverse=True):
1900 1901 # reverse sort to ensure we end with dump0
1901 1902 data = remotedata[key]
1902 1903 rslts.append(remote.pushkey('obsolete', key, '', data))
1903 1904 if [r for r in rslts if not r]:
1904 1905 msg = _('failed to push some obsolete markers!\n')
1905 1906 self.ui.warn(msg)
1906 1907 finally:
1907 1908 if lock is not None:
1908 1909 lock.release()
1909 1910 finally:
1910 1911 locallock.release()
1911 1912
1912 1913 self.ui.debug("checking for updated bookmarks\n")
1913 1914 rb = remote.listkeys('bookmarks')
1914 1915 for k in rb.keys():
1915 1916 if k in unfi._bookmarks:
1916 1917 nr, nl = rb[k], hex(self._bookmarks[k])
1917 1918 if nr in unfi:
1918 1919 cr = unfi[nr]
1919 1920 cl = unfi[nl]
1920 1921 if bookmarks.validdest(unfi, cr, cl):
1921 1922 r = remote.pushkey('bookmarks', k, nr, nl)
1922 1923 if r:
1923 1924 self.ui.status(_("updating bookmark %s\n") % k)
1924 1925 else:
1925 1926 self.ui.warn(_('updating bookmark %s'
1926 1927 ' failed!\n') % k)
1927 1928
1928 1929 return ret
1929 1930
1930 1931 def changegroupinfo(self, nodes, source):
1931 1932 if self.ui.verbose or source == 'bundle':
1932 1933 self.ui.status(_("%d changesets found\n") % len(nodes))
1933 1934 if self.ui.debugflag:
1934 1935 self.ui.debug("list of changesets:\n")
1935 1936 for node in nodes:
1936 1937 self.ui.debug("%s\n" % hex(node))
1937 1938
1938 1939 def changegroupsubset(self, bases, heads, source):
1939 1940 """Compute a changegroup consisting of all the nodes that are
1940 1941 descendants of any of the bases and ancestors of any of the heads.
1941 1942 Return a chunkbuffer object whose read() method will return
1942 1943 successive changegroup chunks.
1943 1944
1944 1945 It is fairly complex as determining which filenodes and which
1945 1946 manifest nodes need to be included for the changeset to be complete
1946 1947 is non-trivial.
1947 1948
1948 1949 Another wrinkle is doing the reverse, figuring out which changeset in
1949 1950 the changegroup a particular filenode or manifestnode belongs to.
1950 1951 """
1951 1952 cl = self.changelog
1952 1953 if not bases:
1953 1954 bases = [nullid]
1954 1955 csets, bases, heads = cl.nodesbetween(bases, heads)
1955 1956 # We assume that all ancestors of bases are known
1956 1957 common = cl.ancestors([cl.rev(n) for n in bases])
1957 1958 return self._changegroupsubset(common, csets, heads, source)
1958 1959
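# Usage sketch, assuming `base` and `head` are binary node ids known
# to this repository:
#
#   cg = repo.changegroupsubset([base], [head], 'bundle')
#   chunk = cg.read(4096)  # successive changegroup chunks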
1959 1960 def getlocalbundle(self, source, outgoing):
1960 1961 """Like getbundle, but taking a discovery.outgoing as an argument.
1961 1962
1962 1963 This is only implemented for local repos and reuses potentially
1963 1964 precomputed sets in outgoing."""
1964 1965 if not outgoing.missing:
1965 1966 return None
1966 1967 return self._changegroupsubset(outgoing.common,
1967 1968 outgoing.missing,
1968 1969 outgoing.missingheads,
1969 1970 source)
1970 1971
1971 1972 def getbundle(self, source, heads=None, common=None):
1972 1973 """Like changegroupsubset, but returns the set difference between the
1973 1974 ancestors of heads and the ancestors of common.
1974 1975
1975 1976 If heads is None, use the local heads. If common is None, use [nullid].
1976 1977
1977 1978 The nodes in common might not all be known locally due to the way the
1978 1979 current discovery protocol works.
1979 1980 """
1980 1981 cl = self.changelog
1981 1982 if common:
1982 1983 hasnode = cl.hasnode
1983 1984 common = [n for n in common if hasnode(n)]
1984 1985 else:
1985 1986 common = [nullid]
1986 1987 if not heads:
1987 1988 heads = cl.heads()
1988 1989 return self.getlocalbundle(source,
1989 1990 discovery.outgoing(cl, common, heads))
1990 1991
1991 1992 @unfilteredmethod
1992 1993 def _changegroupsubset(self, commonrevs, csets, heads, source):
1993 1994
1994 1995 cl = self.changelog
1995 1996 mf = self.manifest
1996 1997 mfs = {} # needed manifests
1997 1998 fnodes = {} # needed file nodes
1998 1999 changedfiles = set()
1999 2000 fstate = ['', {}]
2000 2001 count = [0, 0]
2001 2002
2002 2003 # can we go through the fast path?
2003 2004 heads.sort()
2004 2005 if heads == sorted(self.heads()):
2005 2006 return self._changegroup(csets, source)
2006 2007
2007 2008 # slow path
2008 2009 self.hook('preoutgoing', throw=True, source=source)
2009 2010 self.changegroupinfo(csets, source)
2010 2011
2011 2012 # filter any nodes that claim to be part of the known set
2012 2013 def prune(revlog, missing):
2013 2014 rr, rl = revlog.rev, revlog.linkrev
2014 2015 return [n for n in missing
2015 2016 if rl(rr(n)) not in commonrevs]
2016 2017
2017 2018 progress = self.ui.progress
2018 2019 _bundling = _('bundling')
2019 2020 _changesets = _('changesets')
2020 2021 _manifests = _('manifests')
2021 2022 _files = _('files')
2022 2023
2023 2024 def lookup(revlog, x):
2024 2025 if revlog == cl:
2025 2026 c = cl.read(x)
2026 2027 changedfiles.update(c[3])
2027 2028 mfs.setdefault(c[0], x)
2028 2029 count[0] += 1
2029 2030 progress(_bundling, count[0],
2030 2031 unit=_changesets, total=count[1])
2031 2032 return x
2032 2033 elif revlog == mf:
2033 2034 clnode = mfs[x]
2034 2035 mdata = mf.readfast(x)
2035 2036 for f, n in mdata.iteritems():
2036 2037 if f in changedfiles:
2037 2038 fnodes[f].setdefault(n, clnode)
2038 2039 count[0] += 1
2039 2040 progress(_bundling, count[0],
2040 2041 unit=_manifests, total=count[1])
2041 2042 return clnode
2042 2043 else:
2043 2044 progress(_bundling, count[0], item=fstate[0],
2044 2045 unit=_files, total=count[1])
2045 2046 return fstate[1][x]
2046 2047
2047 2048 bundler = changegroup.bundle10(lookup)
2048 2049 reorder = self.ui.config('bundle', 'reorder', 'auto')
2049 2050 if reorder == 'auto':
2050 2051 reorder = None
2051 2052 else:
2052 2053 reorder = util.parsebool(reorder)
2053 2054
2054 2055 def gengroup():
2055 2056 # Create a changenode group generator that will call our functions
2056 2057 # back to lookup the owning changenode and collect information.
2057 2058 count[:] = [0, len(csets)]
2058 2059 for chunk in cl.group(csets, bundler, reorder=reorder):
2059 2060 yield chunk
2060 2061 progress(_bundling, None)
2061 2062
2062 2063 # Create a generator for the manifestnodes that calls our lookup
2063 2064 # and data collection functions back.
2064 2065 for f in changedfiles:
2065 2066 fnodes[f] = {}
2066 2067 count[:] = [0, len(mfs)]
2067 2068 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2068 2069 yield chunk
2069 2070 progress(_bundling, None)
2070 2071
2071 2072 mfs.clear()
2072 2073
2073 2074 # Go through all our files in order sorted by name.
2074 2075 count[:] = [0, len(changedfiles)]
2075 2076 for fname in sorted(changedfiles):
2076 2077 filerevlog = self.file(fname)
2077 2078 if not len(filerevlog):
2078 2079 raise util.Abort(_("empty or missing revlog for %s")
2079 2080 % fname)
2080 2081 fstate[0] = fname
2081 2082 fstate[1] = fnodes.pop(fname, {})
2082 2083
2083 2084 nodelist = prune(filerevlog, fstate[1])
2084 2085 if nodelist:
2085 2086 count[0] += 1
2086 2087 yield bundler.fileheader(fname)
2087 2088 for chunk in filerevlog.group(nodelist, bundler, reorder):
2088 2089 yield chunk
2089 2090
2090 2091 # Signal that no more groups are left.
2091 2092 yield bundler.close()
2092 2093 progress(_bundling, None)
2093 2094
2094 2095 if csets:
2095 2096 self.hook('outgoing', node=hex(csets[0]), source=source)
2096 2097
2097 2098 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2098 2099
2099 2100 def changegroup(self, basenodes, source):
2100 2101 # to avoid a race we use changegroupsubset() (issue1320)
2101 2102 return self.changegroupsubset(basenodes, self.heads(), source)
2102 2103
2103 2104 @unfilteredmethod
2104 2105 def _changegroup(self, nodes, source):
2105 2106 """Compute the changegroup of all nodes that we have that a recipient
2106 2107 doesn't. Return a chunkbuffer object whose read() method will return
2107 2108 successive changegroup chunks.
2108 2109
2109 2110 This is much easier than the previous function as we can assume that
2110 2111 the recipient has any changenode we aren't sending them.
2111 2112
2112 2113 nodes is the set of nodes to send"""
2113 2114
2114 2115 cl = self.changelog
2115 2116 mf = self.manifest
2116 2117 mfs = {}
2117 2118 changedfiles = set()
2118 2119 fstate = ['']
2119 2120 count = [0, 0]
2120 2121
2121 2122 self.hook('preoutgoing', throw=True, source=source)
2122 2123 self.changegroupinfo(nodes, source)
2123 2124
2124 2125 revset = set([cl.rev(n) for n in nodes])
2125 2126
2126 2127 def gennodelst(log):
2127 2128 ln, llr = log.node, log.linkrev
2128 2129 return [ln(r) for r in log if llr(r) in revset]
2129 2130
2130 2131 progress = self.ui.progress
2131 2132 _bundling = _('bundling')
2132 2133 _changesets = _('changesets')
2133 2134 _manifests = _('manifests')
2134 2135 _files = _('files')
2135 2136
2136 2137 def lookup(revlog, x):
2137 2138 if revlog == cl:
2138 2139 c = cl.read(x)
2139 2140 changedfiles.update(c[3])
2140 2141 mfs.setdefault(c[0], x)
2141 2142 count[0] += 1
2142 2143 progress(_bundling, count[0],
2143 2144 unit=_changesets, total=count[1])
2144 2145 return x
2145 2146 elif revlog == mf:
2146 2147 count[0] += 1
2147 2148 progress(_bundling, count[0],
2148 2149 unit=_manifests, total=count[1])
2149 2150 return cl.node(revlog.linkrev(revlog.rev(x)))
2150 2151 else:
2151 2152 progress(_bundling, count[0], item=fstate[0],
2152 2153 total=count[1], unit=_files)
2153 2154 return cl.node(revlog.linkrev(revlog.rev(x)))
2154 2155
2155 2156 bundler = changegroup.bundle10(lookup)
2156 2157 reorder = self.ui.config('bundle', 'reorder', 'auto')
2157 2158 if reorder == 'auto':
2158 2159 reorder = None
2159 2160 else:
2160 2161 reorder = util.parsebool(reorder)
2161 2162
2162 2163 def gengroup():
2163 2164 '''yield a sequence of changegroup chunks (strings)'''
2164 2165 # construct a list of all changed files
2165 2166
2166 2167 count[:] = [0, len(nodes)]
2167 2168 for chunk in cl.group(nodes, bundler, reorder=reorder):
2168 2169 yield chunk
2169 2170 progress(_bundling, None)
2170 2171
2171 2172 count[:] = [0, len(mfs)]
2172 2173 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2173 2174 yield chunk
2174 2175 progress(_bundling, None)
2175 2176
2176 2177 count[:] = [0, len(changedfiles)]
2177 2178 for fname in sorted(changedfiles):
2178 2179 filerevlog = self.file(fname)
2179 2180 if not len(filerevlog):
2180 2181 raise util.Abort(_("empty or missing revlog for %s")
2181 2182 % fname)
2182 2183 fstate[0] = fname
2183 2184 nodelist = gennodelst(filerevlog)
2184 2185 if nodelist:
2185 2186 count[0] += 1
2186 2187 yield bundler.fileheader(fname)
2187 2188 for chunk in filerevlog.group(nodelist, bundler, reorder):
2188 2189 yield chunk
2189 2190 yield bundler.close()
2190 2191 progress(_bundling, None)
2191 2192
2192 2193 if nodes:
2193 2194 self.hook('outgoing', node=hex(nodes[0]), source=source)
2194 2195
2195 2196 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2196 2197
2197 2198 @unfilteredmethod
2198 2199 def addchangegroup(self, source, srctype, url, emptyok=False):
2199 2200 """Add the changegroup returned by source.read() to this repo.
2200 2201 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2201 2202 the URL of the repo where this changegroup is coming from.
2202 2203
2203 2204 Return an integer summarizing the change to this repo:
2204 2205 - nothing changed or no source: 0
2205 2206 - more heads than before: 1+added heads (2..n)
2206 2207 - fewer heads than before: -1-removed heads (-2..-n)
2207 2208 - number of heads stays the same: 1
2208 2209 """
2209 2210 def csmap(x):
2210 2211 self.ui.debug("add changeset %s\n" % short(x))
2211 2212 return len(cl)
2212 2213
2213 2214 def revmap(x):
2214 2215 return cl.rev(x)
2215 2216
2216 2217 if not source:
2217 2218 return 0
2218 2219
2219 2220 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2220 2221
2221 2222 changesets = files = revisions = 0
2222 2223 efiles = set()
2223 2224
2224 2225 # write changelog data to temp files so concurrent readers will not see
2225 2226 # an inconsistent view
2226 2227 cl = self.changelog
2227 2228 cl.delayupdate()
2228 2229 oldheads = cl.heads()
2229 2230
2230 2231 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2231 2232 try:
2232 2233 trp = weakref.proxy(tr)
2233 2234 # pull off the changeset group
2234 2235 self.ui.status(_("adding changesets\n"))
2235 2236 clstart = len(cl)
2236 2237 class prog(object):
2237 2238 step = _('changesets')
2238 2239 count = 1
2239 2240 ui = self.ui
2240 2241 total = None
2241 2242 def __call__(self):
2242 2243 self.ui.progress(self.step, self.count, unit=_('chunks'),
2243 2244 total=self.total)
2244 2245 self.count += 1
2245 2246 pr = prog()
2246 2247 source.callback = pr
2247 2248
2248 2249 source.changelogheader()
2249 2250 srccontent = cl.addgroup(source, csmap, trp)
2250 2251 if not (srccontent or emptyok):
2251 2252 raise util.Abort(_("received changelog group is empty"))
2252 2253 clend = len(cl)
2253 2254 changesets = clend - clstart
2254 2255 for c in xrange(clstart, clend):
2255 2256 efiles.update(self[c].files())
2256 2257 efiles = len(efiles)
2257 2258 self.ui.progress(_('changesets'), None)
2258 2259
2259 2260 # pull off the manifest group
2260 2261 self.ui.status(_("adding manifests\n"))
2261 2262 pr.step = _('manifests')
2262 2263 pr.count = 1
2263 2264 pr.total = changesets # manifests <= changesets
2264 2265 # no need to check for empty manifest group here:
2265 2266 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2266 2267 # no new manifest will be created and the manifest group will
2267 2268 # be empty during the pull
2268 2269 source.manifestheader()
2269 2270 self.manifest.addgroup(source, revmap, trp)
2270 2271 self.ui.progress(_('manifests'), None)
2271 2272
2272 2273 needfiles = {}
2273 2274 if self.ui.configbool('server', 'validate', default=False):
2274 2275 # validate incoming csets have their manifests
2275 2276 for cset in xrange(clstart, clend):
2276 2277 mfest = self.changelog.read(self.changelog.node(cset))[0]
2277 2278 mfest = self.manifest.readdelta(mfest)
2278 2279 # store file nodes we must see
2279 2280 for f, n in mfest.iteritems():
2280 2281 needfiles.setdefault(f, set()).add(n)
2281 2282
2282 2283 # process the files
2283 2284 self.ui.status(_("adding file changes\n"))
2284 2285 pr.step = _('files')
2285 2286 pr.count = 1
2286 2287 pr.total = efiles
2287 2288 source.callback = None
2288 2289
2289 2290 while True:
2290 2291 chunkdata = source.filelogheader()
2291 2292 if not chunkdata:
2292 2293 break
2293 2294 f = chunkdata["filename"]
2294 2295 self.ui.debug("adding %s revisions\n" % f)
2295 2296 pr()
2296 2297 fl = self.file(f)
2297 2298 o = len(fl)
2298 2299 if not fl.addgroup(source, revmap, trp):
2299 2300 raise util.Abort(_("received file revlog group is empty"))
2300 2301 revisions += len(fl) - o
2301 2302 files += 1
2302 2303 if f in needfiles:
2303 2304 needs = needfiles[f]
2304 2305 for new in xrange(o, len(fl)):
2305 2306 n = fl.node(new)
2306 2307 if n in needs:
2307 2308 needs.remove(n)
2308 2309 if not needs:
2309 2310 del needfiles[f]
2310 2311 self.ui.progress(_('files'), None)
2311 2312
2312 2313 for f, needs in needfiles.iteritems():
2313 2314 fl = self.file(f)
2314 2315 for n in needs:
2315 2316 try:
2316 2317 fl.rev(n)
2317 2318 except error.LookupError:
2318 2319 raise util.Abort(
2319 2320 _('missing file data for %s:%s - run hg verify') %
2320 2321 (f, hex(n)))
2321 2322
2322 2323 dh = 0
2323 2324 if oldheads:
2324 2325 heads = cl.heads()
2325 2326 dh = len(heads) - len(oldheads)
2326 2327 for h in heads:
2327 2328 if h not in oldheads and self[h].closesbranch():
2328 2329 dh -= 1
2329 2330 htext = ""
2330 2331 if dh:
2331 2332 htext = _(" (%+d heads)") % dh
2332 2333
2333 2334 self.ui.status(_("added %d changesets"
2334 2335 " with %d changes to %d files%s\n")
2335 2336 % (changesets, revisions, files, htext))
2336 2337 self.invalidatevolatilesets()
2337 2338
2338 2339 if changesets > 0:
2339 2340 p = lambda: cl.writepending() and self.root or ""
2340 2341 self.hook('pretxnchangegroup', throw=True,
2341 2342 node=hex(cl.node(clstart)), source=srctype,
2342 2343 url=url, pending=p)
2343 2344
2344 2345 added = [cl.node(r) for r in xrange(clstart, clend)]
2345 2346 publishing = self.ui.configbool('phases', 'publish', True)
2346 2347 if srctype == 'push':
2347 2348 # Old servers can not push the boundary themselves.
2348 2349 # New servers won't push the boundary if the changeset already
2349 2350 # existed locally as secret
2350 2351 #
2351 2352 # We should not use 'added' here but the list of all changes in
2352 2353 # the bundle
2353 2354 if publishing:
2354 2355 phases.advanceboundary(self, phases.public, srccontent)
2355 2356 else:
2356 2357 phases.advanceboundary(self, phases.draft, srccontent)
2357 2358 phases.retractboundary(self, phases.draft, added)
2358 2359 elif srctype != 'strip':
2359 2360 # publishing only alters behavior during push
2360 2361 #
2361 2362 # strip should not touch boundary at all
2362 2363 phases.retractboundary(self, phases.draft, added)
2363 2364
2364 2365 # make changelog see real files again
2365 2366 cl.finalize(trp)
2366 2367
2367 2368 tr.close()
2368 2369
2369 2370 if changesets > 0:
2370 2371 if srctype != 'strip':
2371 2372 # During strip, the branchcache is invalid but the coming call to
2372 2373 # `destroyed` will repair it.
2373 2374 # In other cases we can safely update the cache on disk.
2374 2375 branchmap.updatecache(self)
2375 2376 def runhooks():
2376 2377 # forcefully update the on-disk branch cache
2377 2378 self.ui.debug("updating the branch cache\n")
2378 2379 self.hook("changegroup", node=hex(cl.node(clstart)),
2379 2380 source=srctype, url=url)
2380 2381
2381 2382 for n in added:
2382 2383 self.hook("incoming", node=hex(n), source=srctype,
2383 2384 url=url)
2384 2385 self._afterlock(runhooks)
2385 2386
2386 2387 finally:
2387 2388 tr.release()
2388 2389 # never return 0 here:
2389 2390 if dh < 0:
2390 2391 return dh - 1
2391 2392 else:
2392 2393 return dh + 1
2393 2394
2394 2395 def stream_in(self, remote, requirements):
2395 2396 lock = self.lock()
2396 2397 try:
2397 2398 # Save remote branchmap. We will use it later
2398 2399 # to speed up branchcache creation
2399 2400 rbranchmap = None
2400 2401 if remote.capable("branchmap"):
2401 2402 rbranchmap = remote.branchmap()
2402 2403
2403 2404 fp = remote.stream_out()
2404 2405 l = fp.readline()
2405 2406 try:
2406 2407 resp = int(l)
2407 2408 except ValueError:
2408 2409 raise error.ResponseError(
2409 2410 _('unexpected response from remote server:'), l)
2410 2411 if resp == 1:
2411 2412 raise util.Abort(_('operation forbidden by server'))
2412 2413 elif resp == 2:
2413 2414 raise util.Abort(_('locking the remote repository failed'))
2414 2415 elif resp != 0:
2415 2416 raise util.Abort(_('the server sent an unknown error code'))
2416 2417 self.ui.status(_('streaming all changes\n'))
2417 2418 l = fp.readline()
2418 2419 try:
2419 2420 total_files, total_bytes = map(int, l.split(' ', 1))
2420 2421 except (ValueError, TypeError):
2421 2422 raise error.ResponseError(
2422 2423 _('unexpected response from remote server:'), l)
2423 2424 self.ui.status(_('%d files to transfer, %s of data\n') %
2424 2425 (total_files, util.bytecount(total_bytes)))
2425 2426 handled_bytes = 0
2426 2427 self.ui.progress(_('clone'), 0, total=total_bytes)
2427 2428 start = time.time()
2428 2429 for i in xrange(total_files):
2429 2430 # XXX doesn't support '\n' or '\r' in filenames
2430 2431 l = fp.readline()
2431 2432 try:
2432 2433 name, size = l.split('\0', 1)
2433 2434 size = int(size)
2434 2435 except (ValueError, TypeError):
2435 2436 raise error.ResponseError(
2436 2437 _('unexpected response from remote server:'), l)
2437 2438 if self.ui.debugflag:
2438 2439 self.ui.debug('adding %s (%s)\n' %
2439 2440 (name, util.bytecount(size)))
2440 2441 # for backwards compat, name was partially encoded
2441 2442 ofp = self.sopener(store.decodedir(name), 'w')
2442 2443 for chunk in util.filechunkiter(fp, limit=size):
2443 2444 handled_bytes += len(chunk)
2444 2445 self.ui.progress(_('clone'), handled_bytes,
2445 2446 total=total_bytes)
2446 2447 ofp.write(chunk)
2447 2448 ofp.close()
2448 2449 elapsed = time.time() - start
2449 2450 if elapsed <= 0:
2450 2451 elapsed = 0.001
2451 2452 self.ui.progress(_('clone'), None)
2452 2453 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2453 2454 (util.bytecount(total_bytes), elapsed,
2454 2455 util.bytecount(total_bytes / elapsed)))
2455 2456
2456 2457 # new requirements = old non-format requirements +
2457 2458 # new format-related
2458 2459 # requirements from the streamed-in repository
2459 2460 requirements.update(set(self.requirements) - self.supportedformats)
2460 2461 self._applyrequirements(requirements)
2461 2462 self._writerequirements()
2462 2463
2463 2464 if rbranchmap:
2464 2465 rbheads = []
2465 2466 for bheads in rbranchmap.itervalues():
2466 2467 rbheads.extend(bheads)
2467 2468
2468 2469 if rbheads:
2469 2470 rtiprev = max((int(self.changelog.rev(node))
2470 2471 for node in rbheads))
2471 2472 cache = branchmap.branchcache(rbranchmap,
2472 2473 self[rtiprev].node(),
2473 2474 rtiprev)
2474 2475 self._branchcaches[None] = cache
2475 2476 cache.write(self.unfiltered())
2476 2477 self.invalidate()
2477 2478 return len(self.heads()) + 1
2478 2479 finally:
2479 2480 lock.release()
2480 2481
2481 2482 def clone(self, remote, heads=[], stream=False):
2482 2483 '''clone remote repository.
2483 2484
2484 2485 keyword arguments:
2485 2486 heads: list of revs to clone (forces use of pull)
2486 2487 stream: use streaming clone if possible'''
2487 2488
2488 2489 # now, all clients that can request uncompressed clones can
2489 2490 # read repo formats supported by all servers that can serve
2490 2491 # them.
2491 2492
2492 2493 # if revlog format changes, client will have to check version
2493 2494 # and format flags on "stream" capability, and use
2494 2495 # uncompressed only if compatible.
2495 2496
2496 2497 if not stream:
2497 2498 # if the server explicitly prefers to stream (for fast LANs)
2498 2499 stream = remote.capable('stream-preferred')
2499 2500
2500 2501 if stream and not heads:
2501 2502 # 'stream' means remote revlog format is revlogv1 only
2502 2503 if remote.capable('stream'):
2503 2504 return self.stream_in(remote, set(('revlogv1',)))
2504 2505 # otherwise, 'streamreqs' contains the remote revlog format
2505 2506 streamreqs = remote.capable('streamreqs')
2506 2507 if streamreqs:
2507 2508 streamreqs = set(streamreqs.split(','))
2508 2509 # if we support it, stream in and adjust our requirements
2509 2510 if not streamreqs - self.supportedformats:
2510 2511 return self.stream_in(remote, streamreqs)
2511 2512 return self.pull(remote, heads)
2512 2513
2513 2514 def pushkey(self, namespace, key, old, new):
2514 2515 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2515 2516 old=old, new=new)
2516 2517 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2517 2518 ret = pushkey.push(self, namespace, key, old, new)
2518 2519 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2519 2520 ret=ret)
2520 2521 return ret
2521 2522
2522 2523 def listkeys(self, namespace):
2523 2524 self.hook('prelistkeys', throw=True, namespace=namespace)
2524 2525 self.ui.debug('listing keys for "%s"\n' % namespace)
2525 2526 values = pushkey.list(self, namespace)
2526 2527 self.hook('listkeys', namespace=namespace, values=values)
2527 2528 return values
2528 2529
2529 2530 def debugwireargs(self, one, two, three=None, four=None, five=None):
2530 2531 '''used to test argument passing over the wire'''
2531 2532 return "%s %s %s %s %s" % (one, two, three, four, five)
2532 2533
2533 2534 def savecommitmessage(self, text):
2534 2535 fp = self.opener('last-message.txt', 'wb')
2535 2536 try:
2536 2537 fp.write(text)
2537 2538 finally:
2538 2539 fp.close()
2539 2540 return self.pathto(fp.name[len(self.root) + 1:])
2540 2541
2541 2542 # used to avoid circular references so destructors work
2542 2543 def aftertrans(files):
2543 2544 renamefiles = [tuple(t) for t in files]
2544 2545 def a():
2545 2546 for src, dest in renamefiles:
2546 2547 try:
2547 2548 util.rename(src, dest)
2548 2549 except OSError: # journal file does not yet exist
2549 2550 pass
2550 2551 return a
2551 2552
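# Sketch of the deferred-rename callback, with illustrative paths:
#
#   after = aftertrans([(repo.sjoin('journal'), repo.sjoin('undo'))])
#   after()  # renames journal -> undo, skipping files not yet created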
2552 2553 def undoname(fn):
2553 2554 base, name = os.path.split(fn)
2554 2555 assert name.startswith('journal')
2555 2556 return os.path.join(base, name.replace('journal', 'undo', 1))
2556 2557
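# For example, undoname('/repo/.hg/journal.dirstate') returns
# '/repo/.hg/undo.dirstate'.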
2557 2558 def instance(ui, path, create):
2558 2559 return localrepository(ui, util.urllocalpath(path), create)
2559 2560
2560 2561 def islocal(path):
2561 2562 return True