##// END OF EJS Templates
Mercurial changeset r15986:ba959f6e (stable branch)
discovery: fix regression when checking heads for pre-1.4 client (issue3218)
Author: Pierre-Yves David
@@ -1,238 +1,238 b''
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, short
9 9 from i18n import _
10 10 import util, setdiscovery, treediscovery, phases
11 11
def findcommonincoming(repo, remote, heads=None, force=False):
    """Return a tuple (common, anyincoming, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "anyincoming" is testable as a boolean indicating if any nodes are missing
    locally. If remote does not support getbundle, this actually is a list of
    roots of the nodes that would be incoming, to be supplied to
    changegroupsubset. No code except for pull should be relying on this fact
    any longer.
    "heads" is either the supplied heads, or else the remote's heads.

    If you pass heads and they are all known locally, the response lists just
    these heads in "common" and in "heads".

    Please use findcommonoutgoing to compute the set of outgoing nodes to give
    extensions a good hook into outgoing.
    """

    if not remote.capable('getbundle'):
        # old (pre-getbundle) servers: fall back to tree-walking discovery
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        # Shortcut: if every requested head is already known locally,
        # there is nothing to discover.
        nm = repo.changelog.nodemap
        if all(nm.get(h) is not None for h in heads):
            return (heads, False, heads)

    res = setdiscovery.findcommonheads(repo.ui, repo, remote,
                                       abortwhenunrelated=not force)
    common, anyinc, srvheads = res
    return (list(common), anyinc, heads or list(srvheads))
48 48
class outgoing(object):
    '''Represents the set of nodes present in a local repo but not in a
    (possibly) remote one.

    Members:

    missing is a list of all nodes present in local but not in remote.
    common is a list of all nodes shared between the two repos.
    excluded is the list of missing changeset that shouldn't be sent remotely.
    missingheads is the list of heads of missing.
    commonheads is the list of heads of common.

    The sets are computed on demand from the heads, unless provided upfront
    by discovery.'''

    def __init__(self, revlog, commonheads, missingheads):
        # The two head lists define the sets; the full "common" and
        # "missing" node lists are derived lazily from them.
        self._revlog = revlog
        self._common = None
        self._missing = None
        self.commonheads = commonheads
        self.missingheads = missingheads
        self.excluded = []

    def _computecommonmissing(self):
        # Fill both caches with a single revlog traversal.
        common, missing = self._revlog.findcommonmissing(self.commonheads,
                                                         self.missingheads)
        self._common = common
        self._missing = missing

    @util.propertycache
    def common(self):
        # Compute on first access unless discovery supplied it upfront.
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @util.propertycache
    def missing(self):
        # Compute on first access unless discovery supplied it upfront.
        if self._missing is None:
            self._computecommonmissing()
        return self._missing
88 88
def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                       commoninc=None):
    '''Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.'''
    # declare an empty outgoing object to be filled later
    og = outgoing(repo.changelog, None, None)

    # get common set if not provided
    if commoninc is None:
        commoninc = findcommonincoming(repo, other, force=force)
    og.commonheads, _any, _hds = commoninc

    # compute outgoing
    if not repo._phaseroots[phases.secret]:
        # no secret changesets anywhere: everything missing may be sent
        og.missingheads = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        og.missingheads = phases.visibleheads(repo)
        og.excluded = [ctx.node() for ctx in repo.set('secret()')]
    else:
        # compute common, missing and exclude secret stuff
        sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
        og._common, allmissing = sets
        og._missing = missing = []
        og.excluded = excluded = []
        for node in allmissing:
            if repo[node].phase() >= phases.secret:
                excluded.append(node)
            else:
                missing.append(node)
        if excluded:
            # secret changesets were filtered out of "missing", so the
            # heads of the missing set may have changed: recompute them
            missingheads = phases.newheads(repo, onlyheads, excluded)
        else:
            missingheads = onlyheads
        og.missingheads = missingheads

    return og
133 133
def checkheads(repo, remote, outgoing, remoteheads, newbranch=False,
               inc=False):
    """Check that a push won't add any outgoing head

    raise Abort error and display ui message as needed.

    "inc" tells whether incoming (unsynced) changes were detected by a prior
    discovery step; for old servers without branchmap it decides whether the
    "unsynced remote changes" note/hint is shown (issue3218).
    """
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    cl = repo.changelog
    if remote.capable('branchmap'):
        # Check for each named branch if we're creating new remote heads.
        # To be a remote head after push, node must be either:
        # - unknown locally
        # - a local outgoing head descended from update
        # - a remote head that's known locally and not
        #   ancestral to an outgoing head

        # 1. Create set of branches involved in the push.
        branches = set(repo[n].branch() for n in outgoing.missing)

        # 2. Check for new branches on the remote.
        remotemap = remote.branchmap()
        newbranches = branches - set(remotemap)
        if newbranches and not newbranch: # new branch requires --new-branch
            branchnames = ', '.join(sorted(newbranches))
            raise util.Abort(_("push creates new remote branches: %s!")
                             % branchnames,
                             hint=_("use 'hg push --new-branch' to create"
                                    " new remote branches"))
        branches.difference_update(newbranches)

        # 3. Construct the initial oldmap and newmap dicts.
        # They contain information about the remote heads before and
        # after the push, respectively.
        # Heads not found locally are not included in either dict,
        # since they won't be affected by the push.
        # unsynced contains all branches with incoming changesets.
        oldmap = {}
        newmap = {}
        unsynced = set()
        for branch in branches:
            remotebrheads = remotemap[branch]
            prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
            oldmap[branch] = prunedbrheads
            newmap[branch] = list(prunedbrheads)
            if len(remotebrheads) > len(prunedbrheads):
                unsynced.add(branch)

        # 4. Update newmap with outgoing changes.
        # This will possibly add new heads and remove existing ones.
        ctxgen = (repo[n] for n in outgoing.missing)
        repo._updatebranchcache(newmap, ctxgen)

    else:
        # 1-4b. old servers: Check for new topological heads.
        # Construct {old,new}map with branch = None (topological branch).
        # (code based on _updatebranchcache)
        oldheads = set(h for h in remoteheads if h in cl.nodemap)
        newheads = oldheads.union(outgoing.missing)
        if len(newheads) > 1:
            for latest in reversed(outgoing.missing):
                if latest not in newheads:
                    continue
                minhrev = min(cl.rev(h) for h in newheads)
                reachable = cl.reachable(latest, cl.node(minhrev))
                reachable.remove(latest)
                newheads.difference_update(reachable)
        branches = set([None])
        newmap = {None: newheads}
        oldmap = {None: oldheads}
        # rely on the caller-provided incoming flag rather than a local
        # check; use a conditional expression instead of the fragile
        # "cond and a or b" idiom
        unsynced = branches if inc else set()

    # 5. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    error = None
    for branch in branches:
        newhs = set(newmap[branch])
        oldhs = set(oldmap[branch])
        if len(newhs) > len(oldhs):
            dhs = list(newhs - oldhs)
            if error is None:
                if branch not in ('default', None):
                    error = _("push creates new remote head %s "
                              "on branch '%s'!") % (short(dhs[0]), branch)
                else:
                    error = _("push creates new remote head %s!"
                              ) % short(dhs[0])
                if branch in unsynced:
                    hint = _("you should pull and merge or "
                             "use push -f to force")
                else:
                    hint = _("did you forget to merge? "
                             "use push -f to force")
            if branch is not None:
                repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
            for h in dhs:
                repo.ui.note(_("new remote head %s\n") % short(h))
    if error:
        raise util.Abort(error, hint=hint)

    # 6. Check for unsynced changes on involved branches.
    if unsynced:
        repo.ui.warn(_("note: unsynced remote changes!\n"))
@@ -1,2313 +1,2314 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'generaldelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=False):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 35 self.opener = scmutil.opener(self.path)
36 36 self.wopener = scmutil.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39 self._dirtyphases = False
40 40 # A list of callback to shape the phase if no data were found.
41 41 # Callback are in the form: func(repo, roots) --> processed root.
42 42 # This list it to be filled by extension during repo setup
43 43 self._phasedefaults = []
44 44
45 45 try:
46 46 self.ui.readconfig(self.join("hgrc"), self.root)
47 47 extensions.loadall(self.ui)
48 48 except IOError:
49 49 pass
50 50
51 51 if not os.path.isdir(self.path):
52 52 if create:
53 53 if not os.path.exists(path):
54 54 util.makedirs(path)
55 55 util.makedir(self.path, notindexed=True)
56 56 requirements = ["revlogv1"]
57 57 if self.ui.configbool('format', 'usestore', True):
58 58 os.mkdir(os.path.join(self.path, "store"))
59 59 requirements.append("store")
60 60 if self.ui.configbool('format', 'usefncache', True):
61 61 requirements.append("fncache")
62 62 if self.ui.configbool('format', 'dotencode', True):
63 63 requirements.append('dotencode')
64 64 # create an invalid changelog
65 65 self.opener.append(
66 66 "00changelog.i",
67 67 '\0\0\0\2' # represents revlogv2
68 68 ' dummy changelog to prevent using the old repo layout'
69 69 )
70 70 if self.ui.configbool('format', 'generaldelta', False):
71 71 requirements.append("generaldelta")
72 72 requirements = set(requirements)
73 73 else:
74 74 raise error.RepoError(_("repository %s not found") % path)
75 75 elif create:
76 76 raise error.RepoError(_("repository %s already exists") % path)
77 77 else:
78 78 try:
79 79 requirements = scmutil.readrequires(self.opener, self.supported)
80 80 except IOError, inst:
81 81 if inst.errno != errno.ENOENT:
82 82 raise
83 83 requirements = set()
84 84
85 85 self.sharedpath = self.path
86 86 try:
87 87 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
88 88 if not os.path.exists(s):
89 89 raise error.RepoError(
90 90 _('.hg/sharedpath points to nonexistent directory %s') % s)
91 91 self.sharedpath = s
92 92 except IOError, inst:
93 93 if inst.errno != errno.ENOENT:
94 94 raise
95 95
96 96 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
97 97 self.spath = self.store.path
98 98 self.sopener = self.store.opener
99 99 self.sjoin = self.store.join
100 100 self.opener.createmode = self.store.createmode
101 101 self._applyrequirements(requirements)
102 102 if create:
103 103 self._writerequirements()
104 104
105 105
106 106 self._branchcache = None
107 107 self._branchcachetip = None
108 108 self.filterpats = {}
109 109 self._datafilters = {}
110 110 self._transref = self._lockref = self._wlockref = None
111 111
112 112 # A cache for various files under .hg/ that tracks file changes,
113 113 # (used by the filecache decorator)
114 114 #
115 115 # Maps a property name to its util.filecacheentry
116 116 self._filecache = {}
117 117
118 118 def _applyrequirements(self, requirements):
119 119 self.requirements = requirements
120 120 openerreqs = set(('revlogv1', 'generaldelta'))
121 121 self.sopener.options = dict((r, 1) for r in requirements
122 122 if r in openerreqs)
123 123
124 124 def _writerequirements(self):
125 125 reqfile = self.opener("requires", "w")
126 126 for r in self.requirements:
127 127 reqfile.write("%s\n" % r)
128 128 reqfile.close()
129 129
130 130 def _checknested(self, path):
131 131 """Determine if path is a legal nested repository."""
132 132 if not path.startswith(self.root):
133 133 return False
134 134 subpath = path[len(self.root) + 1:]
135 135 normsubpath = util.pconvert(subpath)
136 136
137 137 # XXX: Checking against the current working copy is wrong in
138 138 # the sense that it can reject things like
139 139 #
140 140 # $ hg cat -r 10 sub/x.txt
141 141 #
142 142 # if sub/ is no longer a subrepository in the working copy
143 143 # parent revision.
144 144 #
145 145 # However, it can of course also allow things that would have
146 146 # been rejected before, such as the above cat command if sub/
147 147 # is a subrepository now, but was a normal directory before.
148 148 # The old path auditor would have rejected by mistake since it
149 149 # panics when it sees sub/.hg/.
150 150 #
151 151 # All in all, checking against the working copy seems sensible
152 152 # since we want to prevent access to nested repositories on
153 153 # the filesystem *now*.
154 154 ctx = self[None]
155 155 parts = util.splitpath(subpath)
156 156 while parts:
157 157 prefix = '/'.join(parts)
158 158 if prefix in ctx.substate:
159 159 if prefix == normsubpath:
160 160 return True
161 161 else:
162 162 sub = ctx.sub(prefix)
163 163 return sub.checknested(subpath[len(prefix) + 1:])
164 164 else:
165 165 parts.pop()
166 166 return False
167 167
168 168 @filecache('bookmarks')
169 169 def _bookmarks(self):
170 170 return bookmarks.read(self)
171 171
172 172 @filecache('bookmarks.current')
173 173 def _bookmarkcurrent(self):
174 174 return bookmarks.readcurrent(self)
175 175
176 176 def _writebookmarks(self, marks):
177 177 bookmarks.write(self)
178 178
179 179 @filecache('phaseroots')
180 180 def _phaseroots(self):
181 181 self._dirtyphases = False
182 182 phaseroots = phases.readroots(self)
183 183 phases.filterunknown(self, phaseroots)
184 184 return phaseroots
185 185
186 186 @propertycache
187 187 def _phaserev(self):
188 188 cache = [phases.public] * len(self)
189 189 for phase in phases.trackedphases:
190 190 roots = map(self.changelog.rev, self._phaseroots[phase])
191 191 if roots:
192 192 for rev in roots:
193 193 cache[rev] = phase
194 194 for rev in self.changelog.descendants(*roots):
195 195 cache[rev] = phase
196 196 return cache
197 197
198 198 @filecache('00changelog.i', True)
199 199 def changelog(self):
200 200 c = changelog.changelog(self.sopener)
201 201 if 'HG_PENDING' in os.environ:
202 202 p = os.environ['HG_PENDING']
203 203 if p.startswith(self.root):
204 204 c.readpending('00changelog.i.a')
205 205 return c
206 206
207 207 @filecache('00manifest.i', True)
208 208 def manifest(self):
209 209 return manifest.manifest(self.sopener)
210 210
211 211 @filecache('dirstate')
212 212 def dirstate(self):
213 213 warned = [0]
214 214 def validate(node):
215 215 try:
216 216 self.changelog.rev(node)
217 217 return node
218 218 except error.LookupError:
219 219 if not warned[0]:
220 220 warned[0] = True
221 221 self.ui.warn(_("warning: ignoring unknown"
222 222 " working parent %s!\n") % short(node))
223 223 return nullid
224 224
225 225 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
226 226
227 227 def __getitem__(self, changeid):
228 228 if changeid is None:
229 229 return context.workingctx(self)
230 230 return context.changectx(self, changeid)
231 231
232 232 def __contains__(self, changeid):
233 233 try:
234 234 return bool(self.lookup(changeid))
235 235 except error.RepoLookupError:
236 236 return False
237 237
238 238 def __nonzero__(self):
239 239 return True
240 240
241 241 def __len__(self):
242 242 return len(self.changelog)
243 243
244 244 def __iter__(self):
245 245 for i in xrange(len(self)):
246 246 yield i
247 247
248 248 def revs(self, expr, *args):
249 249 '''Return a list of revisions matching the given revset'''
250 250 expr = revset.formatspec(expr, *args)
251 251 m = revset.match(None, expr)
252 252 return [r for r in m(self, range(len(self)))]
253 253
254 254 def set(self, expr, *args):
255 255 '''
256 256 Yield a context for each matching revision, after doing arg
257 257 replacement via revset.formatspec
258 258 '''
259 259 for r in self.revs(expr, *args):
260 260 yield self[r]
261 261
262 262 def url(self):
263 263 return 'file:' + self.root
264 264
265 265 def hook(self, name, throw=False, **args):
266 266 return hook.hook(self.ui, self, name, throw, **args)
267 267
268 268 tag_disallowed = ':\r\n'
269 269
270 270 def _tag(self, names, node, message, local, user, date, extra={}):
271 271 if isinstance(names, str):
272 272 allchars = names
273 273 names = (names,)
274 274 else:
275 275 allchars = ''.join(names)
276 276 for c in self.tag_disallowed:
277 277 if c in allchars:
278 278 raise util.Abort(_('%r cannot be used in a tag name') % c)
279 279
280 280 branches = self.branchmap()
281 281 for name in names:
282 282 self.hook('pretag', throw=True, node=hex(node), tag=name,
283 283 local=local)
284 284 if name in branches:
285 285 self.ui.warn(_("warning: tag %s conflicts with existing"
286 286 " branch name\n") % name)
287 287
288 288 def writetags(fp, names, munge, prevtags):
289 289 fp.seek(0, 2)
290 290 if prevtags and prevtags[-1] != '\n':
291 291 fp.write('\n')
292 292 for name in names:
293 293 m = munge and munge(name) or name
294 294 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
295 295 old = self.tags().get(name, nullid)
296 296 fp.write('%s %s\n' % (hex(old), m))
297 297 fp.write('%s %s\n' % (hex(node), m))
298 298 fp.close()
299 299
300 300 prevtags = ''
301 301 if local:
302 302 try:
303 303 fp = self.opener('localtags', 'r+')
304 304 except IOError:
305 305 fp = self.opener('localtags', 'a')
306 306 else:
307 307 prevtags = fp.read()
308 308
309 309 # local tags are stored in the current charset
310 310 writetags(fp, names, None, prevtags)
311 311 for name in names:
312 312 self.hook('tag', node=hex(node), tag=name, local=local)
313 313 return
314 314
315 315 try:
316 316 fp = self.wfile('.hgtags', 'rb+')
317 317 except IOError, e:
318 318 if e.errno != errno.ENOENT:
319 319 raise
320 320 fp = self.wfile('.hgtags', 'ab')
321 321 else:
322 322 prevtags = fp.read()
323 323
324 324 # committed tags are stored in UTF-8
325 325 writetags(fp, names, encoding.fromlocal, prevtags)
326 326
327 327 fp.close()
328 328
329 329 self.invalidatecaches()
330 330
331 331 if '.hgtags' not in self.dirstate:
332 332 self[None].add(['.hgtags'])
333 333
334 334 m = matchmod.exact(self.root, '', ['.hgtags'])
335 335 tagnode = self.commit(message, user, date, extra=extra, match=m)
336 336
337 337 for name in names:
338 338 self.hook('tag', node=hex(node), tag=name, local=local)
339 339
340 340 return tagnode
341 341
342 342 def tag(self, names, node, message, local, user, date):
343 343 '''tag a revision with one or more symbolic names.
344 344
345 345 names is a list of strings or, when adding a single tag, names may be a
346 346 string.
347 347
348 348 if local is True, the tags are stored in a per-repository file.
349 349 otherwise, they are stored in the .hgtags file, and a new
350 350 changeset is committed with the change.
351 351
352 352 keyword arguments:
353 353
354 354 local: whether to store tags in non-version-controlled file
355 355 (default False)
356 356
357 357 message: commit message to use if committing
358 358
359 359 user: name of user to use if committing
360 360
361 361 date: date tuple to use if committing'''
362 362
363 363 if not local:
364 364 for x in self.status()[:5]:
365 365 if '.hgtags' in x:
366 366 raise util.Abort(_('working copy of .hgtags is changed '
367 367 '(please commit .hgtags manually)'))
368 368
369 369 self.tags() # instantiate the cache
370 370 self._tag(names, node, message, local, user, date)
371 371
372 372 @propertycache
373 373 def _tagscache(self):
374 374 '''Returns a tagscache object that contains various tags related caches.'''
375 375
376 376 # This simplifies its cache management by having one decorated
377 377 # function (this one) and the rest simply fetch things from it.
378 378 class tagscache(object):
379 379 def __init__(self):
380 380 # These two define the set of tags for this repository. tags
381 381 # maps tag name to node; tagtypes maps tag name to 'global' or
382 382 # 'local'. (Global tags are defined by .hgtags across all
383 383 # heads, and local tags are defined in .hg/localtags.)
384 384 # They constitute the in-memory cache of tags.
385 385 self.tags = self.tagtypes = None
386 386
387 387 self.nodetagscache = self.tagslist = None
388 388
389 389 cache = tagscache()
390 390 cache.tags, cache.tagtypes = self._findtags()
391 391
392 392 return cache
393 393
394 394 def tags(self):
395 395 '''return a mapping of tag to node'''
396 396 return self._tagscache.tags
397 397
398 398 def _findtags(self):
399 399 '''Do the hard work of finding tags. Return a pair of dicts
400 400 (tags, tagtypes) where tags maps tag name to node, and tagtypes
401 401 maps tag name to a string like \'global\' or \'local\'.
402 402 Subclasses or extensions are free to add their own tags, but
403 403 should be aware that the returned dicts will be retained for the
404 404 duration of the localrepo object.'''
405 405
406 406 # XXX what tagtype should subclasses/extensions use? Currently
407 407 # mq and bookmarks add tags, but do not set the tagtype at all.
408 408 # Should each extension invent its own tag type? Should there
409 409 # be one tagtype for all such "virtual" tags? Or is the status
410 410 # quo fine?
411 411
412 412 alltags = {} # map tag name to (node, hist)
413 413 tagtypes = {}
414 414
415 415 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
416 416 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
417 417
418 418 # Build the return dicts. Have to re-encode tag names because
419 419 # the tags module always uses UTF-8 (in order not to lose info
420 420 # writing to the cache), but the rest of Mercurial wants them in
421 421 # local encoding.
422 422 tags = {}
423 423 for (name, (node, hist)) in alltags.iteritems():
424 424 if node != nullid:
425 425 try:
426 426 # ignore tags to unknown nodes
427 427 self.changelog.lookup(node)
428 428 tags[encoding.tolocal(name)] = node
429 429 except error.LookupError:
430 430 pass
431 431 tags['tip'] = self.changelog.tip()
432 432 tagtypes = dict([(encoding.tolocal(name), value)
433 433 for (name, value) in tagtypes.iteritems()])
434 434 return (tags, tagtypes)
435 435
436 436 def tagtype(self, tagname):
437 437 '''
438 438 return the type of the given tag. result can be:
439 439
440 440 'local' : a local tag
441 441 'global' : a global tag
442 442 None : tag does not exist
443 443 '''
444 444
445 445 return self._tagscache.tagtypes.get(tagname)
446 446
447 447 def tagslist(self):
448 448 '''return a list of tags ordered by revision'''
449 449 if not self._tagscache.tagslist:
450 450 l = []
451 451 for t, n in self.tags().iteritems():
452 452 r = self.changelog.rev(n)
453 453 l.append((r, t, n))
454 454 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
455 455
456 456 return self._tagscache.tagslist
457 457
458 458 def nodetags(self, node):
459 459 '''return the tags associated with a node'''
460 460 if not self._tagscache.nodetagscache:
461 461 nodetagscache = {}
462 462 for t, n in self.tags().iteritems():
463 463 nodetagscache.setdefault(n, []).append(t)
464 464 for tags in nodetagscache.itervalues():
465 465 tags.sort()
466 466 self._tagscache.nodetagscache = nodetagscache
467 467 return self._tagscache.nodetagscache.get(node, [])
468 468
469 469 def nodebookmarks(self, node):
470 470 marks = []
471 471 for bookmark, n in self._bookmarks.iteritems():
472 472 if n == node:
473 473 marks.append(bookmark)
474 474 return sorted(marks)
475 475
476 476 def _branchtags(self, partial, lrev):
477 477 # TODO: rename this function?
478 478 tiprev = len(self) - 1
479 479 if lrev != tiprev:
480 480 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
481 481 self._updatebranchcache(partial, ctxgen)
482 482 self._writebranchcache(partial, self.changelog.tip(), tiprev)
483 483
484 484 return partial
485 485
486 486 def updatebranchcache(self):
487 487 tip = self.changelog.tip()
488 488 if self._branchcache is not None and self._branchcachetip == tip:
489 489 return
490 490
491 491 oldtip = self._branchcachetip
492 492 self._branchcachetip = tip
493 493 if oldtip is None or oldtip not in self.changelog.nodemap:
494 494 partial, last, lrev = self._readbranchcache()
495 495 else:
496 496 lrev = self.changelog.rev(oldtip)
497 497 partial = self._branchcache
498 498
499 499 self._branchtags(partial, lrev)
500 500 # this private cache holds all heads (not just tips)
501 501 self._branchcache = partial
502 502
503 503 def branchmap(self):
504 504 '''returns a dictionary {branch: [branchheads]}'''
505 505 self.updatebranchcache()
506 506 return self._branchcache
507 507
508 508 def branchtags(self):
509 509 '''return a dict where branch names map to the tipmost head of
510 510 the branch, open heads come before closed'''
511 511 bt = {}
512 512 for bn, heads in self.branchmap().iteritems():
513 513 tip = heads[-1]
514 514 for h in reversed(heads):
515 515 if 'close' not in self.changelog.read(h)[5]:
516 516 tip = h
517 517 break
518 518 bt[bn] = tip
519 519 return bt
520 520
521 521 def _readbranchcache(self):
522 522 partial = {}
523 523 try:
524 524 f = self.opener("cache/branchheads")
525 525 lines = f.read().split('\n')
526 526 f.close()
527 527 except (IOError, OSError):
528 528 return {}, nullid, nullrev
529 529
530 530 try:
531 531 last, lrev = lines.pop(0).split(" ", 1)
532 532 last, lrev = bin(last), int(lrev)
533 533 if lrev >= len(self) or self[lrev].node() != last:
534 534 # invalidate the cache
535 535 raise ValueError('invalidating branch cache (tip differs)')
536 536 for l in lines:
537 537 if not l:
538 538 continue
539 539 node, label = l.split(" ", 1)
540 540 label = encoding.tolocal(label.strip())
541 541 partial.setdefault(label, []).append(bin(node))
542 542 except KeyboardInterrupt:
543 543 raise
544 544 except Exception, inst:
545 545 if self.ui.debugflag:
546 546 self.ui.warn(str(inst), '\n')
547 547 partial, last, lrev = {}, nullid, nullrev
548 548 return partial, last, lrev
549 549
550 550 def _writebranchcache(self, branches, tip, tiprev):
551 551 try:
552 552 f = self.opener("cache/branchheads", "w", atomictemp=True)
553 553 f.write("%s %s\n" % (hex(tip), tiprev))
554 554 for label, nodes in branches.iteritems():
555 555 for node in nodes:
556 556 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
557 557 f.close()
558 558 except (IOError, OSError):
559 559 pass
560 560
561 561 def _updatebranchcache(self, partial, ctxgen):
562 562 # collect new branch entries
563 563 newbranches = {}
564 564 for c in ctxgen:
565 565 newbranches.setdefault(c.branch(), []).append(c.node())
566 566 # if older branchheads are reachable from new ones, they aren't
567 567 # really branchheads. Note checking parents is insufficient:
568 568 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
569 569 for branch, newnodes in newbranches.iteritems():
570 570 bheads = partial.setdefault(branch, [])
571 571 bheads.extend(newnodes)
572 572 if len(bheads) <= 1:
573 573 continue
574 574 bheads = sorted(bheads, key=lambda x: self[x].rev())
575 575 # starting from tip means fewer passes over reachable
576 576 while newnodes:
577 577 latest = newnodes.pop()
578 578 if latest not in bheads:
579 579 continue
580 580 minbhrev = self[bheads[0]].node()
581 581 reachable = self.changelog.reachable(latest, minbhrev)
582 582 reachable.remove(latest)
583 583 if reachable:
584 584 bheads = [b for b in bheads if b not in reachable]
585 585 partial[branch] = bheads
586 586
587 587 def lookup(self, key):
588 588 if isinstance(key, int):
589 589 return self.changelog.node(key)
590 590 elif key == '.':
591 591 return self.dirstate.p1()
592 592 elif key == 'null':
593 593 return nullid
594 594 elif key == 'tip':
595 595 return self.changelog.tip()
596 596 n = self.changelog._match(key)
597 597 if n:
598 598 return n
599 599 if key in self._bookmarks:
600 600 return self._bookmarks[key]
601 601 if key in self.tags():
602 602 return self.tags()[key]
603 603 if key in self.branchtags():
604 604 return self.branchtags()[key]
605 605 n = self.changelog._partialmatch(key)
606 606 if n:
607 607 return n
608 608
609 609 # can't find key, check if it might have come from damaged dirstate
610 610 if key in self.dirstate.parents():
611 611 raise error.Abort(_("working directory has unknown parent '%s'!")
612 612 % short(key))
613 613 try:
614 614 if len(key) == 20:
615 615 key = hex(key)
616 616 except TypeError:
617 617 pass
618 618 raise error.RepoLookupError(_("unknown revision '%s'") % key)
619 619
620 620 def lookupbranch(self, key, remote=None):
621 621 repo = remote or self
622 622 if key in repo.branchmap():
623 623 return key
624 624
625 625 repo = (remote and remote.local()) and remote or self
626 626 return repo[key].branch()
627 627
628 628 def known(self, nodes):
629 629 nm = self.changelog.nodemap
630 630 result = []
631 631 for n in nodes:
632 632 r = nm.get(n)
633 633 resp = not (r is None or self._phaserev[r] >= phases.secret)
634 634 result.append(resp)
635 635 return result
636 636
637 637 def local(self):
638 638 return self
639 639
640 640 def cancopy(self):
641 641 return (repo.repository.cancopy(self)
642 642 and not self._phaseroots[phases.secret])
643 643
644 644 def join(self, f):
645 645 return os.path.join(self.path, f)
646 646
647 647 def wjoin(self, f):
648 648 return os.path.join(self.root, f)
649 649
650 650 def file(self, f):
651 651 if f[0] == '/':
652 652 f = f[1:]
653 653 return filelog.filelog(self.sopener, f)
654 654
655 655 def changectx(self, changeid):
656 656 return self[changeid]
657 657
658 658 def parents(self, changeid=None):
659 659 '''get list of changectxs for parents of changeid'''
660 660 return self[changeid].parents()
661 661
662 662 def filectx(self, path, changeid=None, fileid=None):
663 663 """changeid can be a changeset revision, node, or tag.
664 664 fileid can be a file revision or node."""
665 665 return context.filectx(self, path, changeid, fileid)
666 666
667 667 def getcwd(self):
668 668 return self.dirstate.getcwd()
669 669
670 670 def pathto(self, f, cwd=None):
671 671 return self.dirstate.pathto(f, cwd)
672 672
673 673 def wfile(self, f, mode='r'):
674 674 return self.wopener(f, mode)
675 675
676 676 def _link(self, f):
677 677 return os.path.islink(self.wjoin(f))
678 678
    def _loadfilter(self, filter):
        """Build and memoize the filter list for config section *filter*
        ('encode' or 'decode').

        Returns a list of (matcher, filterfn, params) triples; the result
        is cached in self.filterpats so config is only parsed once.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables a pattern inherited from another hgrc
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # prefer a registered in-process data filter over shelling
                # out; the remainder of cmd becomes the filter's params
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to running cmd as an external pipe command
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
702 702
703 703 def _filter(self, filterpats, filename, data):
704 704 for mf, fn, cmd in filterpats:
705 705 if mf(filename):
706 706 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
707 707 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
708 708 break
709 709
710 710 return data
711 711
    @propertycache
    def _encodefilterpats(self):
        # filters applied by wread() when reading working-directory files;
        # computed once, then cached by propertycache
        return self._loadfilter('encode')
715 715
    @propertycache
    def _decodefilterpats(self):
        # filters applied by wwrite()/wwritedata() when writing data out
        # to the working directory; cached by propertycache
        return self._loadfilter('decode')
719 719
720 720 def adddatafilter(self, name, filter):
721 721 self._datafilters[name] = filter
722 722
723 723 def wread(self, filename):
724 724 if self._link(filename):
725 725 data = os.readlink(self.wjoin(filename))
726 726 else:
727 727 data = self.wopener.read(filename)
728 728 return self._filter(self._encodefilterpats, filename, data)
729 729
730 730 def wwrite(self, filename, data, flags):
731 731 data = self._filter(self._decodefilterpats, filename, data)
732 732 if 'l' in flags:
733 733 self.wopener.symlink(data, filename)
734 734 else:
735 735 self.wopener.write(filename, data)
736 736 if 'x' in flags:
737 737 util.setflags(self.wjoin(filename), False, True)
738 738
739 739 def wwritedata(self, filename, data):
740 740 return self._filter(self._decodefilterpats, filename, data)
741 741
    def transaction(self, desc):
        """Open a transaction on the store, or nest into a running one.

        Only a weak reference to the transaction is kept on the
        repository, so it dies with its last external user.
        """
        # reuse a transaction that is already running (nested scope)
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # snapshot pre-transaction state; journal.* files are renamed to
        # their undo.* counterparts once the transaction closes cleanly
        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
761 761
    def _writejournal(self, desc):
        """Write the journal.* files capturing pre-transaction state.

        Returns the tuple of journal file paths, in the order expected by
        rollback's undo.* handling.
        """
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            # no dirstate yet (e.g. freshly created repository)
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        # bookmarks and phaseroots are snapshotted wholesale; an empty
        # journal file records that the original did not exist
        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')
        phasesname = self.sjoin('phaseroots')
        if os.path.exists(phasesname):
            util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
        else:
            self.sopener.write('journal.phaseroots', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))
789 789
    def recover(self):
        """Replay and remove an interrupted transaction's journal.

        Returns True when a journal was rolled back, False otherwise.
        """
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                # on-disk state changed behind our caches; drop them
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
804 804
805 805 def rollback(self, dryrun=False, force=False):
806 806 wlock = lock = None
807 807 try:
808 808 wlock = self.wlock()
809 809 lock = self.lock()
810 810 if os.path.exists(self.sjoin("undo")):
811 811 return self._rollback(dryrun, force)
812 812 else:
813 813 self.ui.warn(_("no rollback information available\n"))
814 814 return 1
815 815 finally:
816 816 release(lock, wlock)
817 817
    def _rollback(self, dryrun, force):
        """Perform the actual rollback from the undo.* files.

        Expects the caller (rollback()) to hold both wlock and lock.
        Returns 0; raises util.Abort when rolling back a commit while the
        working directory is not checked out at tip, unless *force*.
        """
        ui = self.ui
        try:
            # undo.desc records "<old changelog length>\n<desc>[\n<detail>]"
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # undo.desc missing: transaction predates its introduction
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        # remember the working-dir parents before truncating history
        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # restore dirstate/branch only if the rollback removed a
        # working-dir parent from history
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        self.destroyed()
        return 0
880 880
881 881 def invalidatecaches(self):
882 882 try:
883 883 delattr(self, '_tagscache')
884 884 except AttributeError:
885 885 pass
886 886
887 887 self._branchcache = None # in UTF-8
888 888 self._branchcachetip = None
889 889
890 890 def invalidatedirstate(self):
891 891 '''Invalidates the dirstate, causing the next call to dirstate
892 892 to check if it was modified since the last time it was read,
893 893 rereading it if it has.
894 894
895 895 This is different to dirstate.invalidate() that it doesn't always
896 896 rereads the dirstate. Use dirstate.invalidate() if you want to
897 897 explicitly read the dirstate again (i.e. restoring it to a previous
898 898 known good state).'''
899 899 try:
900 900 delattr(self, 'dirstate')
901 901 except AttributeError:
902 902 pass
903 903
904 904 def invalidate(self):
905 905 for k in self._filecache:
906 906 # dirstate is invalidated separately in invalidatedirstate()
907 907 if k == 'dirstate':
908 908 continue
909 909
910 910 try:
911 911 delattr(self, k)
912 912 except AttributeError:
913 913 pass
914 914 self.invalidatecaches()
915 915
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file *lockname* and return the lock object.

        releasefn/acquirefn run on release/acquisition; *desc* appears in
        user-facing messages.  With wait=False a held lock raises
        error.LockHeld immediately; otherwise we retry with a timeout.
        """
        try:
            # first try non-blocking so we can tell the user who holds it
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
930 930
931 931 def _afterlock(self, callback):
932 932 """add a callback to the current repository lock.
933 933
934 934 The callback will be executed on lock release."""
935 935 l = self._lockref and self._lockref()
936 936 if l:
937 937 l.postrelease.append(callback)
938 938
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # reentrant within this process: bump the existing lock's count
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # flush pending in-memory state to disk on release
            self.store.write()
            if self._dirtyphases:
                phases.writeroots(self)
            # refresh filecache entries so cached values stay trusted
            # (dirstate is handled by wlock's unlock instead)
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
961 961
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        # reentrant within this process: bump the existing lock's count
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # write the dirstate back and refresh its filecache entry
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
982 982
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: the file context to commit; manifest1/manifest2: the parent
        manifests; linkrev: revision the new filelog entry links to;
        tr: the active transaction; changelist: list to append the file
        name to when it actually changed.

        Returns the node to record in the manifest for this file.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1062 1062
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset's node, or None when there is nothing
        to commit.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories so explicit dir patterns can be
            # validated below, and make bad files abort the commit
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            # changes: (modified, added, removed, deleted, unknown,
            #           ignored, clean) -- see status()
            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0],
                                     hint=_("use --subrepos for recursive commit"))

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing changed (and no branch change / explicit close):
            # bail out without creating a changeset
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # deliberately broad: point the user at the saved message,
                # then re-raise whatever aborted the commit
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        # run the commit hook outside the lock
        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
1205 1205
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        When *error* is True, an IOError while reading a file aborts the
        commit instead of treating the file as removed.  Returns the node
        of the new changeset.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            # file vanished: record it as removed instead
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit is proper phase
            targetphase = self.ui.configint('phases', 'new-commit',
                                            phases.draft)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1287 1287
1288 1288 def destroyed(self):
1289 1289 '''Inform the repository that nodes have been destroyed.
1290 1290 Intended for use by strip and rollback, so there's a common
1291 1291 place for anything that has to be done after destroying history.'''
1292 1292 # XXX it might be nice if we could take the list of destroyed
1293 1293 # nodes, but I don't see an easy way for rollback() to do that
1294 1294
1295 1295 # Ensure the persistent tag cache is updated. Doing it now
1296 1296 # means that the tag cache only has to worry about destroyed
1297 1297 # heads immediately after a strip/rollback. That in turn
1298 1298 # guarantees that "cachetip == currenttip" (comparing both rev
1299 1299 # and node) always means no nodes have been added or destroyed.
1300 1300
1301 1301 # XXX this is suboptimal when qrefresh'ing: we strip the current
1302 1302 # head, refresh the tag cache, then immediately add a new head.
1303 1303 # But I think doing it this way is necessary for the "instant
1304 1304 # tag cache retrieval" case to work.
1305 1305 self.invalidatecaches()
1306 1306
1307 1307 def walk(self, match, node=None):
1308 1308 '''
1309 1309 walk recursively through the directory tree or a given
1310 1310 changeset, finding all files matched by the match
1311 1311 function
1312 1312 '''
1313 1313 return self[node].walk(match)
1314 1314
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean);
        the last three are only populated when the corresponding listing
        flag is set.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to files accepted by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # only warn for files missing from both sides
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # classify every file in mf2 against mf1; whatever is left in
            # mf1 afterwards was removed
            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    # fold subrepo results into ours, prefixing paths
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1461 1461
1462 1462 def heads(self, start=None):
1463 1463 heads = self.changelog.heads(start)
1464 1464 # sort the output in rev descending order
1465 1465 return sorted(heads, key=self.changelog.rev, reverse=True)
1466 1466
1467 1467 def branchheads(self, branch=None, start=None, closed=False):
1468 1468 '''return a (possibly filtered) list of heads for the given branch
1469 1469
1470 1470 Heads are returned in topological order, from newest to oldest.
1471 1471 If branch is None, use the dirstate branch.
1472 1472 If start is not None, return only heads reachable from start.
1473 1473 If closed is True, return heads that are marked as closed as well.
1474 1474 '''
1475 1475 if branch is None:
1476 1476 branch = self[None].branch()
1477 1477 branches = self.branchmap()
1478 1478 if branch not in branches:
1479 1479 return []
1480 1480 # the cache returns heads ordered lowest to highest
1481 1481 bheads = list(reversed(branches[branch]))
1482 1482 if start is not None:
1483 1483 # filter out the heads that cannot be reached from startrev
1484 1484 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1485 1485 bheads = [h for h in bheads if h in fbheads]
1486 1486 if not closed:
1487 1487 bheads = [h for h in bheads if
1488 1488 ('close' not in self.changelog.read(h)[5])]
1489 1489 return bheads
1490 1490
1491 1491 def branches(self, nodes):
1492 1492 if not nodes:
1493 1493 nodes = [self.changelog.tip()]
1494 1494 b = []
1495 1495 for n in nodes:
1496 1496 t = n
1497 1497 while True:
1498 1498 p = self.changelog.parents(n)
1499 1499 if p[1] != nullid or p[0] == nullid:
1500 1500 b.append((t, n, p[0], p[1]))
1501 1501 break
1502 1502 n = p[0]
1503 1503 return b
1504 1504
1505 1505 def between(self, pairs):
1506 1506 r = []
1507 1507
1508 1508 for top, bottom in pairs:
1509 1509 n, l, i = top, [], 0
1510 1510 f = 1
1511 1511
1512 1512 while n != bottom and n != nullid:
1513 1513 p = self.changelog.parents(n)[0]
1514 1514 if i == f:
1515 1515 l.append(n)
1516 1516 f = f * 2
1517 1517 n = p
1518 1518 i += 1
1519 1519
1520 1520 r.append(l)
1521 1521
1522 1522 return r
1523 1523
    def pull(self, remote, heads=None, force=False):
        """Pull changesets (and phase data) from *remote* into this repo.

        heads optionally limits the pull to the given remote heads.
        Returns the result of addchangegroup, or 0 when no changes were
        found.
        """
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                # pick the best protocol the remote supports
                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled every thing possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)
        finally:
            lock.release()

        return result
1584 1584
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        # intentionally a no-op in the core repository class
        pass
1591 1591
1592 1592 def push(self, remote, force=False, revs=None, newbranch=False):
1593 1593 '''Push outgoing changesets (limited by revs) from the current
1594 1594 repository to remote. Return an integer:
1595 1595 - 0 means HTTP error *or* nothing to push
1596 1596 - 1 means we pushed and remote head count is unchanged *or*
1597 1597 we have outgoing changesets but refused to push
1598 1598 - other values as described by addchangegroup()
1599 1599 '''
1600 1600 # there are two ways to push to remote repo:
1601 1601 #
1602 1602 # addchangegroup assumes local user can lock remote
1603 1603 # repo (local filesystem, old ssh servers).
1604 1604 #
1605 1605 # unbundle assumes local user cannot lock remote repo (new ssh
1606 1606 # servers, http servers).
1607 1607
1608 1608 # get local lock as we might write phase data
1609 1609 locallock = self.lock()
1610 1610 try:
1611 1611 self.checkpush(force, revs)
1612 1612 lock = None
1613 1613 unbundle = remote.capable('unbundle')
1614 1614 if not unbundle:
1615 1615 lock = remote.lock()
1616 1616 try:
1617 1617 # discovery
1618 1618 fci = discovery.findcommonincoming
1619 1619 commoninc = fci(self, remote, force=force)
1620 1620 common, inc, remoteheads = commoninc
1621 1621 fco = discovery.findcommonoutgoing
1622 1622 outgoing = fco(self, remote, onlyheads=revs,
1623 1623 commoninc=commoninc, force=force)
1624 1624
1625 1625
1626 1626 if not outgoing.missing:
1627 1627 # nothing to push
1628 1628 if outgoing.excluded:
1629 1629 msg = "no changes to push but %i secret changesets\n"
1630 1630 self.ui.status(_(msg) % len(outgoing.excluded))
1631 1631 else:
1632 1632 self.ui.status(_("no changes found\n"))
1633 1633 ret = 1
1634 1634 else:
1635 1635 # something to push
1636 1636 if not force:
1637 1637 discovery.checkheads(self, remote, outgoing,
1638 remoteheads, newbranch)
1638 remoteheads, newbranch,
1639 bool(inc))
1639 1640
1640 1641 # create a changegroup from local
1641 1642 if revs is None and not outgoing.excluded:
1642 1643 # push everything,
1643 1644 # use the fast path, no race possible on push
1644 1645 cg = self._changegroup(outgoing.missing, 'push')
1645 1646 else:
1646 1647 cg = self.getlocalbundle('push', outgoing)
1647 1648
1648 1649 # apply changegroup to remote
1649 1650 if unbundle:
1650 1651 # local repo finds heads on server, finds out what
1651 1652 # revs it must push. once revs transferred, if server
1652 1653 # finds it has different heads (someone else won
1653 1654 # commit/push race), server aborts.
1654 1655 if force:
1655 1656 remoteheads = ['force']
1656 1657 # ssh: return remote's addchangegroup()
1657 1658 # http: return remote's addchangegroup() or 0 for error
1658 1659 ret = remote.unbundle(cg, remoteheads, 'push')
1659 1660 else:
1660 1661 # we return an integer indicating remote head count change
1661 1662 ret = remote.addchangegroup(cg, 'push', self.url())
1662 1663
1663 1664 if ret:
1664 1665 # push succeed, synchonize target of the push
1665 1666 cheads = outgoing.missingheads
1666 1667 elif revs is None:
1667 1668 # All out push fails. synchronize all common
1668 1669 cheads = outgoing.commonheads
1669 1670 else:
1670 1671 # I want cheads = heads(::missingheads and ::commonheads)
1671 1672 # (missingheads is revs with secret changeset filtered out)
1672 1673 #
1673 1674 # This can be expressed as:
1674 1675 # cheads = ( (missingheads and ::commonheads)
1675 1676 # + (commonheads and ::missingheads))"
1676 1677 # )
1677 1678 #
1678 1679 # while trying to push we already computed the following:
1679 1680 # common = (::commonheads)
1680 1681 # missing = ((commonheads::missingheads) - commonheads)
1681 1682 #
1682 1683 # We can pick:
1683 1684 # * missingheads part of comon (::commonheads)
1684 1685 common = set(outgoing.common)
1685 1686 cheads = [n for node in revs if n in common]
1686 1687 # and
1687 1688 # * commonheads parents on missing
1688 1689 rvset = repo.revset('%ln and parents(roots(%ln))',
1689 1690 outgoing.commonheads,
1690 1691 outgoing.missing)
1691 1692 cheads.extend(c.node() for c in rvset)
1692 1693 # even when we don't push, exchanging phase data is useful
1693 1694 remotephases = remote.listkeys('phases')
1694 1695 if not remotephases: # old server or public only repo
1695 1696 phases.advanceboundary(self, phases.public, cheads)
1696 1697 # don't push any phase data as there is nothing to push
1697 1698 else:
1698 1699 ana = phases.analyzeremotephases(self, cheads, remotephases)
1699 1700 pheads, droots = ana
1700 1701 ### Apply remote phase on local
1701 1702 if remotephases.get('publishing', False):
1702 1703 phases.advanceboundary(self, phases.public, cheads)
1703 1704 else: # publish = False
1704 1705 phases.advanceboundary(self, phases.public, pheads)
1705 1706 phases.advanceboundary(self, phases.draft, cheads)
1706 1707 ### Apply local phase on remote
1707 1708
1708 1709 # Get the list of all revs draft on remote by public here.
1709 1710 # XXX Beware that revset break if droots is not strictly
1710 1711 # XXX root we may want to ensure it is but it is costly
1711 1712 outdated = self.set('heads((%ln::%ln) and public())',
1712 1713 droots, cheads)
1713 1714 for newremotehead in outdated:
1714 1715 r = remote.pushkey('phases',
1715 1716 newremotehead.hex(),
1716 1717 str(phases.draft),
1717 1718 str(phases.public))
1718 1719 if not r:
1719 1720 self.ui.warn(_('updating %s to public failed!\n')
1720 1721 % newremotehead)
1721 1722 finally:
1722 1723 if lock is not None:
1723 1724 lock.release()
1724 1725 finally:
1725 1726 locallock.release()
1726 1727
1727 1728 self.ui.debug("checking for updated bookmarks\n")
1728 1729 rb = remote.listkeys('bookmarks')
1729 1730 for k in rb.keys():
1730 1731 if k in self._bookmarks:
1731 1732 nr, nl = rb[k], hex(self._bookmarks[k])
1732 1733 if nr in self:
1733 1734 cr = self[nr]
1734 1735 cl = self[nl]
1735 1736 if cl in cr.descendants():
1736 1737 r = remote.pushkey('bookmarks', k, nr, nl)
1737 1738 if r:
1738 1739 self.ui.status(_("updating bookmark %s\n") % k)
1739 1740 else:
1740 1741 self.ui.warn(_('updating bookmark %s'
1741 1742 ' failed!\n') % k)
1742 1743
1743 1744 return ret
1744 1745
1745 1746 def changegroupinfo(self, nodes, source):
1746 1747 if self.ui.verbose or source == 'bundle':
1747 1748 self.ui.status(_("%d changesets found\n") % len(nodes))
1748 1749 if self.ui.debugflag:
1749 1750 self.ui.debug("list of changesets:\n")
1750 1751 for node in nodes:
1751 1752 self.ui.debug("%s\n" % hex(node))
1752 1753
1753 1754 def changegroupsubset(self, bases, heads, source):
1754 1755 """Compute a changegroup consisting of all the nodes that are
1755 1756 descendants of any of the bases and ancestors of any of the heads.
1756 1757 Return a chunkbuffer object whose read() method will return
1757 1758 successive changegroup chunks.
1758 1759
1759 1760 It is fairly complex as determining which filenodes and which
1760 1761 manifest nodes need to be included for the changeset to be complete
1761 1762 is non-trivial.
1762 1763
1763 1764 Another wrinkle is doing the reverse, figuring out which changeset in
1764 1765 the changegroup a particular filenode or manifestnode belongs to.
1765 1766 """
1766 1767 cl = self.changelog
1767 1768 if not bases:
1768 1769 bases = [nullid]
1769 1770 csets, bases, heads = cl.nodesbetween(bases, heads)
1770 1771 # We assume that all ancestors of bases are known
1771 1772 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1772 1773 return self._changegroupsubset(common, csets, heads, source)
1773 1774
1774 1775 def getlocalbundle(self, source, outgoing):
1775 1776 """Like getbundle, but taking a discovery.outgoing as an argument.
1776 1777
1777 1778 This is only implemented for local repos and reuses potentially
1778 1779 precomputed sets in outgoing."""
1779 1780 if not outgoing.missing:
1780 1781 return None
1781 1782 return self._changegroupsubset(outgoing.common,
1782 1783 outgoing.missing,
1783 1784 outgoing.missingheads,
1784 1785 source)
1785 1786
1786 1787 def getbundle(self, source, heads=None, common=None):
1787 1788 """Like changegroupsubset, but returns the set difference between the
1788 1789 ancestors of heads and the ancestors common.
1789 1790
1790 1791 If heads is None, use the local heads. If common is None, use [nullid].
1791 1792
1792 1793 The nodes in common might not all be known locally due to the way the
1793 1794 current discovery protocol works.
1794 1795 """
1795 1796 cl = self.changelog
1796 1797 if common:
1797 1798 nm = cl.nodemap
1798 1799 common = [n for n in common if n in nm]
1799 1800 else:
1800 1801 common = [nullid]
1801 1802 if not heads:
1802 1803 heads = cl.heads()
1803 1804 return self.getlocalbundle(source,
1804 1805 discovery.outgoing(cl, common, heads))
1805 1806
    def _changegroupsubset(self, commonrevs, csets, heads, source):
        """Bundle *csets* while pruning anything whose linkrev falls in
        *commonrevs* (the slow, filtering path).

        Returns an unbundle10 stream built lazily by gengroup(). The
        lookup() callback both maps nodes back to their owning changeset
        and, as a side effect, accumulates the manifests and filenodes
        that later stages must emit.
        """

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests: manifest node -> owning changelog node
        fnodes = {} # needed file nodes: fname -> {filenode -> cl node}
        changedfiles = set()
        # mutable cells shared with the lookup() closure
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        # note: sorts the caller's heads list in place
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            # dispatch on which revlog is being bundled: changelog,
            # manifest, or a filelog (via fstate)
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1902 1903
    def changegroup(self, basenodes, source):
        """Return a changegroup of everything descended from *basenodes*
        up to the current local heads."""
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)
1906 1907
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {} # manifest node -> owning changelog node
        changedfiles = set()
        # mutable cells shared with the lookup() closure
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            # nodes of *log* whose introducing changeset is being sent
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            # dispatch on which revlog is being bundled
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1991 1992
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            # small progress-reporting callback object handed to the source
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            # anything still listed in needfiles was promised by a manifest
            # but never delivered: the incoming data is incomplete
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            # compute the head-count delta, ignoring newly closed heads
            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                             % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if a changeset already
                # existed locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alter behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                # hooks run after every lock protecting this repo is released
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
2182 2183
    def stream_in(self, remote, requirements):
        """Perform a streaming clone: copy raw store files from *remote*.

        The wire format is: one status-code line, then a
        "<files> <bytes>" summary line, then for each file a
        "<name>\\0<size>" header followed by exactly *size* bytes.
        *requirements* is the set of format requirements to adopt once
        the stream completes. Returns len(self.heads()) + 1, mirroring
        the addchangegroup() head-count convention.
        """
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                # avoid division by zero on very fast transfers
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            # drop all cached state; the store was rewritten underneath us
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
2241 2242
2242 2243 def clone(self, remote, heads=[], stream=False):
2243 2244 '''clone remote repository.
2244 2245
2245 2246 keyword arguments:
2246 2247 heads: list of revs to clone (forces use of pull)
2247 2248 stream: use streaming clone if possible'''
2248 2249
2249 2250 # now, all clients that can request uncompressed clones can
2250 2251 # read repo formats supported by all servers that can serve
2251 2252 # them.
2252 2253
2253 2254 # if revlog format changes, client will have to check version
2254 2255 # and format flags on "stream" capability, and use
2255 2256 # uncompressed only if compatible.
2256 2257
2257 2258 if stream and not heads:
2258 2259 # 'stream' means remote revlog format is revlogv1 only
2259 2260 if remote.capable('stream'):
2260 2261 return self.stream_in(remote, set(('revlogv1',)))
2261 2262 # otherwise, 'streamreqs' contains the remote revlog format
2262 2263 streamreqs = remote.capable('streamreqs')
2263 2264 if streamreqs:
2264 2265 streamreqs = set(streamreqs.split(','))
2265 2266 # if we support it, stream in and adjust our requirements
2266 2267 if not streamreqs - self.supportedformats:
2267 2268 return self.stream_in(remote, streamreqs)
2268 2269 return self.pull(remote, heads)
2269 2270
2270 2271 def pushkey(self, namespace, key, old, new):
2271 2272 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2272 2273 old=old, new=new)
2273 2274 ret = pushkey.push(self, namespace, key, old, new)
2274 2275 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2275 2276 ret=ret)
2276 2277 return ret
2277 2278
2278 2279 def listkeys(self, namespace):
2279 2280 self.hook('prelistkeys', throw=True, namespace=namespace)
2280 2281 values = pushkey.list(self, namespace)
2281 2282 self.hook('listkeys', namespace=namespace, values=values)
2282 2283 return values
2283 2284
2284 2285 def debugwireargs(self, one, two, three=None, four=None, five=None):
2285 2286 '''used to test argument passing over the wire'''
2286 2287 return "%s %s %s %s %s" % (one, two, three, four, five)
2287 2288
2288 2289 def savecommitmessage(self, text):
2289 2290 fp = self.opener('last-message.txt', 'wb')
2290 2291 try:
2291 2292 fp.write(text)
2292 2293 finally:
2293 2294 fp.close()
2294 2295 return self.pathto(fp.name[len(self.root)+1:])
2295 2296
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs every (src, dest) rename in
    *files*; run after a transaction to move journal files aside."""
    pending = [tuple(entry) for entry in files]
    def renameall():
        for src, dest in pending:
            util.rename(src, dest)
    return renameall
2303 2304
def undoname(fn):
    """Map a journal file path to the matching undo file path
    (only the first 'journal' occurrence in the basename is replaced)."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(directory, name.replace('journal', 'undo', 1))
2308 2309
def instance(ui, path, create):
    """Repository-factory hook: open (or create) a localrepository at
    the local path denoted by *path*."""
    return localrepository(ui, util.urllocalpath(path), create)
2311 2312
def islocal(path):
    """Repository-factory hook: local repositories are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now