move discovery methods from localrepo into new discovery module
Dirkjan Ochtman
r11301:3d0591a6 default
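The change is mechanical for callers: the discovery helpers are no longer methods on localrepository but module-level functions in the new mercurial.discovery module, taking the repository as their first argument. A minimal before/after sketch of the call-site rewrite, using the same names the hunks below use (repo, other, opts):

    from mercurial import discovery

    # before: discovery was a method on the local repository
    #   o = repo.findoutgoing(other, force=opts.get('force'))

    # after: module-level function, repo passed explicitly
    o = discovery.findoutgoing(repo, other, force=opts.get('force'))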
hgext/graphlog.py
@@ -18,7 +18,7 @@ from mercurial.commands import templateo
 from mercurial.i18n import _
 from mercurial.node import nullrev
 from mercurial import bundlerepo, changegroup, cmdutil, commands, extensions
-from mercurial import hg, url, util, graphmod
+from mercurial import hg, url, util, graphmod, discovery
 
 ASCIIDATA = 'ASC'
 
@@ -283,7 +283,7 @@ def goutgoing(ui, repo, dest=None, **opt
     if revs:
         revs = [repo.lookup(rev) for rev in revs]
     ui.status(_('comparing with %s\n') % url.hidepassword(dest))
-    o = repo.findoutgoing(other, force=opts.get('force'))
+    o = discovery.findoutgoing(repo, other, force=opts.get('force'))
     if not o:
         ui.status(_("no changes found\n"))
         return
@@ -311,7 +311,8 @@ def gincoming(ui, repo, source="default"
     ui.status(_('comparing with %s\n') % url.hidepassword(source))
     if revs:
         revs = [other.lookup(rev) for rev in revs]
-    incoming = repo.findincoming(other, heads=revs, force=opts["force"])
+    incoming = discovery.findincoming(repo, other, heads=revs,
+                                      force=opts["force"])
     if not incoming:
         try:
             os.unlink(opts["bundle"])
hgext/patchbomb.py
@@ -76,7 +76,7 @@ hgrc(5) for details.
 import os, errno, socket, tempfile, cStringIO, time
 import email.MIMEMultipart, email.MIMEBase
 import email.Utils, email.Encoders, email.Generator
-from mercurial import cmdutil, commands, hg, mail, patch, util
+from mercurial import cmdutil, commands, hg, mail, patch, util, discovery
 from mercurial.i18n import _
 from mercurial.node import bin
 
@@ -244,7 +244,7 @@ def patchbomb(ui, repo, *revs, **opts):
             revs = [repo.lookup(rev) for rev in revs]
         other = hg.repository(hg.remoteui(repo, opts), dest)
         ui.status(_('comparing with %s\n') % dest)
-        o = repo.findoutgoing(other)
+        o = discovery.findoutgoing(repo, other)
         if not o:
             ui.status(_("no changes found\n"))
             return []
hgext/transplant.py
@@ -16,7 +16,7 @@ map from a changeset hash to its hash in
 from mercurial.i18n import _
 import os, tempfile
 from mercurial import bundlerepo, changegroup, cmdutil, hg, merge, match
-from mercurial import patch, revlog, util, error
+from mercurial import patch, revlog, util, error, discovery
 
 class transplantentry(object):
     def __init__(self, lnode, rnode):
@@ -472,7 +472,8 @@ def transplant(ui, repo, *revs, **opts):
     def getremotechanges(repo, url):
         sourcerepo = ui.expandpath(url)
         source = hg.repository(ui, sourcerepo)
-        common, incoming, rheads = repo.findcommonincoming(source, force=True)
+        tmp = discovery.findcommonincoming(repo, source, force=True)
+        common, incoming, rheads = tmp
         if not incoming:
             return (source, None, None)
 
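findcommonincoming returns a three-element tuple; transplant (and hg incoming below) bind it to a temporary name first only to keep the rewritten lines within the line-length limit. A hedged sketch of the unpacking, with the element meanings paraphrased from the new module's docstring:

    # tuple is (common, missing roots, heads), per the docstring of
    # discovery.findcommonincoming: nodes known to both sides, roots of
    # the changeset subsets missing locally, and the remote heads that
    # were compared against
    tmp = discovery.findcommonincoming(repo, source, force=True)
    common, incoming, rheads = tmp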
mercurial/commands.py
@@ -10,7 +10,7 @@ from lock import release
 from i18n import _, gettext
 import os, re, sys, difflib, time, tempfile
 import hg, util, revlog, bundlerepo, extensions, copies, error
-import patch, help, mdiff, url, encoding, templatekw
+import patch, help, mdiff, url, encoding, templatekw, discovery
 import archival, changegroup, cmdutil, sshserver, hbisect, hgweb, hgweb.server
 import merge as mergemod
 import minirst, revset
@@ -596,7 +596,7 @@ def bundle(ui, repo, fname, dest=None, *
         dest, branches = hg.parseurl(dest, opts.get('branch'))
         other = hg.repository(hg.remoteui(repo, opts), dest)
         revs, checkout = hg.addbranchrevs(repo, other, branches, revs)
-        o = repo.findoutgoing(other, force=opts.get('force'))
+        o = discovery.findoutgoing(repo, other, force=opts.get('force'))
 
     if not o:
         ui.status(_("no changes found\n"))
@@ -2090,8 +2090,10 @@ def incoming(ui, repo, source="default",
     revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
     if revs:
         revs = [other.lookup(rev) for rev in revs]
-    common, incoming, rheads = repo.findcommonincoming(other, heads=revs,
-                                                       force=opts["force"])
+
+    tmp = discovery.findcommonincoming(repo, other, heads=revs,
+                                       force=opts.get('force'))
+    common, incoming, rheads = tmp
     if not incoming:
         try:
             os.unlink(opts["bundle"])
@@ -2395,7 +2397,7 @@ def outgoing(ui, repo, dest=None, **opts
 
     other = hg.repository(hg.remoteui(repo, opts), dest)
     ui.status(_('comparing with %s\n') % url.hidepassword(dest))
-    o = repo.findoutgoing(other, force=opts.get('force'))
+    o = discovery.findoutgoing(repo, other, force=opts.get('force'))
     if not o:
         ui.status(_("no changes found\n"))
         return 1
@@ -3324,7 +3326,7 @@ def summary(ui, repo, **opts):
         revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
         ui.debug('comparing with %s\n' % url.hidepassword(source))
         repo.ui.pushbuffer()
-        common, incoming, rheads = repo.findcommonincoming(other)
+        common, incoming, rheads = discovery.findcommonincoming(repo, other)
         repo.ui.popbuffer()
         if incoming:
             t.append(_('1 or more incoming'))
@@ -3334,7 +3336,7 @@ def summary(ui, repo, **opts):
         other = hg.repository(hg.remoteui(repo, {}), dest)
         ui.debug('comparing with %s\n' % url.hidepassword(dest))
         repo.ui.pushbuffer()
-        o = repo.findoutgoing(other)
+        o = discovery.findoutgoing(repo, other)
         repo.ui.popbuffer()
         o = repo.changelog.nodesbetween(o, None)[0]
         if o:
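Both summary hunks wrap the discovery call in repo.ui.pushbuffer() / popbuffer(): findcommonincoming still prints "searching for changes" through repo.ui, and buffering keeps that chatter out of the summary output. A minimal sketch of the pattern:

    repo.ui.pushbuffer()                      # start capturing ui output
    o = discovery.findoutgoing(repo, other)   # would otherwise print status text
    repo.ui.popbuffer()                       # discard the captured text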
This diff has been collapsed as it changes many lines (2537 lines changed); only the new module's contents are reproduced below.
mercurial/discovery.py (copied from mercurial/localrepo.py)
@@ -5,2293 +5,346 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import nullid, short
from i18n import _
import util, error

def findincoming(repo, remote, base=None, heads=None, force=False):
    """Return list of roots of the subsets of missing nodes from remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and repo.
    Furthermore base will be updated to include the nodes that exists
    in repo and remote but no children exists in repo and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in repo and in remote.
    All the descendants of the list returned are missing in repo.
    (and so we know that the rest of the nodes are missing in remote, see
    outgoing)
    """
    return findcommonincoming(repo, remote, base, heads, force)[1]

def findcommonincoming(repo, remote, base=None, heads=None, force=False):
    """Return a tuple (common, missing roots, heads) used to identify
    missing nodes from remote.

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and repo.
    Furthermore base will be updated to include the nodes that exists
    in repo and remote but no children exists in repo and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in repo and in remote.
    """
    m = repo.changelog.nodemap
    search = []
    fetch = set()
    seen = set()
    seenbranch = set()
    if base is None:
        base = {}

    if not heads:
        heads = remote.heads()

    if repo.changelog.tip() == nullid:
        base[nullid] = 1
        if heads != [nullid]:
            return [nullid], [nullid], list(heads)
        return [nullid], [], []

    # assume we're closer to the tip than the root
    # and start by examining the heads
    repo.ui.status(_("searching for changes\n"))

    unknown = []
    for h in heads:
        if h not in m:
            unknown.append(h)
        else:
            base[h] = 1

    heads = unknown
    if not unknown:
        return base.keys(), [], []

    req = set(unknown)
    reqcnt = 0

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = remote.branches(unknown)
    while unknown:
        r = []
        while unknown:
            n = unknown.pop(0)
            if n[0] in seen:
                continue

            repo.ui.debug("examining %s:%s\n"
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid: # found the end of the branch
                pass
            elif n in seenbranch:
                repo.ui.debug("branch already found\n")
                continue
            elif n[1] and n[1] in m: # do we know the base?
                repo.ui.debug("found incomplete branch %s:%s\n"
                              % (short(n[0]), short(n[1])))
                search.append(n[0:2]) # schedule branch range for scanning
                seenbranch.add(n)
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in m and n[3] in m:
                        repo.ui.debug("found new changeset %s\n" %
                                      short(n[1]))
                        fetch.add(n[1]) # earliest unknown
                    for p in n[2:4]:
                        if p in m:
                            base[p] = 1 # latest known

                for p in n[2:4]:
                    if p not in req and p not in m:
                        r.append(p)
                        req.add(p)
            seen.add(n[0])

        if r:
            reqcnt += 1
            repo.ui.progress(_('searching'), reqcnt, unit=_('queries'))
            repo.ui.debug("request %d: %s\n" %
                          (reqcnt, " ".join(map(short, r))))
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p + 10]):
                    repo.ui.debug("received %s:%s\n" %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        newsearch = []
        reqcnt += 1
        repo.ui.progress(_('searching'), reqcnt, unit=_('queries'))
        for n, l in zip(search, remote.between(search)):
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                repo.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        repo.ui.debug("found new branch changeset %s\n" %
                                      short(p))
                        fetch.add(p)
                        base[i] = 1
                    else:
                        repo.ui.debug("narrowed branch search to %s:%s\n"
                                      % (short(p), short(i)))
                        newsearch.append((p, i))
                    break
                p, f = i, f * 2
        search = newsearch

    # sanity check our fetch list
    for f in fetch:
        if f in m:
            raise error.RepoError(_("already have changeset ")
                                  + short(f[:4]))

    if base.keys() == [nullid]:
        if force:
            repo.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    repo.ui.debug("found new changesets starting at " +
                  " ".join([short(f) for f in fetch]) + "\n")

    repo.ui.progress(_('searching'), None)
    repo.ui.debug("%d total queries\n" % reqcnt)

    return base.keys(), list(fetch), heads

def findoutgoing(repo, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    if base is None:
        base = {}
        findincoming(repo, remote, base, heads, force=force)

    repo.ui.debug("common changesets up to "
                  + " ".join(map(short, base.keys())) + "\n")

    remain = set(repo.changelog.nodemap)

    # prune everything remote has from the tree
    remain.remove(nullid)
    remove = base.keys()
    while remove:
        n = remove.pop(0)
        if n in remain:
            remain.remove(n)
            for p in repo.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned
    subset = []
    # find every remote head that will get new children
    updated_heads = set()
    for n in remain:
        p1, p2 = repo.changelog.parents(n)
        if p1 not in remain and p2 not in remain:
            subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

    # this is the set of all roots we have to push
    if heads:
        return subset, list(updated_heads)
    else:
        return subset
1497 elif heads is None and remote.capable('changegroupsubset'):
1498 # issue1320, avoid a race if remote changed after discovery
1499 heads = rheads
1500
216
1501 if heads is None:
217 # this is the set of all roots we have to push
1502 cg = remote.changegroup(fetch, 'pull')
218 if heads:
1503 else:
219 return subset, list(updated_heads)
1504 if not remote.capable('changegroupsubset'):
220 else:
1505 raise util.Abort(_("Partial pull cannot be done because "
221 return subset
1506 "other repository doesn't support "
1507 "changegroupsubset."))
1508 cg = remote.changegroupsubset(fetch, heads, 'pull')
1509 return self.addchangegroup(cg, 'pull', remote.url())
1510 finally:
1511 lock.release()
1512
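A minimal sketch of driving the pull API above from an embedding script; the paths and URL here are invented, and ui/hg/remoteui are the same entry points the extensions touched by this changeset use:

    from mercurial import ui as uimod, hg

    u = uimod.ui()
    repo = hg.repository(u, '/path/to/local')
    other = hg.repository(hg.remoteui(repo, {}), 'http://hg.example.com/repo')
    if repo.pull(other) == 0:
        u.status("nothing new\n")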
1513 def push(self, remote, force=False, revs=None, newbranch=False):
1514 '''Push outgoing changesets (limited by revs) from the current
1515 repository to remote. Return an integer:
1516 - 0 means HTTP error *or* nothing to push
1517 - 1 means we pushed and remote head count is unchanged *or*
1518 we have outgoing changesets but refused to push
1519 - other values as described by addchangegroup()
1520 '''
1521 # there are two ways to push to remote repo:
1522 #
1523 # addchangegroup assumes local user can lock remote
1524 # repo (local filesystem, old ssh servers).
1525 #
1526 # unbundle assumes local user cannot lock remote repo (new ssh
1527 # servers, http servers).
1528
1529 if remote.capable('unbundle'):
1530 return self.push_unbundle(remote, force, revs, newbranch)
1531 return self.push_addchangegroup(remote, force, revs, newbranch)
1532
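A hedged sketch of interpreting push()'s return value exactly as the docstring above describes (repo, other and u as in the previous sketch):

    ret = repo.push(other)
    if ret == 0:
        u.warn("push failed, or there was nothing to push\n")
    elif ret == 1:
        u.status("pushed without changing the remote head count, "
                 "or the push was refused\n")
    else:
        u.status("pushed; addchangegroup() reported %d\n" % ret)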
222
223 def prepush(repo, remote, force, revs, newbranch):
224 '''Analyze the local and remote repositories and determine which
225 changesets need to be pushed to the remote. Return value depends
226 on circumstances:
227
228 If we are not going to push anything, return a tuple (None,
229 outgoing) where outgoing is 0 if there are no outgoing
230 changesets and 1 if there are, but we refuse to push them
231 (e.g. would create new remote heads).
232
233 Otherwise, return a tuple (changegroup, remoteheads), where
234 changegroup is a readable file-like object whose read() returns
235 successive changegroup chunks ready to be sent over the wire and
236 remoteheads is the list of remote heads.'''
237 common = {}
238 remote_heads = remote.heads()
239 inc = findincoming(repo, remote, common, remote_heads, force=force)
240
241 cl = repo.changelog
242 update, updated_heads = findoutgoing(repo, remote, common, remote_heads)
243 outg, bases, heads = cl.nodesbetween(update, revs)
244
245 if not bases:
246 repo.ui.status(_("no changes found\n"))
247 return None, 1
248
249 if not force and remote_heads != [nullid]:
250
251 def fail_multiple_heads(unsynced, branch=None):
252 if branch:
253 msg = _("abort: push creates new remote heads"
254 " on branch '%s'!\n") % branch
255 else:
256 msg = _("abort: push creates new remote heads!\n")
257 repo.ui.warn(msg)
258 if unsynced:
259 repo.ui.status(_("(you should pull and merge or"
260 " use push -f to force)\n"))
261 else:
262 repo.ui.status(_("(did you forget to merge?"
263 " use push -f to force)\n"))
264 return None, 0
265
266 if remote.capable('branchmap'):
267 # Check for each named branch if we're creating new remote heads.
268 # To be a remote head after push, node must be either:
269 # - unknown locally
270 # - a local outgoing head descended from update
271 # - a remote head that's known locally and not
272 # ancestral to an outgoing head
273 #
274 # New named branches cannot be created without --force.
275
276 # 1. Create set of branches involved in the push.
277 branches = set(repo[n].branch() for n in outg)
278
279 # 2. Check for new branches on the remote.
280 remotemap = remote.branchmap()
281 newbranches = branches - set(remotemap)
282 if newbranches and not newbranch: # new branch requires --new-branch
283 branchnames = ', '.join("%s" % b for b in newbranches)
284 repo.ui.warn(_("abort: push creates "
285 "new remote branches: %s!\n")
286 % branchnames)
287 repo.ui.status(_("(use 'hg push --new-branch' to create new "
288 "remote branches)\n"))
289 return None, 0
290 branches.difference_update(newbranches)
291
292 # 3. Construct the initial oldmap and newmap dicts.
293 # They contain information about the remote heads before and
294 # after the push, respectively.
295 # Heads not found locally are not included in either dict,
296 # since they won't be affected by the push.
297 # unsynced contains all branches with incoming changesets.
298 oldmap = {}
299 newmap = {}
300 unsynced = set()
301 for branch in branches:
302 remoteheads = remotemap[branch]
303 prunedheads = [h for h in remoteheads if h in cl.nodemap]
304 oldmap[branch] = prunedheads
305 newmap[branch] = list(prunedheads)
306 if len(remoteheads) > len(prunedheads):
307 unsynced.add(branch)
308
309 # 4. Update newmap with outgoing changes.
310 # This will possibly add new heads and remove existing ones.
311 ctxgen = (repo[n] for n in outg)
312 repo._updatebranchcache(newmap, ctxgen)
313
314 # 5. Check for new heads.
315 # If there are more heads after the push than before, a suitable
316 # warning, depending on unsynced status, is displayed.
317 for branch in branches:
318 if len(newmap[branch]) > len(oldmap[branch]):
319 return fail_multiple_heads(branch in unsynced, branch)
320
321 # 6. Check for unsynced changes on involved branches.
322 if unsynced:
323 repo.ui.warn(_("note: unsynced remote changes!\n"))
324
325 else:
326 # Old servers: Check for new topological heads.
327 # Code based on _updatebranchcache.
328 newheads = set(h for h in remote_heads if h in cl.nodemap)
329 oldheadcnt = len(newheads)
330 newheads.update(outg)
331 if len(newheads) > 1:
332 for latest in reversed(outg):
333 if latest not in newheads:
334 continue
335 minhrev = min(cl.rev(h) for h in newheads)
336 reachable = cl.reachable(latest, cl.node(minhrev))
337 reachable.remove(latest)
338 newheads.difference_update(reachable)
339 if len(newheads) > oldheadcnt:
340 return fail_multiple_heads(inc)
341 if inc:
342 repo.ui.warn(_("note: unsynced remote changes!\n"))
343
344 if revs is None:
345 # use the fast path, no race possible on push
346 nodes = repo.changelog.findmissing(common.keys())
347 cg = repo._changegroup(nodes, 'push')
348 else:
349 cg = repo.changegroupsubset(update, revs, 'push')
350 return cg, remote_heads
1662 def push_addchangegroup(self, remote, force, revs, newbranch):
1663 '''Push a changegroup by locking the remote and sending the
1664 addchangegroup command to it. Used for local and old SSH repos.
1665 Return an integer: see push().
1666 '''
1667 lock = remote.lock()
1668 try:
1669 ret = self.prepush(remote, force, revs, newbranch)
1670 if ret[0] is not None:
1671 cg, remote_heads = ret
1672 # here, we return an integer indicating remote head count change
1673 return remote.addchangegroup(cg, 'push', self.url())
1674 # and here we return 0 for "nothing to push" or 1 for
1675 # "something to push but I refuse"
1676 return ret[1]
1677 finally:
1678 lock.release()
1679
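The two-armed return contract of prepush() is easy to misread; a small sketch of how both push paths consume it (repo and other are assumed to be open, as in the earlier sketches):

    from mercurial import discovery

    ret = discovery.prepush(repo, other, False, None, False)
    if ret[0] is None:
        outgoing = ret[1]  # 0: no outgoing changesets, 1: refused to push
    else:
        cg, remote_heads = ret  # changegroup stream plus the remote's heads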
1680 def push_unbundle(self, remote, force, revs, newbranch):
1681 '''Push a changegroup by unbundling it on the remote. Used for new
1682 SSH and HTTP repos. Return an integer: see push().'''
1683 # local repo finds heads on server, finds out what revs it
1684 # must push. once revs transferred, if server finds it has
1685 # different heads (someone else won commit/push race), server
1686 # aborts.
1687
1688 ret = self.prepush(remote, force, revs, newbranch)
1689 if ret[0] is not None:
1690 cg, remote_heads = ret
1691 if force:
1692 remote_heads = ['force']
1693 # ssh: return remote's addchangegroup()
1694 # http: return remote's addchangegroup() or 0 for error
1695 return remote.unbundle(cg, remote_heads, 'push')
1696 # as in push_addchangegroup()
1697 return ret[1]
1698
1699 def changegroupinfo(self, nodes, source):
1700 if self.ui.verbose or source == 'bundle':
1701 self.ui.status(_("%d changesets found\n") % len(nodes))
1702 if self.ui.debugflag:
1703 self.ui.debug("list of changesets:\n")
1704 for node in nodes:
1705 self.ui.debug("%s\n" % hex(node))
1706
1707 def changegroupsubset(self, bases, heads, source, extranodes=None):
1708 """Compute a changegroup consisting of all the nodes that are
1709 descendants of any of the bases and ancestors of any of the heads.
1710 Return a chunkbuffer object whose read() method will return
1711 successive changegroup chunks.
1712
1713 It is fairly complex as determining which filenodes and which
1714 manifest nodes need to be included for the changeset to be complete
1715 is non-trivial.
1716
1717 Another wrinkle is doing the reverse, figuring out which changeset in
1718 the changegroup a particular filenode or manifestnode belongs to.
1719
1720 The caller can specify some nodes that must be included in the
1721 changegroup using the extranodes argument. It should be a dict
1722 where the keys are the filenames (or 1 for the manifest), and the
1723 values are lists of (node, linknode) tuples, where node is a wanted
1724 node and linknode is the changelog node that should be transmitted as
1725 the linkrev.
1726 """
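    # Illustration (not part of the original source): a hypothetical
    # extranodes mapping of the shape the docstring above describes,
    # where every node and linknode is a 20-byte binary revlog id.
    fnode = '\x11' * 20
    clnode = '\x22' * 20
    manifestnode = '\x33' * 20
    extranodes = {
        'foo.c': [(fnode, clnode)],
        1: [(manifestnode, clnode)],  # the key 1 addresses the manifest
    }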
1727
1728 # Set up some initial variables
1729 # Make it easy to refer to self.changelog
1730 cl = self.changelog
1731 # msng is short for missing - compute the list of changesets in this
1732 # changegroup.
1733 if not bases:
1734 bases = [nullid]
1735 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1736
1737 if extranodes is None:
1738 # can we go through the fast path ?
1739 heads.sort()
1740 allheads = self.heads()
1741 allheads.sort()
1742 if heads == allheads:
1743 return self._changegroup(msng_cl_lst, source)
1744
1745 # slow path
1746 self.hook('preoutgoing', throw=True, source=source)
1747
1748 self.changegroupinfo(msng_cl_lst, source)
1749 # Some bases may turn out to be superfluous, and some heads may be
1750 # too. nodesbetween will return the minimal set of bases and heads
1751 # necessary to re-create the changegroup.
1752
1753 # Known heads are the list of heads that it is assumed the recipient
1754 # of this changegroup will know about.
1755 knownheads = set()
1756 # We assume that all parents of bases are known heads.
1757 for n in bases:
1758 knownheads.update(cl.parents(n))
1759 knownheads.discard(nullid)
1760 knownheads = list(knownheads)
1761 if knownheads:
1762 # Now that we know what heads are known, we can compute which
1763 # changesets are known. The recipient must know about all
1764 # changesets required to reach the known heads from the null
1765 # changeset.
1766 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1767 junk = None
1768 # Transform the list into a set.
1769 has_cl_set = set(has_cl_set)
1770 else:
1771 # If there were no known heads, the recipient cannot be assumed to
1772 # know about any changesets.
1773 has_cl_set = set()
1774
1775 # Make it easy to refer to self.manifest
1776 mnfst = self.manifest
1777 # We don't know which manifests are missing yet
1778 msng_mnfst_set = {}
1779 # Nor do we know which filenodes are missing.
1780 msng_filenode_set = {}
1781
1782 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1783 junk = None
1784
1785 # A changeset always belongs to itself, so the changenode lookup
1786 # function for a changenode is identity.
1787 def identity(x):
1788 return x
1789
1790 # If we determine that a particular file or manifest node must be a
1791 # node that the recipient of the changegroup will already have, we can
1792 # also assume the recipient will have all the parents. This function
1793 # prunes them from the set of missing nodes.
1794 def prune_parents(revlog, hasset, msngset):
1795 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1796 msngset.pop(revlog.node(r), None)
1797
1798 # Use the information collected in collect_manifests_and_files to say
1799 # which changenode any manifestnode belongs to.
1800 def lookup_manifest_link(mnfstnode):
1801 return msng_mnfst_set[mnfstnode]
1802
1803 # A function generating function that sets up the initial environment
1804 # for the inner function.
1805 def filenode_collector(changedfiles):
1806 # This gathers information from each manifestnode included in the
1807 # changegroup about which filenodes the manifest node references
1808 # so we can include those in the changegroup too.
1809 #
1810 # It also remembers which changenode each filenode belongs to. It
1811 # does this by assuming a filenode belongs to the changenode
1812 # the first manifest that references it belongs to.
1813 def collect_msng_filenodes(mnfstnode):
1814 r = mnfst.rev(mnfstnode)
1815 if r - 1 in mnfst.parentrevs(r):
1816 # If the previous rev is one of the parents,
1817 # we only need to see a diff.
1818 deltamf = mnfst.readdelta(mnfstnode)
1819 # For each line in the delta
1820 for f, fnode in deltamf.iteritems():
1821 f = changedfiles.get(f, None)
1822 # And if the file is in the list of files we care
1823 # about.
1824 if f is not None:
1825 # Get the changenode this manifest belongs to
1826 clnode = msng_mnfst_set[mnfstnode]
1827 # Create the set of filenodes for the file if
1828 # there isn't one already.
1829 ndset = msng_filenode_set.setdefault(f, {})
1830 # And set the filenode's changelog node to the
1831 # manifest's if it hasn't been set already.
1832 ndset.setdefault(fnode, clnode)
1833 else:
1834 # Otherwise we need a full manifest.
1835 m = mnfst.read(mnfstnode)
1836 # For every file we care about.
1837 for f in changedfiles:
1838 fnode = m.get(f, None)
1839 # If it's in the manifest
1840 if fnode is not None:
1841 # See comments above.
1842 clnode = msng_mnfst_set[mnfstnode]
1843 ndset = msng_filenode_set.setdefault(f, {})
1844 ndset.setdefault(fnode, clnode)
1845 return collect_msng_filenodes
1846
1847 # We have a list of filenodes we think we need for a file, lets remove
1848 # all those we know the recipient must have.
1849 def prune_filenodes(f, filerevlog):
1850 msngset = msng_filenode_set[f]
1851 hasset = set()
1852 # If a 'missing' filenode thinks it belongs to a changenode we
1853 # assume the recipient must have, then the recipient must have
1854 # that filenode.
1855 for n in msngset:
1856 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1857 if clnode in has_cl_set:
1858 hasset.add(n)
1859 prune_parents(filerevlog, hasset, msngset)
1860
1861 # A function generator function that sets up a context for the
1862 # inner function.
1863 def lookup_filenode_link_func(fname):
1864 msngset = msng_filenode_set[fname]
1865 # Lookup the changenode the filenode belongs to.
1866 def lookup_filenode_link(fnode):
1867 return msngset[fnode]
1868 return lookup_filenode_link
1869
1870 # Add the nodes that were explicitly requested.
1871 def add_extra_nodes(name, nodes):
1872 if not extranodes or name not in extranodes:
1873 return
1874
1875 for node, linknode in extranodes[name]:
1876 if node not in nodes:
1877 nodes[node] = linknode
1878
1879 # Now that we have all these utility functions to help out and
1880 # logically divide up the task, generate the group.
1881 def gengroup():
1882 # The set of changed files starts empty.
1883 changedfiles = {}
1884 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1885
1886 # Create a changenode group generator that will call our functions
1887 # back to lookup the owning changenode and collect information.
1888 group = cl.group(msng_cl_lst, identity, collect)
1889 cnt = 0
1890 for chnk in group:
1891 yield chnk
1892 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1893 cnt += 1
1894 self.ui.progress(_('bundling changes'), None)
1895
1896
1897 # Figure out which manifest nodes (of the ones we think might be
1898 # part of the changegroup) the recipient must know about and
1899 # remove them from the changegroup.
1900 has_mnfst_set = set()
1901 for n in msng_mnfst_set:
1902 # If a 'missing' manifest thinks it belongs to a changenode
1903 # the recipient is assumed to have, obviously the recipient
1904 # must have that manifest.
1905 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1906 if linknode in has_cl_set:
1907 has_mnfst_set.add(n)
1908 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1909 add_extra_nodes(1, msng_mnfst_set)
1910 msng_mnfst_lst = msng_mnfst_set.keys()
1911 # Sort the manifestnodes by revision number.
1912 msng_mnfst_lst.sort(key=mnfst.rev)
1913 # Create a generator for the manifestnodes that calls our lookup
1914 # and data collection functions back.
1915 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1916 filenode_collector(changedfiles))
1917 cnt = 0
1918 for chnk in group:
1919 yield chnk
1920 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1921 cnt += 1
1922 self.ui.progress(_('bundling manifests'), None)
1923
1924 # These are no longer needed, dereference and toss the memory for
1925 # them.
1926 msng_mnfst_lst = None
1927 msng_mnfst_set.clear()
1928
1929 if extranodes:
1930 for fname in extranodes:
1931 if isinstance(fname, int):
1932 continue
1933 msng_filenode_set.setdefault(fname, {})
1934 changedfiles[fname] = 1
1935 # Go through all our files in order sorted by name.
1936 cnt = 0
1937 for fname in sorted(changedfiles):
1938 filerevlog = self.file(fname)
1939 if not len(filerevlog):
1940 raise util.Abort(_("empty or missing revlog for %s") % fname)
1941 # Toss out the filenodes that the recipient isn't really
1942 # missing.
1943 if fname in msng_filenode_set:
1944 prune_filenodes(fname, filerevlog)
1945 add_extra_nodes(fname, msng_filenode_set[fname])
1946 msng_filenode_lst = msng_filenode_set[fname].keys()
1947 else:
1948 msng_filenode_lst = []
1949 # If any filenodes are left, generate the group for them,
1950 # otherwise don't bother.
1951 if len(msng_filenode_lst) > 0:
1952 yield changegroup.chunkheader(len(fname))
1953 yield fname
1954 # Sort the filenodes by their revision #
1955 msng_filenode_lst.sort(key=filerevlog.rev)
1956 # Create a group generator and only pass in a changenode
1957 # lookup function as we need to collect no information
1958 # from filenodes.
1959 group = filerevlog.group(msng_filenode_lst,
1960 lookup_filenode_link_func(fname))
1961 for chnk in group:
1962 self.ui.progress(
1963 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1964 cnt += 1
1965 yield chnk
1966 if fname in msng_filenode_set:
1967 # Don't need this anymore, toss it to free memory.
1968 del msng_filenode_set[fname]
1969 # Signal that no more groups are left.
1970 yield changegroup.closechunk()
1971 self.ui.progress(_('bundling files'), None)
1972
1973 if msng_cl_lst:
1974 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1975
1976 return util.chunkbuffer(gengroup())
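Both changegroup builders in this file hand back a util.chunkbuffer, so a caller just drains read() until it returns an empty string; a sketch (bases and heads are assumed lists of binary nodes, outfile any writable file object):

    cg = repo.changegroupsubset(bases, heads, 'bundle')
    while True:
        chunk = cg.read(4096)
        if not chunk:
            break
        outfile.write(chunk)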
1977
1978 def changegroup(self, basenodes, source):
1979 # to avoid a race we use changegroupsubset() (issue1320)
1980 return self.changegroupsubset(basenodes, self.heads(), source)
1981
1982 def _changegroup(self, nodes, source):
1983 """Compute the changegroup of all nodes that we have that a recipient
1984 doesn't. Return a chunkbuffer object whose read() method will return
1985 successive changegroup chunks.
1986
1987 This is much easier than the previous function as we can assume that
1988 the recipient has any changenode we aren't sending them.
1989
1990 nodes is the set of nodes to send"""
1991
1992 self.hook('preoutgoing', throw=True, source=source)
1993
1994 cl = self.changelog
1995 revset = set([cl.rev(n) for n in nodes])
1996 self.changegroupinfo(nodes, source)
1997
1998 def identity(x):
1999 return x
2000
2001 def gennodelst(log):
2002 for r in log:
2003 if log.linkrev(r) in revset:
2004 yield log.node(r)
2005
2006 def lookuprevlink_func(revlog):
2007 def lookuprevlink(n):
2008 return cl.node(revlog.linkrev(revlog.rev(n)))
2009 return lookuprevlink
2010
2011 def gengroup():
2012 '''yield a sequence of changegroup chunks (strings)'''
2013 # construct a list of all changed files
2014 changedfiles = {}
2015 mmfs = {}
2016 collect = changegroup.collector(cl, mmfs, changedfiles)
2017
2018 cnt = 0
2019 for chnk in cl.group(nodes, identity, collect):
2020 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
2021 cnt += 1
2022 yield chnk
2023 self.ui.progress(_('bundling changes'), None)
2024
2025 mnfst = self.manifest
2026 nodeiter = gennodelst(mnfst)
2027 cnt = 0
2028 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
2029 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
2030 cnt += 1
2031 yield chnk
2032 self.ui.progress(_('bundling manifests'), None)
2033
2034 cnt = 0
2035 for fname in sorted(changedfiles):
2036 filerevlog = self.file(fname)
2037 if not len(filerevlog):
2038 raise util.Abort(_("empty or missing revlog for %s") % fname)
2039 nodeiter = gennodelst(filerevlog)
2040 nodeiter = list(nodeiter)
2041 if nodeiter:
2042 yield changegroup.chunkheader(len(fname))
2043 yield fname
2044 lookup = lookuprevlink_func(filerevlog)
2045 for chnk in filerevlog.group(nodeiter, lookup):
2046 self.ui.progress(
2047 _('bundling files'), cnt, item=fname, unit=_('chunks'))
2048 cnt += 1
2049 yield chnk
2050 self.ui.progress(_('bundling files'), None)
2051
2052 yield changegroup.closechunk()
2053
2054 if nodes:
2055 self.hook('outgoing', node=hex(nodes[0]), source=source)
2056
2057 return util.chunkbuffer(gengroup())
2058
2059 def addchangegroup(self, source, srctype, url, emptyok=False):
2060 """Add the changegroup returned by source.read() to this repo.
2061 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2062 the URL of the repo where this changegroup is coming from.
2063
2064 Return an integer summarizing the change to this repo:
2065 - nothing changed or no source: 0
2066 - more heads than before: 1+added heads (2..n)
2067 - fewer heads than before: -1-removed heads (-2..-n)
2068 - number of heads stays the same: 1
2069 """
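        # Illustration (not part of the original file): decoding that
        # integer, assuming ret came back from addchangegroup().
        def _describe(ret):
            if ret == 0:
                return 'nothing changed or no source'
            if ret == 1:
                return 'changesets added, head count unchanged'
            if ret > 1:
                return '%d new heads' % (ret - 1)
            return '%d heads removed' % (-ret - 1)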
2070 def csmap(x):
2071 self.ui.debug("add changeset %s\n" % short(x))
2072 return len(cl)
2073
2074 def revmap(x):
2075 return cl.rev(x)
2076
2077 if not source:
2078 return 0
2079
2080 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2081
2082 changesets = files = revisions = 0
2083 efiles = set()
2084
2085 # write changelog data to temp files so concurrent readers will not see
2086 # inconsistent view
2087 cl = self.changelog
2088 cl.delayupdate()
2089 oldheads = len(cl.heads())
2090
2091 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
2092 try:
2093 trp = weakref.proxy(tr)
2094 # pull off the changeset group
2095 self.ui.status(_("adding changesets\n"))
2096 clstart = len(cl)
2097 class prog(object):
2098 step = _('changesets')
2099 count = 1
2100 ui = self.ui
2101 total = None
2102 def __call__(self):
2103 self.ui.progress(self.step, self.count, unit=_('chunks'),
2104 total=self.total)
2105 self.count += 1
2106 pr = prog()
2107 chunkiter = changegroup.chunkiter(source, progress=pr)
2108 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2109 raise util.Abort(_("received changelog group is empty"))
2110 clend = len(cl)
2111 changesets = clend - clstart
2112 for c in xrange(clstart, clend):
2113 efiles.update(self[c].files())
2114 efiles = len(efiles)
2115 self.ui.progress(_('changesets'), None)
2116
2117 # pull off the manifest group
2118 self.ui.status(_("adding manifests\n"))
2119 pr.step = _('manifests')
2120 pr.count = 1
2121 pr.total = changesets # manifests <= changesets
2122 chunkiter = changegroup.chunkiter(source, progress=pr)
2123 # no need to check for empty manifest group here:
2124 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2125 # no new manifest will be created and the manifest group will
2126 # be empty during the pull
2127 self.manifest.addgroup(chunkiter, revmap, trp)
2128 self.ui.progress(_('manifests'), None)
2129
2130 needfiles = {}
2131 if self.ui.configbool('server', 'validate', default=False):
2132 # validate incoming csets have their manifests
2133 for cset in xrange(clstart, clend):
2134 mfest = self.changelog.read(self.changelog.node(cset))[0]
2135 mfest = self.manifest.readdelta(mfest)
2136 # store file nodes we must see
2137 for f, n in mfest.iteritems():
2138 needfiles.setdefault(f, set()).add(n)
2139
2140 # process the files
2141 self.ui.status(_("adding file changes\n"))
2142 pr.step = 'files'
2143 pr.count = 1
2144 pr.total = efiles
2145 while 1:
2146 f = changegroup.getchunk(source)
2147 if not f:
2148 break
2149 self.ui.debug("adding %s revisions\n" % f)
2150 pr()
2151 fl = self.file(f)
2152 o = len(fl)
2153 chunkiter = changegroup.chunkiter(source)
2154 if fl.addgroup(chunkiter, revmap, trp) is None:
2155 raise util.Abort(_("received file revlog group is empty"))
2156 revisions += len(fl) - o
2157 files += 1
2158 if f in needfiles:
2159 needs = needfiles[f]
2160 for new in xrange(o, len(fl)):
2161 n = fl.node(new)
2162 if n in needs:
2163 needs.remove(n)
2164 if not needs:
2165 del needfiles[f]
2166 self.ui.progress(_('files'), None)
2167
2168 for f, needs in needfiles.iteritems():
2169 fl = self.file(f)
2170 for n in needs:
2171 try:
2172 fl.rev(n)
2173 except error.LookupError:
2174 raise util.Abort(
2175 _('missing file data for %s:%s - run hg verify') %
2176 (f, hex(n)))
2177
2178 newheads = len(cl.heads())
2179 heads = ""
2180 if oldheads and newheads != oldheads:
2181 heads = _(" (%+d heads)") % (newheads - oldheads)
2182
2183 self.ui.status(_("added %d changesets"
2184 " with %d changes to %d files%s\n")
2185 % (changesets, revisions, files, heads))
2186
2187 if changesets > 0:
2188 p = lambda: cl.writepending() and self.root or ""
2189 self.hook('pretxnchangegroup', throw=True,
2190 node=hex(cl.node(clstart)), source=srctype,
2191 url=url, pending=p)
2192
2193 # make changelog see real files again
2194 cl.finalize(trp)
2195
2196 tr.close()
2197 finally:
2198 tr.release()
2199
2200 if changesets > 0:
2201 # forcefully update the on-disk branch cache
2202 self.ui.debug("updating the branch cache\n")
2203 self.branchtags()
2204 self.hook("changegroup", node=hex(cl.node(clstart)),
2205 source=srctype, url=url)
2206
2207 for i in xrange(clstart, clend):
2208 self.hook("incoming", node=hex(cl.node(i)),
2209 source=srctype, url=url)
2210
2211 # never return 0 here:
2212 if newheads < oldheads:
2213 return newheads - oldheads - 1
2214 else:
2215 return newheads - oldheads + 1
2216
2217
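For reference, the method above drives the prechangegroup, pretxnchangegroup, changegroup and incoming hooks; a purely illustrative hgrc wiring them (the hook names on the left are invented) might read:

    [hooks]
    pretxnchangegroup.validate = python:myhooks.checkheads
    changegroup.notify = hg log -r $HG_NODE
    incoming.log = echo incoming $HG_NODE >> /tmp/hg-incoming.log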
2218 def stream_in(self, remote):
2219 fp = remote.stream_out()
2220 l = fp.readline()
2221 try:
2222 resp = int(l)
2223 except ValueError:
2224 raise error.ResponseError(
2225 _('Unexpected response from remote server:'), l)
2226 if resp == 1:
2227 raise util.Abort(_('operation forbidden by server'))
2228 elif resp == 2:
2229 raise util.Abort(_('locking the remote repository failed'))
2230 elif resp != 0:
2231 raise util.Abort(_('the server sent an unknown error code'))
2232 self.ui.status(_('streaming all changes\n'))
2233 l = fp.readline()
2234 try:
2235 total_files, total_bytes = map(int, l.split(' ', 1))
2236 except (ValueError, TypeError):
2237 raise error.ResponseError(
2238 _('Unexpected response from remote server:'), l)
2239 self.ui.status(_('%d files to transfer, %s of data\n') %
2240 (total_files, util.bytecount(total_bytes)))
2241 start = time.time()
2242 for i in xrange(total_files):
2243 # XXX doesn't support '\n' or '\r' in filenames
2244 l = fp.readline()
2245 try:
2246 name, size = l.split('\0', 1)
2247 size = int(size)
2248 except (ValueError, TypeError):
2249 raise error.ResponseError(
2250 _('Unexpected response from remote server:'), l)
2251 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2252 # for backwards compat, name was partially encoded
2253 ofp = self.sopener(store.decodedir(name), 'w')
2254 for chunk in util.filechunkiter(fp, limit=size):
2255 ofp.write(chunk)
2256 ofp.close()
2257 elapsed = time.time() - start
2258 if elapsed <= 0:
2259 elapsed = 0.001
2260 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2261 (util.bytecount(total_bytes), elapsed,
2262 util.bytecount(total_bytes / elapsed)))
2263 self.invalidate()
2264 return len(self.heads()) + 1
2265
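The wire framing stream_in() parses above is line-oriented: a response code, then 'total_files total_bytes', then one 'name\0size' header per file followed by raw revlog data. A self-contained sketch of the header handling (not a Mercurial API):

    import cStringIO

    def parse_stream_header(fp):
        resp = int(fp.readline())  # 0 ok, 1 forbidden, 2 lock failed
        if resp != 0:
            raise ValueError('server refused: %d' % resp)
        return map(int, fp.readline().split(' ', 1))  # [files, bytes]

    print parse_stream_header(cStringIO.StringIO('0\n3 123456\n'))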
2266 def clone(self, remote, heads=[], stream=False):
2267 '''clone remote repository.
2268
2269 keyword arguments:
2270 heads: list of revs to clone (forces use of pull)
2271 stream: use streaming clone if possible'''
2272
2273 # now, all clients that can request uncompressed clones can
2274 # read repo formats supported by all servers that can serve
2275 # them.
2276
2277 # if revlog format changes, client will have to check version
2278 # and format flags on "stream" capability, and use
2279 # uncompressed only if compatible.
2280
2281 if stream and not heads and remote.capable('stream'):
2282 return self.stream_in(remote)
2283 return self.pull(remote, heads)
2284
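A hypothetical driver for clone(), reusing the module-level instance() defined just below (the destination path is invented; u and other as in the earlier sketches):

    dest = instance(u, '/tmp/mirror', create=1)
    dest.clone(other, stream=True)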
2285 # used to avoid circular references so destructors work
2286 def aftertrans(files):
2287 renamefiles = [tuple(t) for t in files]
2288 def a():
2289 for src, dest in renamefiles:
2290 util.rename(src, dest)
2291 return a
2292
2293 def instance(ui, path, create):
2294 return localrepository(ui, util.drop_scheme('file', path), create)
2295
2296 def islocal(path):
2297 return True
@@ -7,7 +7,7 b''
7
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
10 import repo, changegroup, subrepo
10 import repo, changegroup, subrepo, discovery
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
@@ -1272,222 +1272,12 b' class localrepository(repo.repository):
1272
1273 return r
1274
1275 def findincoming(self, remote, base=None, heads=None, force=False):
1276 """Return list of roots of the subsets of missing nodes from remote
1277
1278 If base dict is specified, assume that these nodes and their parents
1279 exist on the remote side and that no child of a node of base exists
1280 in both remote and self.
1281 Furthermore base will be updated to include the nodes that exists
1282 in self and remote but no children exists in self and remote.
1283 If a list of heads is specified, return only nodes which are heads
1284 or ancestors of these heads.
1285
1286 All the ancestors of base are in self and in remote.
1287 All the descendants of the list returned are missing in self.
1288 (and so we know that the rest of the nodes are missing in remote, see
1289 outgoing)
1290 """
1291 return self.findcommonincoming(remote, base, heads, force)[1]
1292
1293 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1294 """Return a tuple (common, missing roots, heads) used to identify
1295 missing nodes from remote.
1296
1297 If base dict is specified, assume that these nodes and their parents
1298 exist on the remote side and that no child of a node of base exists
1299 in both remote and self.
1300 Furthermore base will be updated to include the nodes that exists
1301 in self and remote but no children exists in self and remote.
1302 If a list of heads is specified, return only nodes which are heads
1303 or ancestors of these heads.
1304
1305 All the ancestors of base are in self and in remote.
1306 """
1307 m = self.changelog.nodemap
1308 search = []
1309 fetch = set()
1310 seen = set()
1311 seenbranch = set()
1312 if base is None:
1313 base = {}
1314
1315 if not heads:
1316 heads = remote.heads()
1317
1318 if self.changelog.tip() == nullid:
1319 base[nullid] = 1
1320 if heads != [nullid]:
1321 return [nullid], [nullid], list(heads)
1322 return [nullid], [], []
1323
1324 # assume we're closer to the tip than the root
1325 # and start by examining the heads
1326 self.ui.status(_("searching for changes\n"))
1327
1328 unknown = []
1329 for h in heads:
1330 if h not in m:
1331 unknown.append(h)
1332 else:
1333 base[h] = 1
1334
1335 heads = unknown
1336 if not unknown:
1337 return base.keys(), [], []
1338
1339 req = set(unknown)
1340 reqcnt = 0
1341
1342 # search through remote branches
1343 # a 'branch' here is a linear segment of history, with four parts:
1344 # head, root, first parent, second parent
1345 # (a branch always has two parents (or none) by definition)
1346 unknown = remote.branches(unknown)
1347 while unknown:
1348 r = []
1349 while unknown:
1350 n = unknown.pop(0)
1351 if n[0] in seen:
1352 continue
1353
1354 self.ui.debug("examining %s:%s\n"
1355 % (short(n[0]), short(n[1])))
1356 if n[0] == nullid: # found the end of the branch
1357 pass
1358 elif n in seenbranch:
1359 self.ui.debug("branch already found\n")
1360 continue
1361 elif n[1] and n[1] in m: # do we know the base?
1362 self.ui.debug("found incomplete branch %s:%s\n"
1363 % (short(n[0]), short(n[1])))
1364 search.append(n[0:2]) # schedule branch range for scanning
1365 seenbranch.add(n)
1366 else:
1367 if n[1] not in seen and n[1] not in fetch:
1368 if n[2] in m and n[3] in m:
1369 self.ui.debug("found new changeset %s\n" %
1370 short(n[1]))
1371 fetch.add(n[1]) # earliest unknown
1372 for p in n[2:4]:
1373 if p in m:
1374 base[p] = 1 # latest known
1375
1376 for p in n[2:4]:
1377 if p not in req and p not in m:
1378 r.append(p)
1379 req.add(p)
1380 seen.add(n[0])
1381
1382 if r:
1383 reqcnt += 1
1384 self.ui.progress(_('searching'), reqcnt, unit=_('queries'))
1385 self.ui.debug("request %d: %s\n" %
1386 (reqcnt, " ".join(map(short, r))))
1387 for p in xrange(0, len(r), 10):
1388 for b in remote.branches(r[p:p + 10]):
1389 self.ui.debug("received %s:%s\n" %
1390 (short(b[0]), short(b[1])))
1391 unknown.append(b)
1392
1393 # do binary search on the branches we found
1394 while search:
1395 newsearch = []
1396 reqcnt += 1
1397 self.ui.progress(_('searching'), reqcnt, unit=_('queries'))
1398 for n, l in zip(search, remote.between(search)):
1399 l.append(n[1])
1400 p = n[0]
1401 f = 1
1402 for i in l:
1403 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1404 if i in m:
1405 if f <= 2:
1406 self.ui.debug("found new branch changeset %s\n" %
1407 short(p))
1408 fetch.add(p)
1409 base[i] = 1
1410 else:
1411 self.ui.debug("narrowed branch search to %s:%s\n"
1412 % (short(p), short(i)))
1413 newsearch.append((p, i))
1414 break
1415 p, f = i, f * 2
1416 search = newsearch
1417
1418 # sanity check our fetch list
1419 for f in fetch:
1420 if f in m:
1421 raise error.RepoError(_("already have changeset ")
1422 + short(f[:4]))
1423
1424 if base.keys() == [nullid]:
1425 if force:
1426 self.ui.warn(_("warning: repository is unrelated\n"))
1427 else:
1428 raise util.Abort(_("repository is unrelated"))
1429
1430 self.ui.debug("found new changesets starting at " +
1431 " ".join([short(f) for f in fetch]) + "\n")
1432
1433 self.ui.progress(_('searching'), None)
1434 self.ui.debug("%d total queries\n" % reqcnt)
1435
1436 return base.keys(), list(fetch), heads
1437
1438 def findoutgoing(self, remote, base=None, heads=None, force=False):
1439 """Return list of nodes that are roots of subsets not in remote
1440
1441 If base dict is specified, assume that these nodes and their parents
1442 exist on the remote side.
1443 If a list of heads is specified, return only nodes which are heads
1444 or ancestors of these heads, and return a second element which
1445 contains all remote heads which get new children.
1446 """
1447 if base is None:
1448 base = {}
1449 self.findincoming(remote, base, heads, force=force)
1450
1451 self.ui.debug("common changesets up to "
1452 + " ".join(map(short, base.keys())) + "\n")
1453
1454 remain = set(self.changelog.nodemap)
1455
1456 # prune everything remote has from the tree
1457 remain.remove(nullid)
1458 remove = base.keys()
1459 while remove:
1460 n = remove.pop(0)
1461 if n in remain:
1462 remain.remove(n)
1463 for p in self.changelog.parents(n):
1464 remove.append(p)
1465
1466 # find every node whose parents have been pruned
1467 subset = []
1468 # find every remote head that will get new children
1469 updated_heads = set()
1470 for n in remain:
1471 p1, p2 = self.changelog.parents(n)
1472 if p1 not in remain and p2 not in remain:
1473 subset.append(n)
1474 if heads:
1475 if p1 in heads:
1476 updated_heads.add(p1)
1477 if p2 in heads:
1478 updated_heads.add(p2)
1479
1480 # this is the set of all roots we have to push
1481 if heads:
1482 return subset, list(updated_heads)
1483 else:
1484 return subset
1485
1486 def pull(self, remote, heads=None, force=False):
1487 lock = self.lock()
1488 try:
1489 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1490 force=force)
1278 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1279 force=force)
1280 common, fetch, rheads = tmp
1491 if not fetch:
1492 self.ui.status(_("no changes found\n"))
1493 return 0
@@ -1530,135 +1320,6 b' class localrepository(repo.repository):
1530 return self.push_unbundle(remote, force, revs, newbranch)
1531 return self.push_addchangegroup(remote, force, revs, newbranch)
1532
1533 def prepush(self, remote, force, revs, newbranch):
1534 '''Analyze the local and remote repositories and determine which
1535 changesets need to be pushed to the remote. Return value depends
1536 on circumstances:
1537
1538 If we are not going to push anything, return a tuple (None,
1539 outgoing) where outgoing is 0 if there are no outgoing
1540 changesets and 1 if there are, but we refuse to push them
1541 (e.g. would create new remote heads).
1542
1543 Otherwise, return a tuple (changegroup, remoteheads), where
1544 changegroup is a readable file-like object whose read() returns
1545 successive changegroup chunks ready to be sent over the wire and
1546 remoteheads is the list of remote heads.'''
1547 common = {}
1548 remote_heads = remote.heads()
1549 inc = self.findincoming(remote, common, remote_heads, force=force)
1550
1551 cl = self.changelog
1552 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1553 outg, bases, heads = cl.nodesbetween(update, revs)
1554
1555 if not bases:
1556 self.ui.status(_("no changes found\n"))
1557 return None, 1
1558
1559 if not force and remote_heads != [nullid]:
1560
1561 def fail_multiple_heads(unsynced, branch=None):
1562 if branch:
1563 msg = _("abort: push creates new remote heads"
1564 " on branch '%s'!\n") % branch
1565 else:
1566 msg = _("abort: push creates new remote heads!\n")
1567 self.ui.warn(msg)
1568 if unsynced:
1569 self.ui.status(_("(you should pull and merge or"
1570 " use push -f to force)\n"))
1571 else:
1572 self.ui.status(_("(did you forget to merge?"
1573 " use push -f to force)\n"))
1574 return None, 0
1575
1576 if remote.capable('branchmap'):
1577 # Check for each named branch if we're creating new remote heads.
1578 # To be a remote head after push, node must be either:
1579 # - unknown locally
1580 # - a local outgoing head descended from update
1581 # - a remote head that's known locally and not
1582 # ancestral to an outgoing head
1583 #
1584 # New named branches cannot be created without --force.
1585
1586 # 1. Create set of branches involved in the push.
1587 branches = set(self[n].branch() for n in outg)
1588
1589 # 2. Check for new branches on the remote.
1590 remotemap = remote.branchmap()
1591 newbranches = branches - set(remotemap)
1592 if newbranches and not newbranch: # new branch requires --new-branch
1593 branchnames = ', '.join("%s" % b for b in newbranches)
1594 self.ui.warn(_("abort: push creates "
1595 "new remote branches: %s!\n")
1596 % branchnames)
1597 self.ui.status(_("(use 'hg push --new-branch' to create new "
1598 "remote branches)\n"))
1599 return None, 0
1600 branches.difference_update(newbranches)
1601
1602 # 3. Construct the initial oldmap and newmap dicts.
1603 # They contain information about the remote heads before and
1604 # after the push, respectively.
1605 # Heads not found locally are not included in either dict,
1606 # since they won't be affected by the push.
1607 # unsynced contains all branches with incoming changesets.
1608 oldmap = {}
1609 newmap = {}
1610 unsynced = set()
1611 for branch in branches:
1612 remoteheads = remotemap[branch]
1613 prunedheads = [h for h in remoteheads if h in cl.nodemap]
1614 oldmap[branch] = prunedheads
1615 newmap[branch] = list(prunedheads)
1616 if len(remoteheads) > len(prunedheads):
1617 unsynced.add(branch)
1618
1619 # 4. Update newmap with outgoing changes.
1620 # This will possibly add new heads and remove existing ones.
1621 ctxgen = (self[n] for n in outg)
1622 self._updatebranchcache(newmap, ctxgen)
1623
1624 # 5. Check for new heads.
1625 # If there are more heads after the push than before, a suitable
1626 # warning, depending on unsynced status, is displayed.
1627 for branch in branches:
1628 if len(newmap[branch]) > len(oldmap[branch]):
1629 return fail_multiple_heads(branch in unsynced, branch)
1630
1631 # 6. Check for unsynced changes on involved branches.
1632 if unsynced:
1633 self.ui.warn(_("note: unsynced remote changes!\n"))
1634
1635 else:
1636 # Old servers: Check for new topological heads.
1637 # Code based on _updatebranchcache.
1638 newheads = set(h for h in remote_heads if h in cl.nodemap)
1639 oldheadcnt = len(newheads)
1640 newheads.update(outg)
1641 if len(newheads) > 1:
1642 for latest in reversed(outg):
1643 if latest not in newheads:
1644 continue
1645 minhrev = min(cl.rev(h) for h in newheads)
1646 reachable = cl.reachable(latest, cl.node(minhrev))
1647 reachable.remove(latest)
1648 newheads.difference_update(reachable)
1649 if len(newheads) > oldheadcnt:
1650 return fail_multiple_heads(inc)
1651 if inc:
1652 self.ui.warn(_("note: unsynced remote changes!\n"))
1653
1654 if revs is None:
1655 # use the fast path, no race possible on push
1656 nodes = self.changelog.findmissing(common.keys())
1657 cg = self._changegroup(nodes, 'push')
1658 else:
1659 cg = self.changegroupsubset(update, revs, 'push')
1660 return cg, remote_heads
1661
1662 def push_addchangegroup(self, remote, force, revs, newbranch):
1663 '''Push a changegroup by locking the remote and sending the
1664 addchangegroup command to it. Used for local and old SSH repos.
@@ -1666,7 +1327,7 b' class localrepository(repo.repository):
1666 '''
1667 lock = remote.lock()
1668 try:
1669 ret = self.prepush(remote, force, revs, newbranch)
1330 ret = discovery.prepush(self, remote, force, revs, newbranch)
1670 if ret[0] is not None:
1671 cg, remote_heads = ret
1672 # here, we return an integer indicating remote head count change
@@ -1685,7 +1346,7 b' class localrepository(repo.repository):
1685 # different heads (someone else won commit/push race), server
1686 # aborts.
1687
1688 ret = self.prepush(remote, force, revs, newbranch)
1349 ret = discovery.prepush(self, remote, force, revs, newbranch)
1689 if ret[0] is not None:
1690 cg, remote_heads = ret
1691 if force:
@@ -6,7 +6,7 b''
6 # GNU General Public License version 2 or any later version.
7
8 import re
9 import parser, util, error
9 import parser, util, error, discovery
10 import match as _match
11
12 elements = {
@@ -420,7 +420,7 b' def outgoing(repo, subset, x):
420 dest, branches = hg.parseurl(dest)
421 other = hg.repository(hg.remoteui(repo, {}), dest)
422 repo.ui.pushbuffer()
423 o = repo.findoutgoing(other)
423 o = discovery.findoutgoing(repo, other)
424 repo.ui.popbuffer()
425 cl = repo.changelog
426 o = set([cl.rev(r) for r in repo.changelog.nodesbetween(o, None)[0]])
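With the predicate rewired to the new module, command-line behaviour is unchanged; the snippet above is roughly what an interactive session would do by hand (repo and other assumed open):

    o = discovery.findoutgoing(repo, other)
    outgoing = repo.changelog.nodesbetween(o, None)[0]
    print '%d outgoing changesets' % len(outgoing)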