fix errors reported by pyflakes test
Sune Foldager
r14184:4ab6e2d5 default
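The changeset removes two pieces of dead code flagged by pyflakes: an unused `error` import in discovery.py (line 10) and an unused local `fp` in localrepo.py's wwrite (line 664). As a minimal sketch, assuming pyflakes is installed, this hypothetical file reproduces the same two warning classes (file name and exact message wording are illustrative):

    # demo.py - reproduces the two pyflakes warning classes fixed here
    import os                  # 'os' imported but unused

    def write(data):
        fp = open('out', 'w')  # local variable 'fp' is assigned to but never used

    $ pyflakes demo.py
    demo.py:2: 'os' imported but unused
    demo.py:5: local variable 'fp' is assigned to but never used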
@@ -1,169 +1,169 @@
# discovery.py - protocol changeset discovery functions
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import nullid, short
from i18n import _
-import util, error, setdiscovery, treediscovery
+import util, setdiscovery, treediscovery

def findcommonincoming(repo, remote, heads=None, force=False):
    """Return a tuple (common, anyincoming, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "anyincoming" is testable as a boolean indicating if any nodes are missing
    locally. If remote does not support getbundle, this actually is a list of
    roots of the nodes that would be incoming, to be supplied to
    changegroupsubset. No code except for pull should be relying on this fact
    any longer.
    "heads" is either the supplied heads, or else the remote's heads.

    If you pass heads and they are all known locally, the response lists just
    these heads in "common" and in "heads".
    """

    if not remote.capable('getbundle'):
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        allknown = True
        nm = repo.changelog.nodemap
        for h in heads:
            if nm.get(h) is None:
                allknown = False
                break
        if allknown:
            return (heads, False, heads)

    res = setdiscovery.findcommonheads(repo.ui, repo, remote,
                                       abortwhenunrelated=not force)
    common, anyinc, srvheads = res
    return (list(common), anyinc, heads or list(srvheads))

def prepush(repo, remote, force, revs, newbranch):
    '''Analyze the local and remote repositories and determine which
    changesets need to be pushed to the remote. Return value depends
    on circumstances:

    If we are not going to push anything, return a tuple (None,
    outgoing) where outgoing is 0 if there are no outgoing
    changesets and 1 if there are, but we refuse to push them
    (e.g. would create new remote heads).

    Otherwise, return a tuple (changegroup, remoteheads), where
    changegroup is a readable file-like object whose read() returns
    successive changegroup chunks ready to be sent over the wire and
    remoteheads is the list of remote heads.'''
    common, inc, remoteheads = findcommonincoming(repo, remote, force=force)

    cl = repo.changelog
    outg = cl.findmissing(common, revs)

    if not outg:
        repo.ui.status(_("no changes found\n"))
        return None, 1

    if not force and remoteheads != [nullid]:
        if remote.capable('branchmap'):
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            # 1. Create set of branches involved in the push.
            branches = set(repo[n].branch() for n in outg)

            # 2. Check for new branches on the remote.
            remotemap = remote.branchmap()
            newbranches = branches - set(remotemap)
            if newbranches and not newbranch: # new branch requires --new-branch
                branchnames = ', '.join(sorted(newbranches))
                raise util.Abort(_("push creates new remote branches: %s!")
                                 % branchnames,
                                 hint=_("use 'hg push --new-branch' to create"
                                        " new remote branches"))
            branches.difference_update(newbranches)

            # 3. Construct the initial oldmap and newmap dicts.
            # They contain information about the remote heads before and
            # after the push, respectively.
            # Heads not found locally are not included in either dict,
            # since they won't be affected by the push.
            # unsynced contains all branches with incoming changesets.
            oldmap = {}
            newmap = {}
            unsynced = set()
            for branch in branches:
                remotebrheads = remotemap[branch]
                prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
                oldmap[branch] = prunedbrheads
                newmap[branch] = list(prunedbrheads)
                if len(remotebrheads) > len(prunedbrheads):
                    unsynced.add(branch)

            # 4. Update newmap with outgoing changes.
            # This will possibly add new heads and remove existing ones.
            ctxgen = (repo[n] for n in outg)
            repo._updatebranchcache(newmap, ctxgen)

        else:
            # 1-4b. old servers: Check for new topological heads.
            # Construct {old,new}map with branch = None (topological branch).
            # (code based on _updatebranchcache)
            oldheads = set(h for h in remoteheads if h in cl.nodemap)
            newheads = oldheads.union(outg)
            if len(newheads) > 1:
                for latest in reversed(outg):
                    if latest not in newheads:
                        continue
                    minhrev = min(cl.rev(h) for h in newheads)
                    reachable = cl.reachable(latest, cl.node(minhrev))
                    reachable.remove(latest)
                    newheads.difference_update(reachable)
            branches = set([None])
            newmap = {None: newheads}
            oldmap = {None: oldheads}
            unsynced = inc and branches or set()

        # 5. Check for new heads.
        # If there are more heads after the push than before, a suitable
        # error message, depending on unsynced status, is displayed.
        error = None
        for branch in branches:
            newhs = set(newmap[branch])
            oldhs = set(oldmap[branch])
            if len(newhs) > len(oldhs):
                if error is None:
                    if branch:
                        error = _("push creates new remote heads "
                                  "on branch '%s'!") % branch
                    else:
                        error = _("push creates new remote heads!")
                    if branch in unsynced:
                        hint = _("you should pull and merge or "
                                 "use push -f to force")
                    else:
                        hint = _("did you forget to merge? "
                                 "use push -f to force")
                if branch:
                    repo.ui.debug("new remote heads on branch '%s'\n" % branch)
                for h in (newhs - oldhs):
                    repo.ui.debug("new remote head %s\n" % short(h))
        if error:
            raise util.Abort(error, hint=hint)

        # 6. Check for unsynced changes on involved branches.
        if unsynced:
            repo.ui.warn(_("note: unsynced remote changes!\n"))

    if revs is None:
        # use the fast path, no race possible on push
        cg = repo._changegroup(outg, 'push')
    else:
        cg = repo.getbundle('push', heads=revs, common=common)
    return cg, remoteheads
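For context, a sketch of how a pull-side caller might consume findcommonincoming, per the docstring above (`repo` and `remote` are assumed objects; this is an illustration, not a verbatim caller from Mercurial):

    # hypothetical pull-side caller, for illustration only
    common, anyincoming, rheads = findcommonincoming(repo, remote, force=False)
    if not anyincoming:
        repo.ui.status(_("no changes found\n"))
    else:
        # "common" bounds the changegroup request: the remote only needs to
        # send what lies between the common heads and its own heads.
        cg = remote.getbundle('pull', common=common, heads=rheads)

The call shape mirrors the push path visible at the end of prepush, which requests repo.getbundle('push', heads=revs, common=common).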
@@ -1,1961 +1,1961 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'parentdelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.path_auditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.opener.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'parentdelta', False):
                    requirements.append("parentdelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener.read("requires").splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RequirementError(
                    _("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath"))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = {}
        if 'parentdelta' in requirements:
            self.sopener.options['parentdelta'] = 1

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @util.propertycache
    def _bookmarks(self):
        return bookmarks.read(self)

    @util.propertycache
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.options['defversion'] = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            r = self.changelog.rev(n)
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        return [(n in nm) for n in nodes]

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
-            fp = self.wopener.write(filename, data)
+            self.wopener.write(filename, data)
665 if 'x' in flags:
665 if 'x' in flags:
666 util.set_flags(self.wjoin(filename), False, True)
666 util.set_flags(self.wjoin(filename), False, True)
667
667
668 def wwritedata(self, filename, data):
668 def wwritedata(self, filename, data):
669 return self._filter(self._decodefilterpats, filename, data)
669 return self._filter(self._decodefilterpats, filename, data)
670
670
671 def transaction(self, desc):
671 def transaction(self, desc):
672 tr = self._transref and self._transref() or None
672 tr = self._transref and self._transref() or None
673 if tr and tr.running():
673 if tr and tr.running():
674 return tr.nest()
674 return tr.nest()
675
675
676 # abort here if the journal already exists
676 # abort here if the journal already exists
677 if os.path.exists(self.sjoin("journal")):
677 if os.path.exists(self.sjoin("journal")):
678 raise error.RepoError(
678 raise error.RepoError(
679 _("abandoned transaction found - run hg recover"))
679 _("abandoned transaction found - run hg recover"))
680
680
681 # save dirstate for rollback
681 # save dirstate for rollback
682 try:
682 try:
683 ds = self.opener.read("dirstate")
683 ds = self.opener.read("dirstate")
684 except IOError:
684 except IOError:
685 ds = ""
685 ds = ""
686 self.opener.write("journal.dirstate", ds)
686 self.opener.write("journal.dirstate", ds)
687 self.opener.write("journal.branch",
687 self.opener.write("journal.branch",
688 encoding.fromlocal(self.dirstate.branch()))
688 encoding.fromlocal(self.dirstate.branch()))
689 self.opener.write("journal.desc",
689 self.opener.write("journal.desc",
690 "%d\n%s\n" % (len(self), desc))
690 "%d\n%s\n" % (len(self), desc))
691
691
692 renames = [(self.sjoin("journal"), self.sjoin("undo")),
692 renames = [(self.sjoin("journal"), self.sjoin("undo")),
693 (self.join("journal.dirstate"), self.join("undo.dirstate")),
693 (self.join("journal.dirstate"), self.join("undo.dirstate")),
694 (self.join("journal.branch"), self.join("undo.branch")),
694 (self.join("journal.branch"), self.join("undo.branch")),
695 (self.join("journal.desc"), self.join("undo.desc"))]
695 (self.join("journal.desc"), self.join("undo.desc"))]
696 tr = transaction.transaction(self.ui.warn, self.sopener,
696 tr = transaction.transaction(self.ui.warn, self.sopener,
697 self.sjoin("journal"),
697 self.sjoin("journal"),
698 aftertrans(renames),
698 aftertrans(renames),
699 self.store.createmode)
699 self.store.createmode)
700 self._transref = weakref.ref(tr)
700 self._transref = weakref.ref(tr)
701 return tr
701 return tr
702
702
    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    args = self.opener.read("undo.desc").splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s)\n") % (
                                 int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                if os.path.exists(self.join('undo.bookmarks')):
                    util.rename(self.join('undo.bookmarks'),
                                self.join('bookmarks'))
                try:
                    branch = self.opener.read("undo.branch")
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("named branch could not be reset, "
                                   "current branch is still: %s\n")
                                 % self.dirstate.branch())
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
                parents = tuple([p.rev() for p in self.parents()])
                if len(parents) > 1:
                    self.ui.status(_("working directory now based on "
                                     "revisions %d and %d\n") % parents)
                else:
                    self.ui.status(_("working directory now based on "
                                     "revision %d\n") % parents)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

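    # Illustrative sketch, not part of the original file: probing a rollback
    # before committing to it, assuming `repo` is a localrepository. The
    # `ok_to_proceed` check is a hypothetical caller-side decision.
    #
    #   repo.rollback(dryrun=True)   # only prints what would be undone
    #   if ok_to_proceed:
    #       repo.rollback()          # restores undo.* files and dirstate
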
    def invalidatecaches(self):
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidate(self):
        for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
            if a in self.__dict__:
                delattr(self, a)
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, self.store.write,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

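    # Illustrative sketch, not part of the original file: the lock ordering
    # used by the write paths in this class (see commit() and rollback()):
    # wlock before lock, released in reverse order.
    #
    #   wlock = repo.wlock()      # protects dirstate and other .hg files
    #   try:
    #       lock = repo.lock()    # protects .hg/store
    #       try:
    #           ...               # modify the store, open a transaction
    #       finally:
    #           lock.release()
    #   finally:
    #       wlock.release()
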
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

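    # Illustrative sketch, not part of the original file: the shape of the
    # rename metadata _filecommit() stores in the filelog when a copy is
    # detected. The values shown are invented placeholders.
    #
    #   meta = {
    #       "copy": "foo",       # source path of the rename
    #       "copyrev": "a3c...", # hex filenode of the copy source
    #   }
    #   # fparent1 is forced to nullid so readers know to consult the
    #   # copy data instead of the first parent
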
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

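    # Illustrative sketch, not part of the original file: committing a
    # subset of files programmatically. `repo`, the path, and the user
    # string are assumptions; matchmod is the match module imported above.
    #
    #   m = matchmod.match(repo.root, '', ['src/a.py'])  # explicit files
    #   node = repo.commit(text="fix a bug", user="me <me@example.com>",
    #                      match=m)
    #   # returns the new changeset node, or None if nothing changed
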
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

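    # Illustrative sketch, not part of the original file: consuming the
    # seven-element status tuple, assuming `repo` is a localrepository.
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, ignored=True, clean=True)
    #   for f in modified:
    #       print 'M %s' % f    # py2-era codebase, hence the print statement
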
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

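    # Illustrative sketch, not part of the original file: listing the open
    # heads of the branch the working directory is on, assuming short() is
    # available from the node module.
    #
    #   for node in repo.branchheads():          # newest first
    #       repo.ui.write("%s\n" % short(node))
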
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

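    # Illustrative sketch, not part of the original file: between() samples
    # first-parent ancestors at exponentially growing distances from `top`,
    # so for a linear chain it collects the nodes 1, 2, 4, 8, ... steps
    # below top. Hypothetical call, one (top, bottom) pair:
    #
    #   [samples] = repo.between([(tipnode, ancestornode)])
    #   # samples holds the nodes at distance 1, 2, 4, ... below tipnode
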
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result

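    # Illustrative sketch, not part of the original file: driving pull()
    # from an extension. The use of hg.repository() to resolve the remote
    # peer, and the URL, are assumptions.
    #
    #   other = hg.repository(repo.ui, 'http://example.com/repo')
    #   result = repo.pull(other)    # 0 means nothing was fetched
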
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push.  once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

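    # Illustrative sketch, not part of the original file: interpreting the
    # return value of push() per the docstring above; `other` is an assumed
    # remote peer.
    #
    #   ret = repo.push(other)
    #   if ret == 0:
    #       repo.ui.warn("push failed or nothing to push\n")
    #   elif ret == 1:
    #       repo.ui.status("pushed; remote head count unchanged\n")
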
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)

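    # Illustrative sketch, not part of the original file: the set algebra
    # getbundle() implements, written in revset-like pseudocode.
    #
    #   bundled = ancestors(heads) - ancestors(common)
    #   # i.e. everything the caller is missing below the requested heads;
    #   # the method returns None when that difference is empty
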
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            for n in missing:
                if revlog.linkrev(revlog.rev(n)) not in commonrevs:
                    yield n

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})
                first = True

                for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
                                              bundler):
                    if first:
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

1587 def _changegroup(self, nodes, source):
1587 def _changegroup(self, nodes, source):
1588 """Compute the changegroup of all nodes that we have that a recipient
1588 """Compute the changegroup of all nodes that we have that a recipient
1589 doesn't. Return a chunkbuffer object whose read() method will return
1589 doesn't. Return a chunkbuffer object whose read() method will return
1590 successive changegroup chunks.
1590 successive changegroup chunks.
1591
1591
1592 This is much easier than the previous function as we can assume that
1592 This is much easier than the previous function as we can assume that
1593 the recipient has any changenode we aren't sending them.
1593 the recipient has any changenode we aren't sending them.
1594
1594
1595 nodes is the set of nodes to send"""
1595 nodes is the set of nodes to send"""
1596
1596
1597 cl = self.changelog
1597 cl = self.changelog
1598 mf = self.manifest
1598 mf = self.manifest
1599 mfs = {}
1599 mfs = {}
1600 changedfiles = set()
1600 changedfiles = set()
1601 fstate = ['']
1601 fstate = ['']
1602 count = [0]
1602 count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
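        # As this code uses it, bundle10 calls lookup(revlog, node) for each
        # revision it packs; the changelog node returned becomes the chunk's
        # linknode, and the calls double as progress ticks (a reading of the
        # code above, not a documented API contract).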

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                first = True
                for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
                    if first:
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr
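            # Assumed callback protocol, inferred from this code rather than
            # a documented interface: the source invokes source.callback once
            # per chunk it consumes, so pr() advances the progress bar while
            # addgroup() drains the stream.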

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)
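            # needfiles maps filename -> set of expected filenodes, e.g.
            # {'foo.py': set([fnode1, fnode2])} (illustrative shape only);
            # entries are consumed as the filelogs arrive below.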

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
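                # new heads whose changesets carry the 'close' extra are
                # closed branch heads (e.g. from 'hg commit --close-branch')
                # and are excluded from the head delta reported below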
1804 htext = ""
1804 htext = ""
1805 if dh:
1805 if dh:
1806 htext = _(" (%+d heads)") % dh
1806 htext = _(" (%+d heads)") % dh
1807
1807
1808 self.ui.status(_("added %d changesets"
1808 self.ui.status(_("added %d changesets"
1809 " with %d changes to %d files%s\n")
1809 " with %d changes to %d files%s\n")
1810 % (changesets, revisions, files, htext))
1810 % (changesets, revisions, files, htext))
1811
1811
1812 if changesets > 0:
1812 if changesets > 0:
1813 p = lambda: cl.writepending() and self.root or ""
1813 p = lambda: cl.writepending() and self.root or ""
1814 self.hook('pretxnchangegroup', throw=True,
1814 self.hook('pretxnchangegroup', throw=True,
1815 node=hex(cl.node(clstart)), source=srctype,
1815 node=hex(cl.node(clstart)), source=srctype,
1816 url=url, pending=p)
1816 url=url, pending=p)
1817
1817
1818 # make changelog see real files again
1818 # make changelog see real files again
1819 cl.finalize(trp)
1819 cl.finalize(trp)
1820
1820
1821 tr.close()
1821 tr.close()
1822 finally:
1822 finally:
1823 tr.release()
1823 tr.release()
1824 if lock:
1824 if lock:
1825 lock.release()
1825 lock.release()
1826
1826
1827 if changesets > 0:
1827 if changesets > 0:
1828 # forcefully update the on-disk branch cache
1828 # forcefully update the on-disk branch cache
1829 self.ui.debug("updating the branch cache\n")
1829 self.ui.debug("updating the branch cache\n")
1830 self.updatebranchcache()
1830 self.updatebranchcache()
1831 self.hook("changegroup", node=hex(cl.node(clstart)),
1831 self.hook("changegroup", node=hex(cl.node(clstart)),
1832 source=srctype, url=url)
1832 source=srctype, url=url)
1833
1833
1834 for i in xrange(clstart, clend):
1834 for i in xrange(clstart, clend):
1835 self.hook("incoming", node=hex(cl.node(i)),
1835 self.hook("incoming", node=hex(cl.node(i)),
1836 source=srctype, url=url)
1836 source=srctype, url=url)
1837
1837
1838 # never return 0 here:
1838 # never return 0 here:
1839 if dh < 0:
1839 if dh < 0:
1840 return dh - 1
1840 return dh - 1
1841 else:
1841 else:
1842 return dh + 1
1842 return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
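            # Shape of the stream_out response as parsed below (a sketch
            # derived from this consumer, not a protocol spec):
            #   <status>\n              -- 0 ok, 1 forbidden, 2 lock failed
            #   <total_files> <total_bytes>\n
            #   then per file: <store name>\0<size>\n followed by exactly
            #   <size> bytes of raw data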
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
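                # e.g. a server might advertise (hypothetical value)
                # 'revlogv1,generaldelta', which parses here to
                # set(['revlogv1', 'generaldelta'])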
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret
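    # Illustrative pushkey call for the 'bookmarks' namespace (hypothetical
    # key and value):
    #   repo.pushkey('bookmarks', 'featurebook', '', newhex)
    # attempts a compare-and-set from '' (unset) to newhex and returns
    # whether it succeeded.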

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
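    # e.g. repo.listkeys('bookmarks') would return a dict of bookmark names
    # to hex nodes, such as {'featurebook': '6a3c...'} (illustrative values).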

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
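# A sketch of the intended call site (hypothetical names): the closure is
# handed to a transaction as its post-close callback, e.g.
#   tr = transaction.transaction(ui.warn, opener, journal,
#                                after=aftertrans(renames))
# Since a() closes over a plain list of tuples instead of the repo object,
# the transaction holds no reference back to the repo and destructors run.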

def instance(ui, path, create):
    return localrepository(ui, util.localpath(path), create)

def islocal(path):
    return True