branchmap: extract updatebranchcache from repo
Pierre-Yves David
r18121:f8a13f06 default
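This changeset moves branch-head cache maintenance out of the localrepository class and into the branchmap module: the repo method updatebranchcache() is deleted and replaced by a module-level branchmap.updatecache(repo) function. A minimal before/after sketch of a call site (illustrative only; `repo` stands for any localrepository instance):

    # before this changeset: the cache refresh was a repo method
    repo.updatebranchcache()

    # after this changeset: callers go through the branchmap module
    import branchmap
    branchmap.updatecache(repo)
    heads_by_branch = repo._branchcache  # {branch name: [head nodes]}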
branchmap.py
@@ -1,113 +1,145 @@
 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from node import bin, hex, nullid, nullrev
 import encoding

 def read(repo):
     partial = {}
     try:
         f = repo.opener("cache/branchheads")
         lines = f.read().split('\n')
         f.close()
     except (IOError, OSError):
         return {}, nullid, nullrev

     try:
         last, lrev = lines.pop(0).split(" ", 1)
         last, lrev = bin(last), int(lrev)
         if lrev >= len(repo) or repo[lrev].node() != last:
             # invalidate the cache
             raise ValueError('invalidating branch cache (tip differs)')
         for l in lines:
             if not l:
                 continue
             node, label = l.split(" ", 1)
             label = encoding.tolocal(label.strip())
             if not node in repo:
                 raise ValueError('invalidating branch cache because node '+
                                  '%s does not exist' % node)
             partial.setdefault(label, []).append(bin(node))
     except KeyboardInterrupt:
         raise
     except Exception, inst:
         if repo.ui.debugflag:
             repo.ui.warn(str(inst), '\n')
         partial, last, lrev = {}, nullid, nullrev
     return partial, last, lrev

 def write(repo, branches, tip, tiprev):
     try:
         f = repo.opener("cache/branchheads", "w", atomictemp=True)
         f.write("%s %s\n" % (hex(tip), tiprev))
         for label, nodes in branches.iteritems():
             for node in nodes:
                 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
         f.close()
     except (IOError, OSError):
         pass

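Both functions above agree on the on-disk format of .hg/cache/branchheads: the first line records the hex tip node and its revision number, and each following line pairs one branch head's hex node with its branch name. A minimal stand-alone parser for that format, assuming `data` holds the raw file text (a sketch for illustration, not part of this changeset):

    def parsebranchheads(data):
        # first line: "<40-digit hex tip node> <tip revision number>"
        lines = data.split('\n')
        tiphex, tiprev = lines.pop(0).split(" ", 1)
        heads = {}
        # remaining lines: "<40-digit hex head node> <branch name>"
        for line in lines:
            if not line:
                continue
            nodehex, label = line.split(" ", 1)
            heads.setdefault(label.strip(), []).append(nodehex)
        return heads, tiphex, int(tiprev)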
 def update(repo, partial, ctxgen):
     """Given a branchhead cache, partial, that may have extra nodes or be
     missing heads, and a generator of nodes that are at least a superset of
     heads missing, this function updates partial to be correct.
     """
     # collect new branch entries
     newbranches = {}
     for c in ctxgen:
         newbranches.setdefault(c.branch(), []).append(c.node())
     # if older branchheads are reachable from new ones, they aren't
     # really branchheads. Note checking parents is insufficient:
     # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
     for branch, newnodes in newbranches.iteritems():
         bheads = partial.setdefault(branch, [])
         # Remove candidate heads that no longer are in the repo (e.g., as
         # the result of a strip that just happened). Avoid using 'node in
         # self' here because that dives down into branchcache code somewhat
         # recursively.
         bheadrevs = [repo.changelog.rev(node) for node in bheads
                      if repo.changelog.hasnode(node)]
         newheadrevs = [repo.changelog.rev(node) for node in newnodes
                        if repo.changelog.hasnode(node)]
         ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
         # Remove duplicates - nodes that are in newheadrevs and are already
         # in bheadrevs. This can happen if you strip a node whose parent
         # was already a head (because they're on different branches).
         bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

         # Starting from tip means fewer passes over reachable. If we know
         # the new candidates are not ancestors of existing heads, we don't
         # have to examine ancestors of existing heads
         if ctxisnew:
             iterrevs = sorted(newheadrevs)
         else:
             iterrevs = list(bheadrevs)

         # This loop prunes out two kinds of heads - heads that are
         # superseded by a head in newheadrevs, and newheadrevs that are not
         # heads because an existing head is their descendant.
         while iterrevs:
             latest = iterrevs.pop()
             if latest not in bheadrevs:
                 continue
             ancestors = set(repo.changelog.ancestors([latest],
                                                      bheadrevs[0]))
             if ancestors:
                 bheadrevs = [b for b in bheadrevs if b not in ancestors]
         partial[branch] = [repo.changelog.node(rev) for rev in bheadrevs]

     # There may be branches that cease to exist when the last commit in the
     # branch was stripped. This code filters them out. Note that the
     # branch that ceased to exist may not be in newbranches because
     # newbranches is the set of candidate heads, which when you strip the
     # last commit in a branch will be the parent branch.
     for branch in partial.keys():
         nodes = [head for head in partial[branch]
                  if repo.changelog.hasnode(head)]
         if not nodes:
             del partial[branch]

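The comment in update() about parents being insufficient deserves a concrete illustration: in the history 1 (branch a) -> 2 (branch b) -> 3 (branch a), revision 1 is not a parent of revision 3, yet once 3 exists it is no longer a head of branch a, so pruning must consider full ancestry. A toy model of that pruning pass (plain integers and a parent map, not the Mercurial API):

    def toyancestors(parents, rev):
        # all strict ancestors of rev, by a simple graph walk
        seen, stack = set(), list(parents.get(rev, []))
        while stack:
            r = stack.pop()
            if r not in seen:
                seen.add(r)
                stack.extend(parents.get(r, []))
        return seen

    def toyprune(parents, candidates):
        # drop every candidate head that is an ancestor of another one,
        # working from the highest revision down, as update() does
        heads = sorted(set(candidates))
        for rev in list(reversed(heads)):
            if rev not in heads:
                continue  # already pruned by a descendant
            anc = toyancestors(parents, rev)
            heads = [h for h in heads if h not in anc]
        return heads

    parents = {2: [1], 3: [2]}
    assert toyprune(parents, [1, 3]) == [3]  # rev 1 loses its head status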
+def updatecache(repo):
+    repo = repo.unfiltered()  # Until we get a smarter cache management
+    cl = repo.changelog
+    tip = cl.tip()
+    if repo._branchcache is not None and repo._branchcachetip == tip:
+        return
+
+    oldtip = repo._branchcachetip
+    if oldtip is None or oldtip not in cl.nodemap:
+        partial, last, lrev = read(repo)
+    else:
+        lrev = cl.rev(oldtip)
+        partial = repo._branchcache
+
+    catip = repo._cacheabletip()
+    # if lrev == catip: cache is already up to date
+    # if lrev > catip: we have uncachable element in `partial` can't write
+    # on disk
+    if lrev < catip:
+        ctxgen = (repo[r] for r in cl.revs(lrev + 1, catip))
+        update(repo, partial, ctxgen)
+        write(repo, partial, cl.node(catip), catip)
+        lrev = catip
+    # If cacheable tip were lower than actual tip, we need to update the
+    # cache up to tip. This update (from cacheable to actual tip) is not
+    # written to disk since it's not cacheable.
+    tiprev = len(repo) - 1
+    if lrev < tiprev:
+        ctxgen = (repo[r] for r in cl.revs(lrev + 1, tiprev))
+        update(repo, partial, ctxgen)
+    repo._branchcache = partial
+    repo._branchcachetip = tip
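The new updatecache() works in at most two phases, split at _cacheabletip(). A commented walkthrough with hypothetical revision numbers (the values are made up for illustration):

    # Suppose the on-disk cache was valid up to lrev, _cacheabletip() caps
    # persistence at catip, and the repository tip is tiprev:
    lrev, catip, tiprev = 100, 120, 125

    if lrev < catip:
        # phase 1: revs 101..120 are folded into `partial` via update()
        # and persisted with write(); the cache file now claims rev 120.
        lrev = catip
    if lrev < tiprev:
        # phase 2: revs 121..125 are folded in too, but only the in-memory
        # repo._branchcache sees them; nothing further is written to disk.
        pass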
localrepo.py
@@ -1,2619 +1,2586 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from node import hex, nullid, short
 from i18n import _
 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock, transaction, store, encoding, base85
 import scmutil, util, extensions, hook, error, revset
 import match as matchmod
 import merge as mergemod
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect
 import branchmap
 propertycache = util.propertycache
 filecache = scmutil.filecache

 class repofilecache(filecache):
     """All filecache usage on repo are done for logic that should be unfiltered
     """

     def __get__(self, repo, type=None):
         return super(repofilecache, self).__get__(repo.unfiltered(), type)
     def __set__(self, repo, value):
         return super(repofilecache, self).__set__(repo.unfiltered(), value)
     def __delete__(self, repo):
         return super(repofilecache, self).__delete__(repo.unfiltered())

 class storecache(repofilecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)

 class unfilteredpropertycache(propertycache):
     """propertycache that apply to unfiltered repo only"""

     def __get__(self, repo, type=None):
         return super(unfilteredpropertycache, self).__get__(repo.unfiltered())

 class filteredpropertycache(propertycache):
     """propertycache that must take filtering in account"""

     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)


 def hasunfilteredcache(repo, name):
     """check if an repo and a unfilteredproperty cached value for <name>"""
     return name in vars(repo.unfiltered())

 def unfilteredmethod(orig):
     """decorate method that always need to be run on unfiltered version"""
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
     return wrapper

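All of these helpers funnel cached state through the unfiltered repository, so a filtered view and its underlying repo share one cache. A toy demonstration of the contract, reusing the unfilteredmethod helper defined just above (stand-in classes, not the real repo):

    class toyview(object):
        # minimal stand-in exposing only unfiltered()
        def __init__(self, base=None):
            self._base = base
        def unfiltered(self):
            return self._base or self

    @unfilteredmethod
    def whoami(repo):
        return repo

    base = toyview()
    filtered = toyview(base)
    assert whoami(filtered) is base  # the view is unwrapped before the call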
 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

 class localpeer(peer.peerrepository):
     '''peer for a local repo; reflects only the most recent API'''

     def __init__(self, repo, caps=MODERNCAPS):
         peer.peerrepository.__init__(self)
         self._repo = repo
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
         self.requirements = repo.requirements
         self.supportedformats = repo.supportedformats

     def close(self):
         self._repo.close()

     def _capabilities(self):
         return self._caps

     def local(self):
         return self._repo

     def canpush(self):
         return True

     def url(self):
         return self._repo.url()

     def lookup(self, key):
         return self._repo.lookup(key)

     def branchmap(self):
         return discovery.visiblebranchmap(self._repo)

     def heads(self):
         return discovery.visibleheads(self._repo)

     def known(self, nodes):
         return self._repo.known(nodes)

     def getbundle(self, source, heads=None, common=None):
         return self._repo.getbundle(source, heads=heads, common=common)

     # TODO We might want to move the next two calls into legacypeer and add
     # unbundle instead.

     def lock(self):
         return self._repo.lock()

     def addchangegroup(self, cg, source, url):
         return self._repo.addchangegroup(cg, source, url)

     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)

     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)

     def debugwireargs(self, one, two, three=None, four=None, five=None):
         '''used to test argument passing over the wire'''
         return "%s %s %s %s %s" % (one, two, three, four, five)

 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''

     def __init__(self, repo):
         localpeer.__init__(self, repo, caps=LEGACYCAPS)

     def branches(self, nodes):
         return self._repo.branches(nodes)

     def between(self, pairs):
         return self._repo.between(pairs)

     def changegroup(self, basenodes, source):
         return self._repo.changegroup(basenodes, source)

     def changegroupsubset(self, bases, heads, source):
         return self._repo.changegroupsubset(bases, heads, source)

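localpeer narrows a local repository to the same surface that remote peers offer, so code built on the peer API works unchanged against local repos. A hedged usage sketch (assuming `repo` is a localrepository; peer() is defined further down in this file):

    peer = repo.peer()                  # fresh localpeer, never cached
    if peer.canpush():
        tipnode = peer.lookup('tip')    # delegates to repo.lookup('tip')
        heads = peer.branchmap()        # visibility-filtered branch map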
 class localrepository(object):

     supportedformats = set(('revlogv1', 'generaldelta'))
     supported = supportedformats | set(('store', 'fncache', 'shared',
                                         'dotencode'))
     openerreqs = set(('revlogv1', 'generaldelta'))
     requirements = ['revlogv1']

     def _baserequirements(self, create):
         return self.requirements[:]

     def __init__(self, baseui, path=None, create=False):
         self.wvfs = scmutil.vfs(path, expand=True)
         self.wopener = self.wvfs
         self.root = self.wvfs.base
         self.path = self.wvfs.join(".hg")
         self.origroot = path
         self.auditor = scmutil.pathauditor(self.root, self._checknested)
         self.vfs = scmutil.vfs(self.path)
         self.opener = self.vfs
         self.baseui = baseui
         self.ui = baseui.copy()
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass

         if not self.vfs.isdir():
             if create:
                 if not self.wvfs.exists():
                     self.wvfs.makedirs()
                 self.vfs.makedir(notindexed=True)
                 requirements = self._baserequirements(create)
                 if self.ui.configbool('format', 'usestore', True):
                     self.vfs.mkdir("store")
                     requirements.append("store")
                     if self.ui.configbool('format', 'usefncache', True):
                         requirements.append("fncache")
                         if self.ui.configbool('format', 'dotencode', True):
                             requirements.append('dotencode')
                     # create an invalid changelog
                     self.vfs.append(
                         "00changelog.i",
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
                 if self.ui.configbool('format', 'generaldelta', False):
                     requirements.append("generaldelta")
                 requirements = set(requirements)
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             try:
                 requirements = scmutil.readrequires(self.vfs, self.supported)
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
                 requirements = set()

         self.sharedpath = self.path
         try:
             s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
             if not os.path.exists(s):
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError, inst:
             if inst.errno != errno.ENOENT:
                 raise

         self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sopener = self.svfs
         self.sjoin = self.store.join
         self.vfs.createmode = self.store.createmode
         self._applyrequirements(requirements)
         if create:
             self._writerequirements()


         self._branchcache = None
         self._branchcachetip = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None

         # A cache for various files under .hg/ that tracks file changes,
         # (used by the filecache decorator)
         #
         # Maps a property name to its util.filecacheentry
         self._filecache = {}

         # hold sets of revision to be filtered
         # should be cleared when something might have changed the filter value:
         # - new changesets,
         # - phase change,
         # - new obsolescence marker,
         # - working directory parent change,
         # - bookmark changes
         self.filteredrevcache = {}

     def close(self):
         pass

     def _restrictcapabilities(self, caps):
         return caps

     def _applyrequirements(self, requirements):
         self.requirements = requirements
         self.sopener.options = dict((r, 1) for r in requirements
                                     if r in self.openerreqs)

     def _writerequirements(self):
         reqfile = self.opener("requires", "w")
         for r in self.requirements:
             reqfile.write("%s\n" % r)
         reqfile.close()

     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
             return False
         subpath = path[len(self.root) + 1:]
         normsubpath = util.pconvert(subpath)

         # XXX: Checking against the current working copy is wrong in
         # the sense that it can reject things like
         #
         # $ hg cat -r 10 sub/x.txt
         #
         # if sub/ is no longer a subrepository in the working copy
         # parent revision.
         #
         # However, it can of course also allow things that would have
         # been rejected before, such as the above cat command if sub/
         # is a subrepository now, but was a normal directory before.
         # The old path auditor would have rejected by mistake since it
         # panics when it sees sub/.hg/.
         #
         # All in all, checking against the working copy seems sensible
         # since we want to prevent access to nested repositories on
         # the filesystem *now*.
         ctx = self[None]
         parts = util.splitpath(subpath)
         while parts:
             prefix = '/'.join(parts)
             if prefix in ctx.substate:
                 if prefix == normsubpath:
                     return True
                 else:
                     sub = ctx.sub(prefix)
                     return sub.checknested(subpath[len(prefix) + 1:])
             else:
                 parts.pop()
         return False

     def peer(self):
         return localpeer(self) # not cached to avoid reference cycle

     def unfiltered(self):
         """Return unfiltered version of the repository

         Intended to be ovewritten by filtered repo."""
         return self

     def filtered(self, name):
         """Return a filtered version of a repository"""
         # build a new class with the mixin and the current class
         # (possibily subclass of the repo)
         class proxycls(repoview.repoview, self.unfiltered().__class__):
             pass
         return proxycls(self, name)

     @repofilecache('bookmarks')
     def _bookmarks(self):
         return bookmarks.bmstore(self)

     @repofilecache('bookmarks.current')
     def _bookmarkcurrent(self):
         return bookmarks.readcurrent(self)

     def bookmarkheads(self, bookmark):
         name = bookmark.split('@', 1)[0]
         heads = []
         for mark, n in self._bookmarks.iteritems():
             if mark.split('@', 1)[0] == name:
                 heads.append(n)
         return heads

     @storecache('phaseroots')
     def _phasecache(self):
         return phases.phasecache(self, self._phasedefaults)

     @storecache('obsstore')
     def obsstore(self):
         store = obsolete.obsstore(self.sopener)
         if store and not obsolete._enabled:
             # message is rare enough to not be translated
             msg = 'obsolete feature not enabled but %i markers found!\n'
             self.ui.warn(msg % len(list(store)))
         return store

     @unfilteredpropertycache
     def hiddenrevs(self):
         """hiddenrevs: revs that should be hidden by command and tools

         This set is carried on the repo to ease initialization and lazy
         loading; it'll probably move back to changelog for efficiency and
         consistency reasons.

         Note that the hiddenrevs will needs invalidations when
         - a new changesets is added (possible unstable above extinct)
         - a new obsolete marker is added (possible new extinct changeset)

         hidden changesets cannot have non-hidden descendants
         """
         hidden = set()
         if self.obsstore:
             ### hide extinct changeset that are not accessible by any mean
             hiddenquery = 'extinct() - ::(. + bookmark())'
             hidden.update(self.revs(hiddenquery))
         return hidden

     @storecache('00changelog.i')
     def changelog(self):
         c = changelog.changelog(self.sopener)
         if 'HG_PENDING' in os.environ:
             p = os.environ['HG_PENDING']
             if p.startswith(self.root):
                 c.readpending('00changelog.i.a')
         return c

     @storecache('00manifest.i')
     def manifest(self):
         return manifest.manifest(self.sopener)

     @repofilecache('dirstate')
     def dirstate(self):
         warned = [0]
         def validate(node):
             try:
                 self.changelog.rev(node)
                 return node
             except error.LookupError:
                 if not warned[0]:
                     warned[0] = True
                     self.ui.warn(_("warning: ignoring unknown"
                                    " working parent %s!\n") % short(node))
                 return nullid

         return dirstate.dirstate(self.opener, self.ui, self.root, validate)

     def __getitem__(self, changeid):
         if changeid is None:
             return context.workingctx(self)
         return context.changectx(self, changeid)

     def __contains__(self, changeid):
         try:
             return bool(self.lookup(changeid))
         except error.RepoLookupError:
             return False

     def __nonzero__(self):
         return True

     def __len__(self):
         return len(self.changelog)

     def __iter__(self):
         return iter(self.changelog)

     def revs(self, expr, *args):
         '''Return a list of revisions matching the given revset'''
         expr = revset.formatspec(expr, *args)
         m = revset.match(None, expr)
         return [r for r in m(self, list(self))]

     def set(self, expr, *args):
         '''
         Yield a context for each matching revision, after doing arg
         replacement via revset.formatspec
         '''
         for r in self.revs(expr, *args):
             yield self[r]

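revs() and set() are the main programmatic entry points into the revset engine; revset.formatspec() expands and quotes the extra arguments, so callers can splice in values without escaping them by hand. An illustrative query (branch name chosen arbitrarily):

    # list the open heads of one branch; %s is expanded by revset.formatspec
    for ctx in repo.set('head() and branch(%s) and not closed()', 'default'):
        print ctx.rev(), ctx.branch()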
     def url(self):
         return 'file:' + self.root

     def hook(self, name, throw=False, **args):
         return hook.hook(self.ui, self, name, throw, **args)

     @unfilteredmethod
     def _tag(self, names, node, message, local, user, date, extra={}):
         if isinstance(names, str):
             names = (names,)

         branches = self.branchmap()
         for name in names:
             self.hook('pretag', throw=True, node=hex(node), tag=name,
                       local=local)
             if name in branches:
                 self.ui.warn(_("warning: tag %s conflicts with existing"
                                " branch name\n") % name)

         def writetags(fp, names, munge, prevtags):
             fp.seek(0, 2)
             if prevtags and prevtags[-1] != '\n':
                 fp.write('\n')
             for name in names:
                 m = munge and munge(name) or name
                 if (self._tagscache.tagtypes and
                     name in self._tagscache.tagtypes):
                     old = self.tags().get(name, nullid)
                     fp.write('%s %s\n' % (hex(old), m))
                 fp.write('%s %s\n' % (hex(node), m))
             fp.close()

         prevtags = ''
         if local:
             try:
                 fp = self.opener('localtags', 'r+')
             except IOError:
                 fp = self.opener('localtags', 'a')
             else:
                 prevtags = fp.read()

             # local tags are stored in the current charset
             writetags(fp, names, None, prevtags)
             for name in names:
                 self.hook('tag', node=hex(node), tag=name, local=local)
             return

         try:
             fp = self.wfile('.hgtags', 'rb+')
         except IOError, e:
             if e.errno != errno.ENOENT:
                 raise
             fp = self.wfile('.hgtags', 'ab')
         else:
             prevtags = fp.read()

         # committed tags are stored in UTF-8
         writetags(fp, names, encoding.fromlocal, prevtags)

         fp.close()

         self.invalidatecaches()

         if '.hgtags' not in self.dirstate:
             self[None].add(['.hgtags'])

         m = matchmod.exact(self.root, '', ['.hgtags'])
         tagnode = self.commit(message, user, date, extra=extra, match=m)

         for name in names:
             self.hook('tag', node=hex(node), tag=name, local=local)

         return tagnode

     def tag(self, names, node, message, local, user, date):
         '''tag a revision with one or more symbolic names.

         names is a list of strings or, when adding a single tag, names may be a
         string.

         if local is True, the tags are stored in a per-repository file.
         otherwise, they are stored in the .hgtags file, and a new
         changeset is committed with the change.

         keyword arguments:

         local: whether to store tags in non-version-controlled file
         (default False)

         message: commit message to use if committing

         user: name of user to use if committing

         date: date tuple to use if committing'''

         if not local:
             for x in self.status()[:5]:
                 if '.hgtags' in x:
                     raise util.Abort(_('working copy of .hgtags is changed '
                                        '(please commit .hgtags manually)'))

         self.tags() # instantiate the cache
         self._tag(names, node, message, local, user, date)

     @filteredpropertycache
     def _tagscache(self):
         '''Returns a tagscache object that contains various tags related
         caches.'''

         # This simplifies its cache management by having one decorated
         # function (this one) and the rest simply fetch things from it.
         class tagscache(object):
             def __init__(self):
                 # These two define the set of tags for this repository. tags
                 # maps tag name to node; tagtypes maps tag name to 'global' or
                 # 'local'. (Global tags are defined by .hgtags across all
                 # heads, and local tags are defined in .hg/localtags.)
                 # They constitute the in-memory cache of tags.
                 self.tags = self.tagtypes = None

                 self.nodetagscache = self.tagslist = None

         cache = tagscache()
         cache.tags, cache.tagtypes = self._findtags()

         return cache

     def tags(self):
         '''return a mapping of tag to node'''
         t = {}
         if self.changelog.filteredrevs:
             tags, tt = self._findtags()
         else:
             tags = self._tagscache.tags
         for k, v in tags.iteritems():
             try:
                 # ignore tags to unknown nodes
                 self.changelog.rev(v)
                 t[k] = v
             except (error.LookupError, ValueError):
                 pass
         return t

     def _findtags(self):
         '''Do the hard work of finding tags. Return a pair of dicts
         (tags, tagtypes) where tags maps tag name to node, and tagtypes
         maps tag name to a string like \'global\' or \'local\'.
         Subclasses or extensions are free to add their own tags, but
         should be aware that the returned dicts will be retained for the
         duration of the localrepo object.'''

         # XXX what tagtype should subclasses/extensions use? Currently
         # mq and bookmarks add tags, but do not set the tagtype at all.
         # Should each extension invent its own tag type? Should there
         # be one tagtype for all such "virtual" tags? Or is the status
         # quo fine?

         alltags = {} # map tag name to (node, hist)
         tagtypes = {}

         tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

         # Build the return dicts. Have to re-encode tag names because
         # the tags module always uses UTF-8 (in order not to lose info
         # writing to the cache), but the rest of Mercurial wants them in
         # local encoding.
         tags = {}
         for (name, (node, hist)) in alltags.iteritems():
             if node != nullid:
                 tags[encoding.tolocal(name)] = node
         tags['tip'] = self.changelog.tip()
         tagtypes = dict([(encoding.tolocal(name), value)
                          for (name, value) in tagtypes.iteritems()])
         return (tags, tagtypes)

     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:

         'local' : a local tag
         'global' : a global tag
         None : tag does not exist
         '''

         return self._tagscache.tagtypes.get(tagname)

     def tagslist(self):
         '''return a list of tags ordered by revision'''
         if not self._tagscache.tagslist:
             l = []
             for t, n in self.tags().iteritems():
                 r = self.changelog.rev(n)
                 l.append((r, t, n))
             self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

         return self._tagscache.tagslist

     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self._tagscache.nodetagscache:
             nodetagscache = {}
             for t, n in self._tagscache.tags.iteritems():
                 nodetagscache.setdefault(n, []).append(t)
             for tags in nodetagscache.itervalues():
                 tags.sort()
             self._tagscache.nodetagscache = nodetagscache
         return self._tagscache.nodetagscache.get(node, [])

     def nodebookmarks(self, node):
         marks = []
         for bookmark, n in self._bookmarks.iteritems():
             if n == node:
                 marks.append(bookmark)
         return sorted(marks)

     def _cacheabletip(self):
         """tip-most revision stable enought to used in persistent cache

         This function is overwritten by MQ to ensure we do not write cache for
         a part of the history that will likely change.

         Efficient handling of filtered revision in branchcache should offer a
         better alternative. But we are using this approach until it is ready.
         """
         cl = self.changelog
         return cl.rev(cl.tip())

-    @unfilteredmethod # Until we get a smarter cache management
-    def updatebranchcache(self):
-        cl = self.changelog
-        tip = cl.tip()
-        if self._branchcache is not None and self._branchcachetip == tip:
-            return
-
-        oldtip = self._branchcachetip
-        if oldtip is None or oldtip not in cl.nodemap:
-            partial, last, lrev = branchmap.read(self)
-        else:
-            lrev = cl.rev(oldtip)
-            partial = self._branchcache
-
-        catip = self._cacheabletip()
-        # if lrev == catip: cache is already up to date
-        # if lrev > catip: we have uncachable element in `partial` can't write
-        # on disk
-        if lrev < catip:
-            ctxgen = (self[r] for r in cl.revs(lrev + 1, catip))
-            branchmap.update(self, partial, ctxgen)
-            branchmap.write(self, partial, cl.node(catip), catip)
-            lrev = catip
-        # If cacheable tip were lower than actual tip, we need to update the
-        # cache up to tip. This update (from cacheable to actual tip) is not
-        # written to disk since it's not cacheable.
-        tiprev = len(self) - 1
-        if lrev < tiprev:
-            ctxgen = (self[r] for r in cl.revs(lrev + 1, tiprev))
-            branchmap.update(self, partial, ctxgen)
-        self._branchcache = partial
-        self._branchcachetip = tip
-
     def branchmap(self):
         '''returns a dictionary {branch: [branchheads]}'''
         if self.changelog.filteredrevs:
             # some changeset are excluded we can't use the cache
             bmap = {}
             branchmap.update(self, bmap, (self[r] for r in self))
             return bmap
         else:
-            self.updatebranchcache()
+            branchmap.updatecache(self)
             return self._branchcache


     def _branchtip(self, heads):
         '''return the tipmost branch head in heads'''
         tip = heads[-1]
         for h in reversed(heads):
             if not self[h].closesbranch():
                 tip = h
                 break
         return tip

     def branchtip(self, branch):
         '''return the tip node for a given branch'''
         if branch not in self.branchmap():
             raise error.RepoLookupError(_("unknown branch '%s'") % branch)
         return self._branchtip(self.branchmap()[branch])

     def branchtags(self):
         '''return a dict where branch names map to the tipmost head of
         the branch, open heads come before closed'''
         bt = {}
         for bn, heads in self.branchmap().iteritems():
             bt[bn] = self._branchtip(heads)
         return bt

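branchmap(), branchtip(), and branchtags() expose the same cached data at three granularities. A short sketch of the trio (assuming a branch named 'default' exists; illustrative only):

    heads = repo.branchmap()          # {branch: [head nodes]}, cache-backed
    tip = repo.branchtip('default')   # tipmost head, preferring open heads
    bt = repo.branchtags()            # {branch: tipmost head} for every branch
    for name in sorted(heads):
        print name, len(heads[name]), 'head(s)'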
733 def lookup(self, key):
700 def lookup(self, key):
734 return self[key].node()
701 return self[key].node()
735
702
736 def lookupbranch(self, key, remote=None):
703 def lookupbranch(self, key, remote=None):
737 repo = remote or self
704 repo = remote or self
738 if key in repo.branchmap():
705 if key in repo.branchmap():
739 return key
706 return key
740
707
741 repo = (remote and remote.local()) and remote or self
708 repo = (remote and remote.local()) and remote or self
742 return repo[key].branch()
709 return repo[key].branch()
743
710
744 def known(self, nodes):
711 def known(self, nodes):
745 nm = self.changelog.nodemap
712 nm = self.changelog.nodemap
746 pc = self._phasecache
713 pc = self._phasecache
747 result = []
714 result = []
748 for n in nodes:
715 for n in nodes:
749 r = nm.get(n)
716 r = nm.get(n)
750 resp = not (r is None or pc.phase(self, r) >= phases.secret)
717 resp = not (r is None or pc.phase(self, r) >= phases.secret)
751 result.append(resp)
718 result.append(resp)
752 return result
719 return result
753
720
754 def local(self):
721 def local(self):
755 return self
722 return self
756
723
757 def cancopy(self):
724 def cancopy(self):
758 return self.local() # so statichttprepo's override of local() works
725 return self.local() # so statichttprepo's override of local() works
759
726
760 def join(self, f):
727 def join(self, f):
761 return os.path.join(self.path, f)
728 return os.path.join(self.path, f)
762
729
763 def wjoin(self, f):
730 def wjoin(self, f):
764 return os.path.join(self.root, f)
731 return os.path.join(self.root, f)
765
732
    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records; the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them only
            # for entries added to the first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

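# Illustration (not part of localrepo.py): _loadfilter() turns a config
# section such as [encode] or [decode] into (matcher, fn, params) triples,
# and _filter() applies the first pattern matching the file name. With a
# (hypothetical) hgrc entry like
#
#   [encode]
#   **.txt = tr a-z A-Z
#
# the command runs as a shell pipe over the file data. A data filter
# registered via adddatafilter() just below can take the place of the
# shell command:

def upperfilter(s, params, **kwargs):
    # toy in-process data filter; ignores its params
    return s.upper()

repo.adddatafilter('upper:', upperfilter)
# then '[encode] **.txt = upper:' routes *.txt through upperfilter()
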
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

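# Illustration (not part of localrepo.py): the flags argument of wwrite()
# comes from the manifest -- 'l' materializes the data as a symlink target,
# 'x' sets the executable bit, '' writes a plain file. A sketch, assuming
# `repo` is open:

repo.wwrite('hello.sh', '#!/bin/sh\necho hi\n', 'x')  # executable file
repo.wwrite('hello-link', 'hello.sh', 'l')            # symlink to hello.sh
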
    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

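# Illustration (not part of localrepo.py): the usual calling pattern for
# transaction(), mirroring what commitctx() does further down -- close()
# marks success, and release() in a finally block either commits the journal
# or triggers the rollback. Assumes `repo` is open and the store is locked.

tr = repo.transaction('illustrative-change')
try:
    # ... append data to revlogs through the transaction ...
    tr.close()
finally:
    tr.release()
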
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback,
        # pass them to destroy(), which will prevent the branchhead cache
        # from being invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcache = None # in UTF-8
        self.unfiltered()._branchcachetip = None
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        if 'hiddenrevs' in vars(self):
            del self.hiddenrevs

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previously known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

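# Illustration (not part of localrepo.py): _afterlock() runs the callback at
# once when no store lock is held, otherwise defers it to the lock's
# postrelease list; commit() below uses exactly this to fire the 'commit'
# hook only after the lock is gone. A hedged sketch, assuming `repo` is open:

def notify():
    repo.ui.status('store lock released\n')  # illustrative callback

repo._afterlock(notify)
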
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

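# Illustration (not part of localrepo.py): lock ordering as practiced by
# rollback() above -- take the working-directory lock before the store lock
# and release them in reverse order. release() is mercurial.lock.release and
# `repo` is assumed open.

wlock = lock = None
try:
    wlock = repo.wlock()
    lock = repo.lock()
    # ... mutate both the store and the working directory ...
finally:
    release(lock, wlock)
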
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

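# Illustration (not part of localrepo.py): when _filecommit() records a copy,
# the new filelog revision carries the source in its metadata ("copy" and
# "copyrev") and uses nullid as its first parent. Reading it back through a
# file context, assuming `repo` is open and using the foo->bar rename from
# the diagram above:

fctx = repo['tip']['bar']
copy = fctx.renamed()  # (source path, source filenode), or falsy if no copy
if copy:
    repo.ui.write('bar was copied from %s\n' % copy[0])
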
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

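# Illustration (not part of localrepo.py): a minimal programmatic commit,
# assuming `repo` is open and the working directory has pending changes;
# match and editor are optional, exactly as in the signature above.

node = repo.commit(text='illustrative commit',
                   user='someone <someone@example.com>')
if node is None:
    repo.ui.status('nothing changed\n')
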
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets; if a parent has a higher phase, the
                # resulting phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self)
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchhead cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            branchmap.update(self, self._branchcache, ctxgen)
            branchmap.write(self, self._branchcache, self.changelog.tip(),
                            tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

1655 def heads(self, start=None):
1622 def heads(self, start=None):
1656 heads = self.changelog.heads(start)
1623 heads = self.changelog.heads(start)
1657 # sort the output in rev descending order
1624 # sort the output in rev descending order
1658 return sorted(heads, key=self.changelog.rev, reverse=True)
1625 return sorted(heads, key=self.changelog.rev, reverse=True)
1659
1626
1660 def branchheads(self, branch=None, start=None, closed=False):
1627 def branchheads(self, branch=None, start=None, closed=False):
1661 '''return a (possibly filtered) list of heads for the given branch
1628 '''return a (possibly filtered) list of heads for the given branch
1662
1629
1663 Heads are returned in topological order, from newest to oldest.
1630 Heads are returned in topological order, from newest to oldest.
1664 If branch is None, use the dirstate branch.
1631 If branch is None, use the dirstate branch.
1665 If start is not None, return only heads reachable from start.
1632 If start is not None, return only heads reachable from start.
1666 If closed is True, return heads that are marked as closed as well.
1633 If closed is True, return heads that are marked as closed as well.
1667 '''
1634 '''
1668 if branch is None:
1635 if branch is None:
1669 branch = self[None].branch()
1636 branch = self[None].branch()
1670 branches = self.branchmap()
1637 branches = self.branchmap()
1671 if branch not in branches:
1638 if branch not in branches:
1672 return []
1639 return []
1673 # the cache returns heads ordered lowest to highest
1640 # the cache returns heads ordered lowest to highest
1674 bheads = list(reversed(branches[branch]))
1641 bheads = list(reversed(branches[branch]))
1675 if start is not None:
1642 if start is not None:
1676 # filter out the heads that cannot be reached from startrev
1643 # filter out the heads that cannot be reached from startrev
1677 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1644 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1678 bheads = [h for h in bheads if h in fbheads]
1645 bheads = [h for h in bheads if h in fbheads]
1679 if not closed:
1646 if not closed:
1680 bheads = [h for h in bheads if not self[h].closesbranch()]
1647 bheads = [h for h in bheads if not self[h].closesbranch()]
1681 return bheads
1648 return bheads
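        # Example: branchheads('default') lists the open heads of the
        # 'default' branch, newest first; branchheads('default',
        # closed=True) also includes heads whose changesets close the
        # branch.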

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
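        # Each returned tuple is (start, end, p1, p2): from every requested
        # node the walk follows first parents until it reaches a merge or a
        # root, so callers receive the linear segment of history the node
        # sits on. This is a helper for the legacy discovery protocol.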

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
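        # The sampling above is logarithmic: for each (top, bottom) pair
        # the walk records the first-parent ancestors of top at distances
        # 1, 2, 4, 8, ... For example, on a linear history with revisions
        # 0..100, between([(node(100), node(0))]) yields the nodes at
        # revisions 99, 98, 96, 92, 84, 68 and 36, which lets the legacy
        # discovery protocol bisect toward the common ancestor in few
        # round trips.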

    def pull(self, remote, heads=None, force=False):
        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

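            # Phase summary of the above: a publishing remote (or an old
            # one that reports no phase data) makes everything in 'subset'
            # public locally, even changesets we already had as draft; a
            # non-publishing remote only publishes the heads it reports as
            # public, and the rest of 'subset' becomes at most draft.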
            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
                    self.invalidatevolatilesets()
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass
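        # A hypothetical override from an extension's reposetup (sketch
        # only; the extension and config names are made up):
        #
        #     class vetorepo(repo.__class__):
        #         def checkpush(self, force, revs):
        #             super(vetorepo, self).checkpush(force, revs)
        #             if not force and self.ui.configbool('veto', 'readonly'):
        #                 raise util.Abort(_('push vetoed by configuration'))
        #     repo.__class__ = vetorepo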

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        unfi = self.unfiltered()
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(unfi, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(unfi, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)


                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete
                        # then, save the iteration
                        if unfi.obsstore:
                            # these messages are here because of the
                            # 80-char line limit
                            mso = _("push includes obsolete changeset: %s!")
                            msu = _("push includes unstable changeset: %s!")
                            msb = _("push includes bumped changeset: %s!")
                            msd = _("push includes divergent changeset: %s!")
                            # If we are pushing and there is at least one
                            # obsolete or unstable changeset in missing, then
                            # at least one of the missing heads will be
                            # obsolete or unstable as well. So checking the
                            # heads only is ok
                            for node in outgoing.missingheads:
                                ctx = unfi[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(msu % ctx)
                                elif ctx.bumped():
                                    raise util.Abort(msb % ctx)
                                elif ctx.divergent():
                                    raise util.Abort(msd % ctx)
                        discovery.checkheads(unfi, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. synchronize on all common heads
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote by public here.
                    # XXX Beware that revset break if droots is not strictly
                    # XXX root we may want to ensure it is but it is costly
                    outdated = unfi.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in unfi._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in unfi:
                    cr = unfi[nr]
                    cl = unfi[nl]
                    if bookmarks.validdest(unfi, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = cl.ancestors([cl.rev(n) for n in bases])
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            hasnode = cl.hasnode
            common = [n for n in common if hasnode(n)]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))
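        # In revset terms the resulting changegroup covers
        # "::heads - ::common". For example, getbundle('pull', heads=[h],
        # common=[c]) with c an ancestor of h bundles everything after c
        # up to and including h.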

    @unfilteredmethod
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]
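        # prune() drops nodes whose introducing changeset (their linkrev)
        # is already on the common side; e.g. a file node carried over
        # unchanged from a common changeset is not re-sent in the bundle.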

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    @unfilteredmethod
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    @unfilteredmethod
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            self.invalidatevolatilesets()

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
-                self.updatebranchcache()
+                branchmap.updatecache(self)
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            # from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                self.branchcache = rbranchmap
                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    branchmap.write(self, self.branchcache,
                                    self[rtiprev].node(), rtiprev)
2534 self.invalidate()
2501 self.invalidate()
2535 return len(self.heads()) + 1
2502 return len(self.heads()) + 1
2536 finally:
2503 finally:
2537 lock.release()
2504 lock.release()
2538
2505
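The loop above consumes Mercurial's stream_out wire format: one status line, one "<total_files> <total_bytes>" line, then, per file, a "<name>\0<size>" line followed by exactly size raw bytes of revlog data with no separator. A minimal sketch of a parser for that framing, for illustration only (parse_stream and fp are hypothetical names, not Mercurial API):

    # hedged sketch: consume a stream_out-style stream from a file-like fp
    def parse_stream(fp):
        if int(fp.readline()) != 0:
            raise ValueError('server refused to stream')
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        for i in xrange(total_files):
            name, size = fp.readline().split('\0', 1)
            yield name, fp.read(int(size))  # raw revlog bytes follow the header line
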
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

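Note that remote.capable() doubles as a boolean test ('stream-preferred', 'stream') and as a value lookup ('streamreqs'): for capabilities advertised in "name=value" form it returns the value string rather than True. A hedged illustration with a made-up capability string:

    # server advertising: 'streamreqs=generaldelta,revlogv1'
    # remote.capable('stream')     -> False
    # remote.capable('streamreqs') -> 'generaldelta,revlogv1'
    # remote.capable('bogus')      -> False
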
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

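Both methods delegate to the generic pushkey protocol; its stock namespaces include 'bookmarks' and 'phases'. A hedged usage sketch (the repository, bookmark name, and hex node are placeholders):

    marks = repo.listkeys('bookmarks')   # e.g. {'feature-x': '1f0dee...'}
    # move the bookmark; passing old value '' creates it instead
    ok = repo.pushkey('bookmarks', 'feature-x',
                      marks.get('feature-x', ''), newnode)  # newnode: 40-char hex id
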
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

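A hedged usage note: commit paths stash the message just before steps that may abort, so a failed commit leaves the text recoverable from the repository directory. The returned path is repository-relative:

    # hypothetical call site
    msgfn = repo.savecommitmessage('WIP: refactor branchmap handling')
    # msgfn == '.hg/last-message.txt', typically surfaced in an abort hint
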
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

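The module-level closure is the point: the callback the transaction keeps alive holds only a list of (src, dest) tuples, never a reference back to the repository object, so repository destructors are not blocked by a cycle. A hedged sketch of the call shape (paths illustrative, assuming it is wired up as the transaction's after-close callback):

    # journal -> undo renames performed once the transaction is closed;
    # aftertrans() returns a plain function carrying no repo reference
    after = aftertrans([(repo.sjoin('journal'), repo.sjoin('undo'))])
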
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

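A worked example of the renaming (only the first 'journal' is replaced, hence the third argument to replace()):

    # undoname('.hg/store/journal')            -> '.hg/store/undo'
    # undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'
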
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
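instance() and islocal() are the small interface that scheme modules expose to the repository opener: it picks a module by URL scheme and calls its instance(ui, path, create). A hedged sketch of that dispatch (helper name and table contents illustrative, not Mercurial's actual code):

    # schemes: URL scheme -> module exposing instance()/islocal()
    # e.g. {'file': localrepo, 'http': httppeer, 'ssh': sshpeer}
    def open_repository(ui, path, create=False):   # hypothetical helper
        scheme = path.split(':', 1)[0] if ':' in path else 'file'
        return schemes[scheme].instance(ui, path, create)
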
@@ -1,203 +1,203 b''
 # repair.py - functions for repository repair for mercurial
 #
 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
 # Copyright 2007 Matt Mackall
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-from mercurial import changegroup
+from mercurial import changegroup, branchmap
 from mercurial.node import short
 from mercurial.i18n import _
 import os
 import errno
 
 def _bundle(repo, bases, heads, node, suffix, compress=True):
     """create a bundle with the specified revisions as a backup"""
     cg = repo.changegroupsubset(bases, heads, 'strip')
     backupdir = repo.join("strip-backup")
     if not os.path.isdir(backupdir):
         os.mkdir(backupdir)
     name = os.path.join(backupdir, "%s-%s.hg" % (short(node), suffix))
     if compress:
         bundletype = "HG10BZ"
     else:
         bundletype = "HG10UN"
     return changegroup.writebundle(cg, name, bundletype)
 
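The bundletype string selects the payload compression: "HG10BZ" is a bzip2-compressed bundle, "HG10UN" is uncompressed (used when the partial bundle will be deleted right after being re-applied), and a gzip variant "HG10GZ" also exists. A hedged sketch of an equivalent manual backup (bases and the output path are illustrative):

    # uncompressed backup of everything from the given bases up to the heads
    cg = repo.changegroupsubset(bases, repo.heads(), 'strip')
    changegroup.writebundle(cg, 'backup.hg', 'HG10UN')
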
 def _collectfiles(repo, striprev):
     """find out the filelogs affected by the strip"""
     files = set()
 
     for x in xrange(striprev, len(repo)):
         files.update(repo[x].files())
 
     return sorted(files)
 
 def _collectbrokencsets(repo, files, striprev):
     """return the changesets which will be broken by the truncation"""
     s = set()
     def collectone(revlog):
         linkgen = (revlog.linkrev(i) for i in revlog)
         # find the truncation point of the revlog
         for lrev in linkgen:
             if lrev >= striprev:
                 break
         # see if any revision after this point has a linkrev
         # less than striprev (those will be broken by strip)
         for lrev in linkgen:
             if lrev < striprev:
                 s.add(lrev)
 
     collectone(repo.manifest)
     for fname in files:
         collectone(repo.file(fname))
 
     return s
 
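A worked example of the linkrev scan (hypothetical linkrevs): suppose a filelog's revisions link to changesets [0, 2, 5, 3, 6] and striprev is 4. The first loop stops at 5, the truncation point; the second loop then sees 3 and 6, and since 3 < 4, the filelog revision belonging to changeset 3 would be truncated even though changeset 3 itself survives the strip. Changeset 3 is therefore recorded as broken, so it can be bundled and restored afterwards.
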
 def strip(ui, repo, nodelist, backup="all", topic='backup'):
     repo = repo.unfiltered()
     # It simplifies the logic around updating the branchheads cache if we only
     # have to consider the effect of the stripped revisions and not revisions
     # missing because the cache is out-of-date.
-    repo.updatebranchcache()
+    branchmap.updatecache(repo)
 
     cl = repo.changelog
     # TODO handle undo of merge sets
     if isinstance(nodelist, str):
         nodelist = [nodelist]
     striplist = [cl.rev(node) for node in nodelist]
     striprev = min(striplist)
 
     # Generate set of branches who will have nodes stripped.
     striprevs = repo.revs("%ld::", striplist)
     stripbranches = set([repo[rev].branch() for rev in striprevs])
 
     # Set of potential new heads resulting from the strip. The parents of any
     # node removed could be a new head because the node to be removed could have
     # been the only child of the parent.
     newheadrevs = repo.revs("parents(%ld::) - %ld::", striprevs, striprevs)
     newheadnodes = set([cl.node(rev) for rev in newheadrevs])
     newheadbranches = set([repo[rev].branch() for rev in newheadrevs])
 
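repo.revs() evaluates a revset expression with typed interpolation; "%ld" splices a Python list of revision numbers into the query. A hedged illustration (revision numbers made up):

    # revisions 10 and 12 plus all their descendants
    striprevs = repo.revs("%ld::", [10, 12])
    # parents of that set that are not themselves in it: candidate new heads
    newheads = repo.revs("parents(%ld::) - %ld::", [10, 12], [10, 12])
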
     keeppartialbundle = backup == 'strip'
 
     # Some revisions with rev > striprev may not be descendants of striprev.
     # We have to find these revisions and put them in a bundle, so that
     # we can restore them after the truncations.
     # To create the bundle we use repo.changegroupsubset which requires
     # the list of heads and bases of the set of interesting revisions.
     # (head = revision in the set that has no descendant in the set;
     #  base = revision in the set that has no ancestor in the set)
     tostrip = set(striplist)
     for rev in striplist:
         for desc in cl.descendants([rev]):
             tostrip.add(desc)
 
     files = _collectfiles(repo, striprev)
     saverevs = _collectbrokencsets(repo, files, striprev)
 
     # compute heads
     saveheads = set(saverevs)
     for r in xrange(striprev + 1, len(cl)):
         if r not in tostrip:
             saverevs.add(r)
             saveheads.difference_update(cl.parentrevs(r))
             saveheads.add(r)
     saveheads = [cl.node(r) for r in saveheads]
 
     # compute base nodes
     if saverevs:
         descendants = set(cl.descendants(saverevs))
         saverevs.difference_update(descendants)
     savebases = [cl.node(r) for r in saverevs]
     stripbases = [cl.node(r) for r in tostrip]
 
     # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
     # is much faster
     newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
     if newbmtarget:
         newbmtarget = repo[newbmtarget[0]].node()
     else:
         newbmtarget = '.'
 
     bm = repo._bookmarks
     updatebm = []
     for m in bm:
         rev = repo[bm[m]].rev()
         if rev in tostrip:
             updatebm.append(m)
 
     # create a changegroup for all the branches we need to keep
     backupfile = None
     if backup == "all":
         backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
         repo.ui.status(_("saved backup bundle to %s\n") % backupfile)
     if saveheads or savebases:
         # do not compress partial bundle if we remove it from disk later
         chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                             compress=keeppartialbundle)
 
     mfst = repo.manifest
 
     tr = repo.transaction("strip")
     offset = len(tr.entries)
 
     try:
         tr.startgroup()
         cl.strip(striprev, tr)
         mfst.strip(striprev, tr)
         for fn in files:
             repo.file(fn).strip(striprev, tr)
         tr.endgroup()
 
         try:
             for i in xrange(offset, len(tr.entries)):
                 file, troffset, ignore = tr.entries[i]
                 repo.sopener(file, 'a').truncate(troffset)
             tr.close()
         except: # re-raises
             tr.abort()
             raise
 
         if saveheads or savebases:
             ui.note(_("adding branch\n"))
             f = open(chgrpfile, "rb")
             gen = changegroup.readbundle(f, chgrpfile)
             if not repo.ui.verbose:
                 # silence internal shuffling chatter
                 repo.ui.pushbuffer()
             repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
             if not repo.ui.verbose:
                 repo.ui.popbuffer()
             f.close()
             if not keeppartialbundle:
                 os.unlink(chgrpfile)
 
         # remove undo files
         for undofile in repo.undofiles():
             try:
                 os.unlink(undofile)
             except OSError, e:
                 if e.errno != errno.ENOENT:
                     ui.warn(_('error removing %s: %s\n') % (undofile, str(e)))
 
         for m in updatebm:
             bm[m] = repo[newbmtarget].node()
         bm.write()
     except: # re-raises
         if backupfile:
             ui.warn(_("strip failed, full bundle stored in '%s'\n")
                     % backupfile)
         elif saveheads:
             ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                     % chgrpfile)
         raise
 
     if len(stripbranches) == 1 and len(newheadbranches) == 1 \
             and stripbranches == newheadbranches:
         repo.destroyed(newheadnodes)
     else:
         # Multiple branches involved in strip. Will allow branchcache to become
         # invalid and later on rebuilt from scratch
         repo.destroyed()
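A hedged usage sketch to tie the pieces together (in this era the user-facing entry point is the mq extension's `hg strip`, which calls into this helper; the repository object below is a placeholder):

    from mercurial import repair
    # strip the current tip; a backup bundle lands in .hg/strip-backup/
    # and can be restored later with `hg unbundle`
    repair.strip(repo.ui, repo, [repo['tip'].node()], backup='all')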