branchmap: extract write logic from localrepo
Pierre-Yves David
r18117:526e7ec5 default
branchmap.py
@@ -1,6 +1,20 @@
 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
+
+from node import hex
+import encoding
+
+def write(repo, branches, tip, tiprev):
+    try:
+        f = repo.opener("cache/branchheads", "w", atomictemp=True)
+        f.write("%s %s\n" % (hex(tip), tiprev))
+        for label, nodes in branches.iteritems():
+            for node in nodes:
+                f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
+        f.close()
+    except (IOError, OSError):
+        pass
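
For context, a minimal sketch (not part of the changeset) of how the extracted function is consumed after this refactoring. flushbranchcache is a hypothetical wrapper written for illustration; the real call site is localrepository.updatebranchcache in the localrepo.py hunk below, which swaps self._writebranchcache(...) for branchmap.write(...):

    # Hypothetical helper, assuming a repo object with the _cacheabletip()
    # method shown in the localrepo.py diff below.
    import branchmap

    def flushbranchcache(repo, partial):
        # 'partial' maps branch name -> list of head nodes, as built by
        # localrepository._updatebranchcache.
        cl = repo.changelog
        catip = repo._cacheabletip()  # tip-most revision safe to persist
        # The module-level write() replaces the removed method
        # localrepository._writebranchcache.
        branchmap.write(repo, partial, cl.node(catip), catip)
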
@@ -1,2724 +1,2713
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import bin, hex, nullid, nullrev, short
7 from node import bin, hex, nullid, nullrev, short
8 from i18n import _
8 from i18n import _
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import lock, transaction, store, encoding, base85
11 import lock, transaction, store, encoding, base85
12 import scmutil, util, extensions, hook, error, revset
12 import scmutil, util, extensions, hook, error, revset
13 import match as matchmod
13 import match as matchmod
14 import merge as mergemod
14 import merge as mergemod
15 import tags as tagsmod
15 import tags as tagsmod
16 from lock import release
16 from lock import release
17 import weakref, errno, os, time, inspect
17 import weakref, errno, os, time, inspect
18 import branchmap
18 propertycache = util.propertycache
19 propertycache = util.propertycache
19 filecache = scmutil.filecache
20 filecache = scmutil.filecache
20
21
21 class repofilecache(filecache):
22 class repofilecache(filecache):
22 """All filecache usage on repo are done for logic that should be unfiltered
23 """All filecache usage on repo are done for logic that should be unfiltered
23 """
24 """
24
25
25 def __get__(self, repo, type=None):
26 def __get__(self, repo, type=None):
26 return super(repofilecache, self).__get__(repo.unfiltered(), type)
27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
27 def __set__(self, repo, value):
28 def __set__(self, repo, value):
28 return super(repofilecache, self).__set__(repo.unfiltered(), value)
29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
29 def __delete__(self, repo):
30 def __delete__(self, repo):
30 return super(repofilecache, self).__delete__(repo.unfiltered())
31 return super(repofilecache, self).__delete__(repo.unfiltered())
31
32
32 class storecache(repofilecache):
33 class storecache(repofilecache):
33 """filecache for files in the store"""
34 """filecache for files in the store"""
34 def join(self, obj, fname):
35 def join(self, obj, fname):
35 return obj.sjoin(fname)
36 return obj.sjoin(fname)
36
37
37 class unfilteredpropertycache(propertycache):
38 class unfilteredpropertycache(propertycache):
38 """propertycache that apply to unfiltered repo only"""
39 """propertycache that apply to unfiltered repo only"""
39
40
40 def __get__(self, repo, type=None):
41 def __get__(self, repo, type=None):
41 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
42
43
43 class filteredpropertycache(propertycache):
44 class filteredpropertycache(propertycache):
44 """propertycache that must take filtering in account"""
45 """propertycache that must take filtering in account"""
45
46
46 def cachevalue(self, obj, value):
47 def cachevalue(self, obj, value):
47 object.__setattr__(obj, self.name, value)
48 object.__setattr__(obj, self.name, value)
48
49
49
50
50 def hasunfilteredcache(repo, name):
51 def hasunfilteredcache(repo, name):
51 """check if an repo and a unfilteredproperty cached value for <name>"""
52 """check if an repo and a unfilteredproperty cached value for <name>"""
52 return name in vars(repo.unfiltered())
53 return name in vars(repo.unfiltered())
53
54
54 def unfilteredmethod(orig):
55 def unfilteredmethod(orig):
55 """decorate method that always need to be run on unfiltered version"""
56 """decorate method that always need to be run on unfiltered version"""
56 def wrapper(repo, *args, **kwargs):
57 def wrapper(repo, *args, **kwargs):
57 return orig(repo.unfiltered(), *args, **kwargs)
58 return orig(repo.unfiltered(), *args, **kwargs)
58 return wrapper
59 return wrapper
59
60
60 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
61 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
62
63
63 class localpeer(peer.peerrepository):
64 class localpeer(peer.peerrepository):
64 '''peer for a local repo; reflects only the most recent API'''
65 '''peer for a local repo; reflects only the most recent API'''
65
66
66 def __init__(self, repo, caps=MODERNCAPS):
67 def __init__(self, repo, caps=MODERNCAPS):
67 peer.peerrepository.__init__(self)
68 peer.peerrepository.__init__(self)
68 self._repo = repo
69 self._repo = repo
69 self.ui = repo.ui
70 self.ui = repo.ui
70 self._caps = repo._restrictcapabilities(caps)
71 self._caps = repo._restrictcapabilities(caps)
71 self.requirements = repo.requirements
72 self.requirements = repo.requirements
72 self.supportedformats = repo.supportedformats
73 self.supportedformats = repo.supportedformats
73
74
74 def close(self):
75 def close(self):
75 self._repo.close()
76 self._repo.close()
76
77
77 def _capabilities(self):
78 def _capabilities(self):
78 return self._caps
79 return self._caps
79
80
80 def local(self):
81 def local(self):
81 return self._repo
82 return self._repo
82
83
83 def canpush(self):
84 def canpush(self):
84 return True
85 return True
85
86
86 def url(self):
87 def url(self):
87 return self._repo.url()
88 return self._repo.url()
88
89
89 def lookup(self, key):
90 def lookup(self, key):
90 return self._repo.lookup(key)
91 return self._repo.lookup(key)
91
92
92 def branchmap(self):
93 def branchmap(self):
93 return discovery.visiblebranchmap(self._repo)
94 return discovery.visiblebranchmap(self._repo)
94
95
95 def heads(self):
96 def heads(self):
96 return discovery.visibleheads(self._repo)
97 return discovery.visibleheads(self._repo)
97
98
98 def known(self, nodes):
99 def known(self, nodes):
99 return self._repo.known(nodes)
100 return self._repo.known(nodes)
100
101
101 def getbundle(self, source, heads=None, common=None):
102 def getbundle(self, source, heads=None, common=None):
102 return self._repo.getbundle(source, heads=heads, common=common)
103 return self._repo.getbundle(source, heads=heads, common=common)
103
104
104 # TODO We might want to move the next two calls into legacypeer and add
105 # TODO We might want to move the next two calls into legacypeer and add
105 # unbundle instead.
106 # unbundle instead.
106
107
107 def lock(self):
108 def lock(self):
108 return self._repo.lock()
109 return self._repo.lock()
109
110
110 def addchangegroup(self, cg, source, url):
111 def addchangegroup(self, cg, source, url):
111 return self._repo.addchangegroup(cg, source, url)
112 return self._repo.addchangegroup(cg, source, url)
112
113
113 def pushkey(self, namespace, key, old, new):
114 def pushkey(self, namespace, key, old, new):
114 return self._repo.pushkey(namespace, key, old, new)
115 return self._repo.pushkey(namespace, key, old, new)
115
116
116 def listkeys(self, namespace):
117 def listkeys(self, namespace):
117 return self._repo.listkeys(namespace)
118 return self._repo.listkeys(namespace)
118
119
119 def debugwireargs(self, one, two, three=None, four=None, five=None):
120 def debugwireargs(self, one, two, three=None, four=None, five=None):
120 '''used to test argument passing over the wire'''
121 '''used to test argument passing over the wire'''
121 return "%s %s %s %s %s" % (one, two, three, four, five)
122 return "%s %s %s %s %s" % (one, two, three, four, five)
122
123
123 class locallegacypeer(localpeer):
124 class locallegacypeer(localpeer):
124 '''peer extension which implements legacy methods too; used for tests with
125 '''peer extension which implements legacy methods too; used for tests with
125 restricted capabilities'''
126 restricted capabilities'''
126
127
127 def __init__(self, repo):
128 def __init__(self, repo):
128 localpeer.__init__(self, repo, caps=LEGACYCAPS)
129 localpeer.__init__(self, repo, caps=LEGACYCAPS)
129
130
130 def branches(self, nodes):
131 def branches(self, nodes):
131 return self._repo.branches(nodes)
132 return self._repo.branches(nodes)
132
133
133 def between(self, pairs):
134 def between(self, pairs):
134 return self._repo.between(pairs)
135 return self._repo.between(pairs)
135
136
136 def changegroup(self, basenodes, source):
137 def changegroup(self, basenodes, source):
137 return self._repo.changegroup(basenodes, source)
138 return self._repo.changegroup(basenodes, source)
138
139
139 def changegroupsubset(self, bases, heads, source):
140 def changegroupsubset(self, bases, heads, source):
140 return self._repo.changegroupsubset(bases, heads, source)
141 return self._repo.changegroupsubset(bases, heads, source)
141
142
142 class localrepository(object):
143 class localrepository(object):
143
144
144 supportedformats = set(('revlogv1', 'generaldelta'))
145 supportedformats = set(('revlogv1', 'generaldelta'))
145 supported = supportedformats | set(('store', 'fncache', 'shared',
146 supported = supportedformats | set(('store', 'fncache', 'shared',
146 'dotencode'))
147 'dotencode'))
147 openerreqs = set(('revlogv1', 'generaldelta'))
148 openerreqs = set(('revlogv1', 'generaldelta'))
148 requirements = ['revlogv1']
149 requirements = ['revlogv1']
149
150
150 def _baserequirements(self, create):
151 def _baserequirements(self, create):
151 return self.requirements[:]
152 return self.requirements[:]
152
153
153 def __init__(self, baseui, path=None, create=False):
154 def __init__(self, baseui, path=None, create=False):
154 self.wvfs = scmutil.vfs(path, expand=True)
155 self.wvfs = scmutil.vfs(path, expand=True)
155 self.wopener = self.wvfs
156 self.wopener = self.wvfs
156 self.root = self.wvfs.base
157 self.root = self.wvfs.base
157 self.path = self.wvfs.join(".hg")
158 self.path = self.wvfs.join(".hg")
158 self.origroot = path
159 self.origroot = path
159 self.auditor = scmutil.pathauditor(self.root, self._checknested)
160 self.auditor = scmutil.pathauditor(self.root, self._checknested)
160 self.vfs = scmutil.vfs(self.path)
161 self.vfs = scmutil.vfs(self.path)
161 self.opener = self.vfs
162 self.opener = self.vfs
162 self.baseui = baseui
163 self.baseui = baseui
163 self.ui = baseui.copy()
164 self.ui = baseui.copy()
164 # A list of callback to shape the phase if no data were found.
165 # A list of callback to shape the phase if no data were found.
165 # Callback are in the form: func(repo, roots) --> processed root.
166 # Callback are in the form: func(repo, roots) --> processed root.
166 # This list it to be filled by extension during repo setup
167 # This list it to be filled by extension during repo setup
167 self._phasedefaults = []
168 self._phasedefaults = []
168 try:
169 try:
169 self.ui.readconfig(self.join("hgrc"), self.root)
170 self.ui.readconfig(self.join("hgrc"), self.root)
170 extensions.loadall(self.ui)
171 extensions.loadall(self.ui)
171 except IOError:
172 except IOError:
172 pass
173 pass
173
174
174 if not self.vfs.isdir():
175 if not self.vfs.isdir():
175 if create:
176 if create:
176 if not self.wvfs.exists():
177 if not self.wvfs.exists():
177 self.wvfs.makedirs()
178 self.wvfs.makedirs()
178 self.vfs.makedir(notindexed=True)
179 self.vfs.makedir(notindexed=True)
179 requirements = self._baserequirements(create)
180 requirements = self._baserequirements(create)
180 if self.ui.configbool('format', 'usestore', True):
181 if self.ui.configbool('format', 'usestore', True):
181 self.vfs.mkdir("store")
182 self.vfs.mkdir("store")
182 requirements.append("store")
183 requirements.append("store")
183 if self.ui.configbool('format', 'usefncache', True):
184 if self.ui.configbool('format', 'usefncache', True):
184 requirements.append("fncache")
185 requirements.append("fncache")
185 if self.ui.configbool('format', 'dotencode', True):
186 if self.ui.configbool('format', 'dotencode', True):
186 requirements.append('dotencode')
187 requirements.append('dotencode')
187 # create an invalid changelog
188 # create an invalid changelog
188 self.vfs.append(
189 self.vfs.append(
189 "00changelog.i",
190 "00changelog.i",
190 '\0\0\0\2' # represents revlogv2
191 '\0\0\0\2' # represents revlogv2
191 ' dummy changelog to prevent using the old repo layout'
192 ' dummy changelog to prevent using the old repo layout'
192 )
193 )
193 if self.ui.configbool('format', 'generaldelta', False):
194 if self.ui.configbool('format', 'generaldelta', False):
194 requirements.append("generaldelta")
195 requirements.append("generaldelta")
195 requirements = set(requirements)
196 requirements = set(requirements)
196 else:
197 else:
197 raise error.RepoError(_("repository %s not found") % path)
198 raise error.RepoError(_("repository %s not found") % path)
198 elif create:
199 elif create:
199 raise error.RepoError(_("repository %s already exists") % path)
200 raise error.RepoError(_("repository %s already exists") % path)
200 else:
201 else:
201 try:
202 try:
202 requirements = scmutil.readrequires(self.vfs, self.supported)
203 requirements = scmutil.readrequires(self.vfs, self.supported)
203 except IOError, inst:
204 except IOError, inst:
204 if inst.errno != errno.ENOENT:
205 if inst.errno != errno.ENOENT:
205 raise
206 raise
206 requirements = set()
207 requirements = set()
207
208
208 self.sharedpath = self.path
209 self.sharedpath = self.path
209 try:
210 try:
210 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
211 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
211 if not os.path.exists(s):
212 if not os.path.exists(s):
212 raise error.RepoError(
213 raise error.RepoError(
213 _('.hg/sharedpath points to nonexistent directory %s') % s)
214 _('.hg/sharedpath points to nonexistent directory %s') % s)
214 self.sharedpath = s
215 self.sharedpath = s
215 except IOError, inst:
216 except IOError, inst:
216 if inst.errno != errno.ENOENT:
217 if inst.errno != errno.ENOENT:
217 raise
218 raise
218
219
219 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
220 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
220 self.spath = self.store.path
221 self.spath = self.store.path
221 self.svfs = self.store.vfs
222 self.svfs = self.store.vfs
222 self.sopener = self.svfs
223 self.sopener = self.svfs
223 self.sjoin = self.store.join
224 self.sjoin = self.store.join
224 self.vfs.createmode = self.store.createmode
225 self.vfs.createmode = self.store.createmode
225 self._applyrequirements(requirements)
226 self._applyrequirements(requirements)
226 if create:
227 if create:
227 self._writerequirements()
228 self._writerequirements()
228
229
229
230
230 self._branchcache = None
231 self._branchcache = None
231 self._branchcachetip = None
232 self._branchcachetip = None
232 self.filterpats = {}
233 self.filterpats = {}
233 self._datafilters = {}
234 self._datafilters = {}
234 self._transref = self._lockref = self._wlockref = None
235 self._transref = self._lockref = self._wlockref = None
235
236
236 # A cache for various files under .hg/ that tracks file changes,
237 # A cache for various files under .hg/ that tracks file changes,
237 # (used by the filecache decorator)
238 # (used by the filecache decorator)
238 #
239 #
239 # Maps a property name to its util.filecacheentry
240 # Maps a property name to its util.filecacheentry
240 self._filecache = {}
241 self._filecache = {}
241
242
242 # hold sets of revision to be filtered
243 # hold sets of revision to be filtered
243 # should be cleared when something might have changed the filter value:
244 # should be cleared when something might have changed the filter value:
244 # - new changesets,
245 # - new changesets,
245 # - phase change,
246 # - phase change,
246 # - new obsolescence marker,
247 # - new obsolescence marker,
247 # - working directory parent change,
248 # - working directory parent change,
248 # - bookmark changes
249 # - bookmark changes
249 self.filteredrevcache = {}
250 self.filteredrevcache = {}
250
251
251 def close(self):
252 def close(self):
252 pass
253 pass
253
254
254 def _restrictcapabilities(self, caps):
255 def _restrictcapabilities(self, caps):
255 return caps
256 return caps
256
257
257 def _applyrequirements(self, requirements):
258 def _applyrequirements(self, requirements):
258 self.requirements = requirements
259 self.requirements = requirements
259 self.sopener.options = dict((r, 1) for r in requirements
260 self.sopener.options = dict((r, 1) for r in requirements
260 if r in self.openerreqs)
261 if r in self.openerreqs)
261
262
262 def _writerequirements(self):
263 def _writerequirements(self):
263 reqfile = self.opener("requires", "w")
264 reqfile = self.opener("requires", "w")
264 for r in self.requirements:
265 for r in self.requirements:
265 reqfile.write("%s\n" % r)
266 reqfile.write("%s\n" % r)
266 reqfile.close()
267 reqfile.close()
267
268
268 def _checknested(self, path):
269 def _checknested(self, path):
269 """Determine if path is a legal nested repository."""
270 """Determine if path is a legal nested repository."""
270 if not path.startswith(self.root):
271 if not path.startswith(self.root):
271 return False
272 return False
272 subpath = path[len(self.root) + 1:]
273 subpath = path[len(self.root) + 1:]
273 normsubpath = util.pconvert(subpath)
274 normsubpath = util.pconvert(subpath)
274
275
275 # XXX: Checking against the current working copy is wrong in
276 # XXX: Checking against the current working copy is wrong in
276 # the sense that it can reject things like
277 # the sense that it can reject things like
277 #
278 #
278 # $ hg cat -r 10 sub/x.txt
279 # $ hg cat -r 10 sub/x.txt
279 #
280 #
280 # if sub/ is no longer a subrepository in the working copy
281 # if sub/ is no longer a subrepository in the working copy
281 # parent revision.
282 # parent revision.
282 #
283 #
283 # However, it can of course also allow things that would have
284 # However, it can of course also allow things that would have
284 # been rejected before, such as the above cat command if sub/
285 # been rejected before, such as the above cat command if sub/
285 # is a subrepository now, but was a normal directory before.
286 # is a subrepository now, but was a normal directory before.
286 # The old path auditor would have rejected by mistake since it
287 # The old path auditor would have rejected by mistake since it
287 # panics when it sees sub/.hg/.
288 # panics when it sees sub/.hg/.
288 #
289 #
289 # All in all, checking against the working copy seems sensible
290 # All in all, checking against the working copy seems sensible
290 # since we want to prevent access to nested repositories on
291 # since we want to prevent access to nested repositories on
291 # the filesystem *now*.
292 # the filesystem *now*.
292 ctx = self[None]
293 ctx = self[None]
293 parts = util.splitpath(subpath)
294 parts = util.splitpath(subpath)
294 while parts:
295 while parts:
295 prefix = '/'.join(parts)
296 prefix = '/'.join(parts)
296 if prefix in ctx.substate:
297 if prefix in ctx.substate:
297 if prefix == normsubpath:
298 if prefix == normsubpath:
298 return True
299 return True
299 else:
300 else:
300 sub = ctx.sub(prefix)
301 sub = ctx.sub(prefix)
301 return sub.checknested(subpath[len(prefix) + 1:])
302 return sub.checknested(subpath[len(prefix) + 1:])
302 else:
303 else:
303 parts.pop()
304 parts.pop()
304 return False
305 return False
305
306
306 def peer(self):
307 def peer(self):
307 return localpeer(self) # not cached to avoid reference cycle
308 return localpeer(self) # not cached to avoid reference cycle
308
309
309 def unfiltered(self):
310 def unfiltered(self):
310 """Return unfiltered version of the repository
311 """Return unfiltered version of the repository
311
312
312 Intended to be ovewritten by filtered repo."""
313 Intended to be ovewritten by filtered repo."""
313 return self
314 return self
314
315
315 def filtered(self, name):
316 def filtered(self, name):
316 """Return a filtered version of a repository"""
317 """Return a filtered version of a repository"""
317 # build a new class with the mixin and the current class
318 # build a new class with the mixin and the current class
318 # (possibily subclass of the repo)
319 # (possibily subclass of the repo)
319 class proxycls(repoview.repoview, self.unfiltered().__class__):
320 class proxycls(repoview.repoview, self.unfiltered().__class__):
320 pass
321 pass
321 return proxycls(self, name)
322 return proxycls(self, name)
322
323
323 @repofilecache('bookmarks')
324 @repofilecache('bookmarks')
324 def _bookmarks(self):
325 def _bookmarks(self):
325 return bookmarks.bmstore(self)
326 return bookmarks.bmstore(self)
326
327
327 @repofilecache('bookmarks.current')
328 @repofilecache('bookmarks.current')
328 def _bookmarkcurrent(self):
329 def _bookmarkcurrent(self):
329 return bookmarks.readcurrent(self)
330 return bookmarks.readcurrent(self)
330
331
331 def bookmarkheads(self, bookmark):
332 def bookmarkheads(self, bookmark):
332 name = bookmark.split('@', 1)[0]
333 name = bookmark.split('@', 1)[0]
333 heads = []
334 heads = []
334 for mark, n in self._bookmarks.iteritems():
335 for mark, n in self._bookmarks.iteritems():
335 if mark.split('@', 1)[0] == name:
336 if mark.split('@', 1)[0] == name:
336 heads.append(n)
337 heads.append(n)
337 return heads
338 return heads
338
339
339 @storecache('phaseroots')
340 @storecache('phaseroots')
340 def _phasecache(self):
341 def _phasecache(self):
341 return phases.phasecache(self, self._phasedefaults)
342 return phases.phasecache(self, self._phasedefaults)
342
343
343 @storecache('obsstore')
344 @storecache('obsstore')
344 def obsstore(self):
345 def obsstore(self):
345 store = obsolete.obsstore(self.sopener)
346 store = obsolete.obsstore(self.sopener)
346 if store and not obsolete._enabled:
347 if store and not obsolete._enabled:
347 # message is rare enough to not be translated
348 # message is rare enough to not be translated
348 msg = 'obsolete feature not enabled but %i markers found!\n'
349 msg = 'obsolete feature not enabled but %i markers found!\n'
349 self.ui.warn(msg % len(list(store)))
350 self.ui.warn(msg % len(list(store)))
350 return store
351 return store
351
352
352 @unfilteredpropertycache
353 @unfilteredpropertycache
353 def hiddenrevs(self):
354 def hiddenrevs(self):
354 """hiddenrevs: revs that should be hidden by command and tools
355 """hiddenrevs: revs that should be hidden by command and tools
355
356
356 This set is carried on the repo to ease initialization and lazy
357 This set is carried on the repo to ease initialization and lazy
357 loading; it'll probably move back to changelog for efficiency and
358 loading; it'll probably move back to changelog for efficiency and
358 consistency reasons.
359 consistency reasons.
359
360
360 Note that the hiddenrevs will needs invalidations when
361 Note that the hiddenrevs will needs invalidations when
361 - a new changesets is added (possible unstable above extinct)
362 - a new changesets is added (possible unstable above extinct)
362 - a new obsolete marker is added (possible new extinct changeset)
363 - a new obsolete marker is added (possible new extinct changeset)
363
364
364 hidden changesets cannot have non-hidden descendants
365 hidden changesets cannot have non-hidden descendants
365 """
366 """
366 hidden = set()
367 hidden = set()
367 if self.obsstore:
368 if self.obsstore:
368 ### hide extinct changeset that are not accessible by any mean
369 ### hide extinct changeset that are not accessible by any mean
369 hiddenquery = 'extinct() - ::(. + bookmark())'
370 hiddenquery = 'extinct() - ::(. + bookmark())'
370 hidden.update(self.revs(hiddenquery))
371 hidden.update(self.revs(hiddenquery))
371 return hidden
372 return hidden
372
373
373 @storecache('00changelog.i')
374 @storecache('00changelog.i')
374 def changelog(self):
375 def changelog(self):
375 c = changelog.changelog(self.sopener)
376 c = changelog.changelog(self.sopener)
376 if 'HG_PENDING' in os.environ:
377 if 'HG_PENDING' in os.environ:
377 p = os.environ['HG_PENDING']
378 p = os.environ['HG_PENDING']
378 if p.startswith(self.root):
379 if p.startswith(self.root):
379 c.readpending('00changelog.i.a')
380 c.readpending('00changelog.i.a')
380 return c
381 return c
381
382
382 @storecache('00manifest.i')
383 @storecache('00manifest.i')
383 def manifest(self):
384 def manifest(self):
384 return manifest.manifest(self.sopener)
385 return manifest.manifest(self.sopener)
385
386
386 @repofilecache('dirstate')
387 @repofilecache('dirstate')
387 def dirstate(self):
388 def dirstate(self):
388 warned = [0]
389 warned = [0]
389 def validate(node):
390 def validate(node):
390 try:
391 try:
391 self.changelog.rev(node)
392 self.changelog.rev(node)
392 return node
393 return node
393 except error.LookupError:
394 except error.LookupError:
394 if not warned[0]:
395 if not warned[0]:
395 warned[0] = True
396 warned[0] = True
396 self.ui.warn(_("warning: ignoring unknown"
397 self.ui.warn(_("warning: ignoring unknown"
397 " working parent %s!\n") % short(node))
398 " working parent %s!\n") % short(node))
398 return nullid
399 return nullid
399
400
400 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
401 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
401
402
402 def __getitem__(self, changeid):
403 def __getitem__(self, changeid):
403 if changeid is None:
404 if changeid is None:
404 return context.workingctx(self)
405 return context.workingctx(self)
405 return context.changectx(self, changeid)
406 return context.changectx(self, changeid)
406
407
407 def __contains__(self, changeid):
408 def __contains__(self, changeid):
408 try:
409 try:
409 return bool(self.lookup(changeid))
410 return bool(self.lookup(changeid))
410 except error.RepoLookupError:
411 except error.RepoLookupError:
411 return False
412 return False
412
413
413 def __nonzero__(self):
414 def __nonzero__(self):
414 return True
415 return True
415
416
416 def __len__(self):
417 def __len__(self):
417 return len(self.changelog)
418 return len(self.changelog)
418
419
419 def __iter__(self):
420 def __iter__(self):
420 return iter(self.changelog)
421 return iter(self.changelog)
421
422
422 def revs(self, expr, *args):
423 def revs(self, expr, *args):
423 '''Return a list of revisions matching the given revset'''
424 '''Return a list of revisions matching the given revset'''
424 expr = revset.formatspec(expr, *args)
425 expr = revset.formatspec(expr, *args)
425 m = revset.match(None, expr)
426 m = revset.match(None, expr)
426 return [r for r in m(self, list(self))]
427 return [r for r in m(self, list(self))]
427
428
428 def set(self, expr, *args):
429 def set(self, expr, *args):
429 '''
430 '''
430 Yield a context for each matching revision, after doing arg
431 Yield a context for each matching revision, after doing arg
431 replacement via revset.formatspec
432 replacement via revset.formatspec
432 '''
433 '''
433 for r in self.revs(expr, *args):
434 for r in self.revs(expr, *args):
434 yield self[r]
435 yield self[r]
435
436
436 def url(self):
437 def url(self):
437 return 'file:' + self.root
438 return 'file:' + self.root
438
439
439 def hook(self, name, throw=False, **args):
440 def hook(self, name, throw=False, **args):
440 return hook.hook(self.ui, self, name, throw, **args)
441 return hook.hook(self.ui, self, name, throw, **args)
441
442
442 @unfilteredmethod
443 @unfilteredmethod
443 def _tag(self, names, node, message, local, user, date, extra={}):
444 def _tag(self, names, node, message, local, user, date, extra={}):
444 if isinstance(names, str):
445 if isinstance(names, str):
445 names = (names,)
446 names = (names,)
446
447
447 branches = self.branchmap()
448 branches = self.branchmap()
448 for name in names:
449 for name in names:
449 self.hook('pretag', throw=True, node=hex(node), tag=name,
450 self.hook('pretag', throw=True, node=hex(node), tag=name,
450 local=local)
451 local=local)
451 if name in branches:
452 if name in branches:
452 self.ui.warn(_("warning: tag %s conflicts with existing"
453 self.ui.warn(_("warning: tag %s conflicts with existing"
453 " branch name\n") % name)
454 " branch name\n") % name)
454
455
455 def writetags(fp, names, munge, prevtags):
456 def writetags(fp, names, munge, prevtags):
456 fp.seek(0, 2)
457 fp.seek(0, 2)
457 if prevtags and prevtags[-1] != '\n':
458 if prevtags and prevtags[-1] != '\n':
458 fp.write('\n')
459 fp.write('\n')
459 for name in names:
460 for name in names:
460 m = munge and munge(name) or name
461 m = munge and munge(name) or name
461 if (self._tagscache.tagtypes and
462 if (self._tagscache.tagtypes and
462 name in self._tagscache.tagtypes):
463 name in self._tagscache.tagtypes):
463 old = self.tags().get(name, nullid)
464 old = self.tags().get(name, nullid)
464 fp.write('%s %s\n' % (hex(old), m))
465 fp.write('%s %s\n' % (hex(old), m))
465 fp.write('%s %s\n' % (hex(node), m))
466 fp.write('%s %s\n' % (hex(node), m))
466 fp.close()
467 fp.close()
467
468
468 prevtags = ''
469 prevtags = ''
469 if local:
470 if local:
470 try:
471 try:
471 fp = self.opener('localtags', 'r+')
472 fp = self.opener('localtags', 'r+')
472 except IOError:
473 except IOError:
473 fp = self.opener('localtags', 'a')
474 fp = self.opener('localtags', 'a')
474 else:
475 else:
475 prevtags = fp.read()
476 prevtags = fp.read()
476
477
477 # local tags are stored in the current charset
478 # local tags are stored in the current charset
478 writetags(fp, names, None, prevtags)
479 writetags(fp, names, None, prevtags)
479 for name in names:
480 for name in names:
480 self.hook('tag', node=hex(node), tag=name, local=local)
481 self.hook('tag', node=hex(node), tag=name, local=local)
481 return
482 return
482
483
483 try:
484 try:
484 fp = self.wfile('.hgtags', 'rb+')
485 fp = self.wfile('.hgtags', 'rb+')
485 except IOError, e:
486 except IOError, e:
486 if e.errno != errno.ENOENT:
487 if e.errno != errno.ENOENT:
487 raise
488 raise
488 fp = self.wfile('.hgtags', 'ab')
489 fp = self.wfile('.hgtags', 'ab')
489 else:
490 else:
490 prevtags = fp.read()
491 prevtags = fp.read()
491
492
492 # committed tags are stored in UTF-8
493 # committed tags are stored in UTF-8
493 writetags(fp, names, encoding.fromlocal, prevtags)
494 writetags(fp, names, encoding.fromlocal, prevtags)
494
495
495 fp.close()
496 fp.close()
496
497
497 self.invalidatecaches()
498 self.invalidatecaches()
498
499
499 if '.hgtags' not in self.dirstate:
500 if '.hgtags' not in self.dirstate:
500 self[None].add(['.hgtags'])
501 self[None].add(['.hgtags'])
501
502
502 m = matchmod.exact(self.root, '', ['.hgtags'])
503 m = matchmod.exact(self.root, '', ['.hgtags'])
503 tagnode = self.commit(message, user, date, extra=extra, match=m)
504 tagnode = self.commit(message, user, date, extra=extra, match=m)
504
505
505 for name in names:
506 for name in names:
506 self.hook('tag', node=hex(node), tag=name, local=local)
507 self.hook('tag', node=hex(node), tag=name, local=local)
507
508
508 return tagnode
509 return tagnode
509
510
510 def tag(self, names, node, message, local, user, date):
511 def tag(self, names, node, message, local, user, date):
511 '''tag a revision with one or more symbolic names.
512 '''tag a revision with one or more symbolic names.
512
513
513 names is a list of strings or, when adding a single tag, names may be a
514 names is a list of strings or, when adding a single tag, names may be a
514 string.
515 string.
515
516
516 if local is True, the tags are stored in a per-repository file.
517 if local is True, the tags are stored in a per-repository file.
517 otherwise, they are stored in the .hgtags file, and a new
518 otherwise, they are stored in the .hgtags file, and a new
518 changeset is committed with the change.
519 changeset is committed with the change.
519
520
520 keyword arguments:
521 keyword arguments:
521
522
522 local: whether to store tags in non-version-controlled file
523 local: whether to store tags in non-version-controlled file
523 (default False)
524 (default False)
524
525
525 message: commit message to use if committing
526 message: commit message to use if committing
526
527
527 user: name of user to use if committing
528 user: name of user to use if committing
528
529
529 date: date tuple to use if committing'''
530 date: date tuple to use if committing'''
530
531
531 if not local:
532 if not local:
532 for x in self.status()[:5]:
533 for x in self.status()[:5]:
533 if '.hgtags' in x:
534 if '.hgtags' in x:
534 raise util.Abort(_('working copy of .hgtags is changed '
535 raise util.Abort(_('working copy of .hgtags is changed '
535 '(please commit .hgtags manually)'))
536 '(please commit .hgtags manually)'))
536
537
537 self.tags() # instantiate the cache
538 self.tags() # instantiate the cache
538 self._tag(names, node, message, local, user, date)
539 self._tag(names, node, message, local, user, date)
539
540
540 @filteredpropertycache
541 @filteredpropertycache
541 def _tagscache(self):
542 def _tagscache(self):
542 '''Returns a tagscache object that contains various tags related
543 '''Returns a tagscache object that contains various tags related
543 caches.'''
544 caches.'''
544
545
545 # This simplifies its cache management by having one decorated
546 # This simplifies its cache management by having one decorated
546 # function (this one) and the rest simply fetch things from it.
547 # function (this one) and the rest simply fetch things from it.
547 class tagscache(object):
548 class tagscache(object):
548 def __init__(self):
549 def __init__(self):
549 # These two define the set of tags for this repository. tags
550 # These two define the set of tags for this repository. tags
550 # maps tag name to node; tagtypes maps tag name to 'global' or
551 # maps tag name to node; tagtypes maps tag name to 'global' or
551 # 'local'. (Global tags are defined by .hgtags across all
552 # 'local'. (Global tags are defined by .hgtags across all
552 # heads, and local tags are defined in .hg/localtags.)
553 # heads, and local tags are defined in .hg/localtags.)
553 # They constitute the in-memory cache of tags.
554 # They constitute the in-memory cache of tags.
554 self.tags = self.tagtypes = None
555 self.tags = self.tagtypes = None
555
556
556 self.nodetagscache = self.tagslist = None
557 self.nodetagscache = self.tagslist = None
557
558
558 cache = tagscache()
559 cache = tagscache()
559 cache.tags, cache.tagtypes = self._findtags()
560 cache.tags, cache.tagtypes = self._findtags()
560
561
561 return cache
562 return cache
562
563
563 def tags(self):
564 def tags(self):
564 '''return a mapping of tag to node'''
565 '''return a mapping of tag to node'''
565 t = {}
566 t = {}
566 if self.changelog.filteredrevs:
567 if self.changelog.filteredrevs:
567 tags, tt = self._findtags()
568 tags, tt = self._findtags()
568 else:
569 else:
569 tags = self._tagscache.tags
570 tags = self._tagscache.tags
570 for k, v in tags.iteritems():
571 for k, v in tags.iteritems():
571 try:
572 try:
572 # ignore tags to unknown nodes
573 # ignore tags to unknown nodes
573 self.changelog.rev(v)
574 self.changelog.rev(v)
574 t[k] = v
575 t[k] = v
575 except (error.LookupError, ValueError):
576 except (error.LookupError, ValueError):
576 pass
577 pass
577 return t
578 return t
578
579
579 def _findtags(self):
580 def _findtags(self):
580 '''Do the hard work of finding tags. Return a pair of dicts
581 '''Do the hard work of finding tags. Return a pair of dicts
581 (tags, tagtypes) where tags maps tag name to node, and tagtypes
582 (tags, tagtypes) where tags maps tag name to node, and tagtypes
582 maps tag name to a string like \'global\' or \'local\'.
583 maps tag name to a string like \'global\' or \'local\'.
583 Subclasses or extensions are free to add their own tags, but
584 Subclasses or extensions are free to add their own tags, but
584 should be aware that the returned dicts will be retained for the
585 should be aware that the returned dicts will be retained for the
585 duration of the localrepo object.'''
586 duration of the localrepo object.'''
586
587
587 # XXX what tagtype should subclasses/extensions use? Currently
588 # XXX what tagtype should subclasses/extensions use? Currently
588 # mq and bookmarks add tags, but do not set the tagtype at all.
589 # mq and bookmarks add tags, but do not set the tagtype at all.
589 # Should each extension invent its own tag type? Should there
590 # Should each extension invent its own tag type? Should there
590 # be one tagtype for all such "virtual" tags? Or is the status
591 # be one tagtype for all such "virtual" tags? Or is the status
591 # quo fine?
592 # quo fine?
592
593
593 alltags = {} # map tag name to (node, hist)
594 alltags = {} # map tag name to (node, hist)
594 tagtypes = {}
595 tagtypes = {}
595
596
596 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
597 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
597 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
598 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
598
599
599 # Build the return dicts. Have to re-encode tag names because
600 # Build the return dicts. Have to re-encode tag names because
600 # the tags module always uses UTF-8 (in order not to lose info
601 # the tags module always uses UTF-8 (in order not to lose info
601 # writing to the cache), but the rest of Mercurial wants them in
602 # writing to the cache), but the rest of Mercurial wants them in
602 # local encoding.
603 # local encoding.
603 tags = {}
604 tags = {}
604 for (name, (node, hist)) in alltags.iteritems():
605 for (name, (node, hist)) in alltags.iteritems():
605 if node != nullid:
606 if node != nullid:
606 tags[encoding.tolocal(name)] = node
607 tags[encoding.tolocal(name)] = node
607 tags['tip'] = self.changelog.tip()
608 tags['tip'] = self.changelog.tip()
608 tagtypes = dict([(encoding.tolocal(name), value)
609 tagtypes = dict([(encoding.tolocal(name), value)
609 for (name, value) in tagtypes.iteritems()])
610 for (name, value) in tagtypes.iteritems()])
610 return (tags, tagtypes)
611 return (tags, tagtypes)
611
612
612 def tagtype(self, tagname):
613 def tagtype(self, tagname):
613 '''
614 '''
614 return the type of the given tag. result can be:
615 return the type of the given tag. result can be:
615
616
616 'local' : a local tag
617 'local' : a local tag
617 'global' : a global tag
618 'global' : a global tag
618 None : tag does not exist
619 None : tag does not exist
619 '''
620 '''
620
621
621 return self._tagscache.tagtypes.get(tagname)
622 return self._tagscache.tagtypes.get(tagname)
622
623
623 def tagslist(self):
624 def tagslist(self):
624 '''return a list of tags ordered by revision'''
625 '''return a list of tags ordered by revision'''
625 if not self._tagscache.tagslist:
626 if not self._tagscache.tagslist:
626 l = []
627 l = []
627 for t, n in self.tags().iteritems():
628 for t, n in self.tags().iteritems():
628 r = self.changelog.rev(n)
629 r = self.changelog.rev(n)
629 l.append((r, t, n))
630 l.append((r, t, n))
630 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
631 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
631
632
632 return self._tagscache.tagslist
633 return self._tagscache.tagslist
633
634
634 def nodetags(self, node):
635 def nodetags(self, node):
635 '''return the tags associated with a node'''
636 '''return the tags associated with a node'''
636 if not self._tagscache.nodetagscache:
637 if not self._tagscache.nodetagscache:
637 nodetagscache = {}
638 nodetagscache = {}
638 for t, n in self._tagscache.tags.iteritems():
639 for t, n in self._tagscache.tags.iteritems():
639 nodetagscache.setdefault(n, []).append(t)
640 nodetagscache.setdefault(n, []).append(t)
640 for tags in nodetagscache.itervalues():
641 for tags in nodetagscache.itervalues():
641 tags.sort()
642 tags.sort()
642 self._tagscache.nodetagscache = nodetagscache
643 self._tagscache.nodetagscache = nodetagscache
643 return self._tagscache.nodetagscache.get(node, [])
644 return self._tagscache.nodetagscache.get(node, [])
644
645
645 def nodebookmarks(self, node):
646 def nodebookmarks(self, node):
646 marks = []
647 marks = []
647 for bookmark, n in self._bookmarks.iteritems():
648 for bookmark, n in self._bookmarks.iteritems():
648 if n == node:
649 if n == node:
649 marks.append(bookmark)
650 marks.append(bookmark)
650 return sorted(marks)
651 return sorted(marks)
651
652
652 def _cacheabletip(self):
653 def _cacheabletip(self):
653 """tip-most revision stable enought to used in persistent cache
654 """tip-most revision stable enought to used in persistent cache
654
655
655 This function is overwritten by MQ to ensure we do not write cache for
656 This function is overwritten by MQ to ensure we do not write cache for
656 a part of the history that will likely change.
657 a part of the history that will likely change.
657
658
658 Efficient handling of filtered revision in branchcache should offer a
659 Efficient handling of filtered revision in branchcache should offer a
659 better alternative. But we are using this approach until it is ready.
660 better alternative. But we are using this approach until it is ready.
660 """
661 """
661 cl = self.changelog
662 cl = self.changelog
662 return cl.rev(cl.tip())
663 return cl.rev(cl.tip())
663
664
664 @unfilteredmethod # Until we get a smarter cache management
665 @unfilteredmethod # Until we get a smarter cache management
665 def updatebranchcache(self):
666 def updatebranchcache(self):
666 cl = self.changelog
667 cl = self.changelog
667 tip = cl.tip()
668 tip = cl.tip()
668 if self._branchcache is not None and self._branchcachetip == tip:
669 if self._branchcache is not None and self._branchcachetip == tip:
669 return
670 return
670
671
671 oldtip = self._branchcachetip
672 oldtip = self._branchcachetip
672 if oldtip is None or oldtip not in cl.nodemap:
673 if oldtip is None or oldtip not in cl.nodemap:
673 partial, last, lrev = self._readbranchcache()
674 partial, last, lrev = self._readbranchcache()
674 else:
675 else:
675 lrev = cl.rev(oldtip)
676 lrev = cl.rev(oldtip)
676 partial = self._branchcache
677 partial = self._branchcache
677
678
678 catip = self._cacheabletip()
679 catip = self._cacheabletip()
679 # if lrev == catip: cache is already up to date
680 # if lrev == catip: cache is already up to date
680 # if lrev > catip: we have uncachable element in `partial` can't write
681 # if lrev > catip: we have uncachable element in `partial` can't write
681 # on disk
682 # on disk
682 if lrev < catip:
683 if lrev < catip:
683 ctxgen = (self[r] for r in cl.revs(lrev + 1, catip))
684 ctxgen = (self[r] for r in cl.revs(lrev + 1, catip))
684 self._updatebranchcache(partial, ctxgen)
685 self._updatebranchcache(partial, ctxgen)
685 self._writebranchcache(partial, cl.node(catip), catip)
686 branchmap.write(self, partial, cl.node(catip), catip)
686 lrev = catip
687 lrev = catip
687 # If cacheable tip were lower than actual tip, we need to update the
688 # If cacheable tip were lower than actual tip, we need to update the
688 # cache up to tip. This update (from cacheable to actual tip) is not
689 # cache up to tip. This update (from cacheable to actual tip) is not
689 # written to disk since it's not cacheable.
690 # written to disk since it's not cacheable.
690 tiprev = len(self) - 1
691 tiprev = len(self) - 1
691 if lrev < tiprev:
692 if lrev < tiprev:
692 ctxgen = (self[r] for r in cl.revs(lrev + 1, tiprev))
693 ctxgen = (self[r] for r in cl.revs(lrev + 1, tiprev))
693 self._updatebranchcache(partial, ctxgen)
694 self._updatebranchcache(partial, ctxgen)
694 self._branchcache = partial
695 self._branchcache = partial
695 self._branchcachetip = tip
696 self._branchcachetip = tip
696
697
697 def branchmap(self):
698 def branchmap(self):
698 '''returns a dictionary {branch: [branchheads]}'''
699 '''returns a dictionary {branch: [branchheads]}'''
699 if self.changelog.filteredrevs:
700 if self.changelog.filteredrevs:
700 # some changeset are excluded we can't use the cache
701 # some changeset are excluded we can't use the cache
701 branchmap = {}
702 branchmap = {}
702 self._updatebranchcache(branchmap, (self[r] for r in self))
703 self._updatebranchcache(branchmap, (self[r] for r in self))
703 return branchmap
704 return branchmap
704 else:
705 else:
705 self.updatebranchcache()
706 self.updatebranchcache()
706 return self._branchcache
707 return self._branchcache
707
708
708
709
709 def _branchtip(self, heads):
710 def _branchtip(self, heads):
710 '''return the tipmost branch head in heads'''
711 '''return the tipmost branch head in heads'''
711 tip = heads[-1]
712 tip = heads[-1]
712 for h in reversed(heads):
713 for h in reversed(heads):
713 if not self[h].closesbranch():
714 if not self[h].closesbranch():
714 tip = h
715 tip = h
715 break
716 break
716 return tip
717 return tip
717
718
718 def branchtip(self, branch):
719 def branchtip(self, branch):
719 '''return the tip node for a given branch'''
720 '''return the tip node for a given branch'''
720 if branch not in self.branchmap():
721 if branch not in self.branchmap():
721 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
722 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
722 return self._branchtip(self.branchmap()[branch])
723 return self._branchtip(self.branchmap()[branch])
723
724
724 def branchtags(self):
725 def branchtags(self):
725 '''return a dict where branch names map to the tipmost head of
726 '''return a dict where branch names map to the tipmost head of
726 the branch, open heads come before closed'''
727 the branch, open heads come before closed'''
727 bt = {}
728 bt = {}
728 for bn, heads in self.branchmap().iteritems():
729 for bn, heads in self.branchmap().iteritems():
729 bt[bn] = self._branchtip(heads)
730 bt[bn] = self._branchtip(heads)
730 return bt
731 return bt
731
732
732 @unfilteredmethod # Until we get a smarter cache management
733 @unfilteredmethod # Until we get a smarter cache management
733 def _readbranchcache(self):
734 def _readbranchcache(self):
734 partial = {}
735 partial = {}
735 try:
736 try:
736 f = self.opener("cache/branchheads")
737 f = self.opener("cache/branchheads")
737 lines = f.read().split('\n')
738 lines = f.read().split('\n')
738 f.close()
739 f.close()
739 except (IOError, OSError):
740 except (IOError, OSError):
740 return {}, nullid, nullrev
741 return {}, nullid, nullrev
741
742
742 try:
743 try:
743 last, lrev = lines.pop(0).split(" ", 1)
744 last, lrev = lines.pop(0).split(" ", 1)
744 last, lrev = bin(last), int(lrev)
745 last, lrev = bin(last), int(lrev)
745 if lrev >= len(self) or self[lrev].node() != last:
746 if lrev >= len(self) or self[lrev].node() != last:
746 # invalidate the cache
747 # invalidate the cache
747 raise ValueError('invalidating branch cache (tip differs)')
748 raise ValueError('invalidating branch cache (tip differs)')
748 for l in lines:
749 for l in lines:
749 if not l:
750 if not l:
750 continue
751 continue
751 node, label = l.split(" ", 1)
752 node, label = l.split(" ", 1)
752 label = encoding.tolocal(label.strip())
753 label = encoding.tolocal(label.strip())
753 if not node in self:
754 if not node in self:
754 raise ValueError('invalidating branch cache because node '+
755 raise ValueError('invalidating branch cache because node '+
755 '%s does not exist' % node)
756 '%s does not exist' % node)
756 partial.setdefault(label, []).append(bin(node))
757 partial.setdefault(label, []).append(bin(node))
757 except KeyboardInterrupt:
758 except KeyboardInterrupt:
758 raise
759 raise
759 except Exception, inst:
760 except Exception, inst:
760 if self.ui.debugflag:
761 if self.ui.debugflag:
761 self.ui.warn(str(inst), '\n')
762 self.ui.warn(str(inst), '\n')
762 partial, last, lrev = {}, nullid, nullrev
763 partial, last, lrev = {}, nullid, nullrev
763 return partial, last, lrev
764 return partial, last, lrev
764
765
765 @unfilteredmethod # Until we get a smarter cache management
766 @unfilteredmethod # Until we get a smarter cache management
766 def _writebranchcache(self, branches, tip, tiprev):
767 try:
768 f = self.opener("cache/branchheads", "w", atomictemp=True)
769 f.write("%s %s\n" % (hex(tip), tiprev))
770 for label, nodes in branches.iteritems():
771 for node in nodes:
772 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
773 f.close()
774 except (IOError, OSError):
775 pass
776
777 @unfilteredmethod # Until we get a smarter cache management
778 def _updatebranchcache(self, partial, ctxgen):
767 def _updatebranchcache(self, partial, ctxgen):
779 """Given a branchhead cache, partial, that may have extra nodes or be
768 """Given a branchhead cache, partial, that may have extra nodes or be
780 missing heads, and a generator of nodes that are at least a superset of
769 missing heads, and a generator of nodes that are at least a superset of
781 heads missing, this function updates partial to be correct.
770 heads missing, this function updates partial to be correct.
782 """
771 """
783 # collect new branch entries
772 # collect new branch entries
784 newbranches = {}
773 newbranches = {}
785 for c in ctxgen:
774 for c in ctxgen:
786 newbranches.setdefault(c.branch(), []).append(c.node())
775 newbranches.setdefault(c.branch(), []).append(c.node())
787 # if older branchheads are reachable from new ones, they aren't
776 # if older branchheads are reachable from new ones, they aren't
788 # really branchheads. Note checking parents is insufficient:
777 # really branchheads. Note checking parents is insufficient:
789 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
778 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
790 for branch, newnodes in newbranches.iteritems():
779 for branch, newnodes in newbranches.iteritems():
791 bheads = partial.setdefault(branch, [])
780 bheads = partial.setdefault(branch, [])
792 # Remove candidate heads that no longer are in the repo (e.g., as
781 # Remove candidate heads that no longer are in the repo (e.g., as
793 # the result of a strip that just happened). Avoid using 'node in
782 # the result of a strip that just happened). Avoid using 'node in
794 # self' here because that dives down into branchcache code somewhat
783 # self' here because that dives down into branchcache code somewhat
795 # recursively.
784 # recursively.
796 bheadrevs = [self.changelog.rev(node) for node in bheads
785 bheadrevs = [self.changelog.rev(node) for node in bheads
797 if self.changelog.hasnode(node)]
786 if self.changelog.hasnode(node)]
798 newheadrevs = [self.changelog.rev(node) for node in newnodes
787 newheadrevs = [self.changelog.rev(node) for node in newnodes
799 if self.changelog.hasnode(node)]
788 if self.changelog.hasnode(node)]
800 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
789 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
801 # Remove duplicates - nodes that are in newheadrevs and are already
790 # Remove duplicates - nodes that are in newheadrevs and are already
802 # in bheadrevs. This can happen if you strip a node whose parent
791 # in bheadrevs. This can happen if you strip a node whose parent
803 # was already a head (because they're on different branches).
792 # was already a head (because they're on different branches).
804 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
793 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
805
794
806 # Starting from tip means fewer passes over reachable. If we know
795 # Starting from tip means fewer passes over reachable. If we know
807 # the new candidates are not ancestors of existing heads, we don't
796 # the new candidates are not ancestors of existing heads, we don't
808 # have to examine ancestors of existing heads
797 # have to examine ancestors of existing heads
809 if ctxisnew:
798 if ctxisnew:
810 iterrevs = sorted(newheadrevs)
799 iterrevs = sorted(newheadrevs)
811 else:
800 else:
812 iterrevs = list(bheadrevs)
801 iterrevs = list(bheadrevs)
813
802
814 # This loop prunes out two kinds of heads - heads that are
803 # This loop prunes out two kinds of heads - heads that are
815 # superseded by a head in newheadrevs, and newheadrevs that are not
804 # superseded by a head in newheadrevs, and newheadrevs that are not
816 # heads because an existing head is their descendant.
805 # heads because an existing head is their descendant.
817 while iterrevs:
806 while iterrevs:
818 latest = iterrevs.pop()
807 latest = iterrevs.pop()
819 if latest not in bheadrevs:
808 if latest not in bheadrevs:
820 continue
809 continue
821 ancestors = set(self.changelog.ancestors([latest],
810 ancestors = set(self.changelog.ancestors([latest],
822 bheadrevs[0]))
811 bheadrevs[0]))
823 if ancestors:
812 if ancestors:
824 bheadrevs = [b for b in bheadrevs if b not in ancestors]
813 bheadrevs = [b for b in bheadrevs if b not in ancestors]
825 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
814 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
826
815
827 # There may be branches that cease to exist when the last commit in the
816 # There may be branches that cease to exist when the last commit in the
828 # branch was stripped. This code filters them out. Note that the
817 # branch was stripped. This code filters them out. Note that the
829 # branch that ceased to exist may not be in newbranches because
818 # branch that ceased to exist may not be in newbranches because
830 # newbranches is the set of candidate heads, which when you strip the
819 # newbranches is the set of candidate heads, which when you strip the
831 # last commit in a branch will be the parent branch.
820 # last commit in a branch will be the parent branch.
832 for branch in partial.keys():
821 for branch in partial.keys():
833 nodes = [head for head in partial[branch]
822 nodes = [head for head in partial[branch]
834 if self.changelog.hasnode(head)]
823 if self.changelog.hasnode(head)]
835 if not nodes:
824 if not nodes:
836 del partial[branch]
825 del partial[branch]
837
826
838 def lookup(self, key):
827 def lookup(self, key):
839 return self[key].node()
828 return self[key].node()
840
829
841 def lookupbranch(self, key, remote=None):
830 def lookupbranch(self, key, remote=None):
842 repo = remote or self
831 repo = remote or self
843 if key in repo.branchmap():
832 if key in repo.branchmap():
844 return key
833 return key
845
834
846 repo = (remote and remote.local()) and remote or self
835 repo = (remote and remote.local()) and remote or self
847 return repo[key].branch()
836 return repo[key].branch()
848
837
849 def known(self, nodes):
838 def known(self, nodes):
850 nm = self.changelog.nodemap
839 nm = self.changelog.nodemap
851 pc = self._phasecache
840 pc = self._phasecache
852 result = []
841 result = []
853 for n in nodes:
842 for n in nodes:
854 r = nm.get(n)
843 r = nm.get(n)
855 resp = not (r is None or pc.phase(self, r) >= phases.secret)
844 resp = not (r is None or pc.phase(self, r) >= phases.secret)
856 result.append(resp)
845 result.append(resp)
857 return result
846 return result
858
847
    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records; the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them only
            # for entries added to the first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

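    # Editor's sketch of the configuration _loadfilter() reads (an assumed
    # hgrc example; the pattern and command are hypothetical):
    #
    #   [encode]
    #   *.txt = pipe: dos2unix
    #
    # Each pattern/command pair becomes a (matcher, function, params) triple,
    # and _filter() above runs the first triple whose matcher accepts the
    # filename.
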
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

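    # Editor's usage sketch: callers wrap the transaction in try/finally and
    # only close() on success, as commitctx() further below does:
    #
    #   tr = repo.transaction("commit")
    #   try:
    #       ...           # write store data through the transaction
    #       tr.close()    # commit the journal
    #   finally:
    #       tr.release()  # rolls back automatically if close() never ran
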
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcache = None # in UTF-8
        self.unfiltered()._branchcachetip = None
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        if 'hiddenrevs' in vars(self):
            del self.hiddenrevs

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly reread the dirstate (i.e. restoring it to a
        previously known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

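    # Editor's sketch (not in the original source): callers that need both
    # locks conventionally take wlock before lock, mirroring rollback()
    # above:
    #
    #   wlock = repo.wlock()
    #   try:
    #       lock = repo.lock()
    #       try:
    #           ...  # mutate both the store and the working copy
    #       finally:
    #           lock.release()
    #   finally:
    #       wlock.release()
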
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

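    # Editor's illustration (assumed values): after a rename of foo to bar,
    # the new filelog revision of bar carries metadata roughly like
    #
    #   meta = {"copy": "foo", "copyrev": "<40-hex filenode of foo>"}
    #
    # and its first parent is set to nullid, which tells readers to follow
    # the copy metadata instead of the parent link.
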
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

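    # Editor's usage sketch (hypothetical values):
    #
    #   node = repo.commit(text='fix parser', user='alice <a@example.org>')
    #   if node is None:
    #       ui.status('nothing changed\n')
    #
    # commit() returns None when there is nothing to commit on the current
    # branch, and the new changeset's node otherwise.
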
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase is 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

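    # Editor's note (assumed configuration example): newcommitphase() reads
    # the phases.new-commit setting; with the default of 'draft' the
    # retractboundary() call above marks the new node draft, while
    #
    #   [phases]
    #   new-commit = secret
    #
    # would retract the secret boundary to include it instead.
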
    @unfilteredmethod
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchhead cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerate it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            # changed in this revision: the write logic now lives in
            # branchmap.write() (formerly self._writebranchcache())
            branchmap.write(self, self._branchcache, self.changelog.tip(),
                            tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

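    # Editor's sketch (hypothetical caller): strip-like code that knows the
    # candidate heads can keep the cache warm, e.g.
    #
    #   repo.destroyed(newheadnodes=set([p1node, p2node]))
    #
    # whereas rollback(), which doesn't know them, calls repo.destroyed()
    # bare (see the TODO in _rollback above).
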
1599 def walk(self, match, node=None):
1588 def walk(self, match, node=None):
1600 '''
1589 '''
1601 walk recursively through the directory tree or a given
1590 walk recursively through the directory tree or a given
1602 changeset, finding all files matched by the match
1591 changeset, finding all files matched by the match
1603 function
1592 function
1604 '''
1593 '''
1605 return self[node].walk(match)
1594 return self[node].walk(match)
1606
1595
1607 def status(self, node1='.', node2=None, match=None,
1596 def status(self, node1='.', node2=None, match=None,
1608 ignored=False, clean=False, unknown=False,
1597 ignored=False, clean=False, unknown=False,
1609 listsubrepos=False):
1598 listsubrepos=False):
1610 """return status of files between two nodes or node and working
1599 """return status of files between two nodes or node and working
1611 directory.
1600 directory.
1612
1601
1613 If node1 is None, use the first dirstate parent instead.
1602 If node1 is None, use the first dirstate parent instead.
1614 If node2 is None, compare node1 with working directory.
1603 If node2 is None, compare node1 with working directory.
1615 """
1604 """
1616
1605
1617 def mfmatches(ctx):
1606 def mfmatches(ctx):
1618 mf = ctx.manifest().copy()
1607 mf = ctx.manifest().copy()
1619 if match.always():
1608 if match.always():
1620 return mf
1609 return mf
1621 for fn in mf.keys():
1610 for fn in mf.keys():
1622 if not match(fn):
1611 if not match(fn):
1623 del mf[fn]
1612 del mf[fn]
1624 return mf
1613 return mf
1625
1614
1626 if isinstance(node1, context.changectx):
1615 if isinstance(node1, context.changectx):
1627 ctx1 = node1
1616 ctx1 = node1
1628 else:
1617 else:
1629 ctx1 = self[node1]
1618 ctx1 = self[node1]
1630 if isinstance(node2, context.changectx):
1619 if isinstance(node2, context.changectx):
1631 ctx2 = node2
1620 ctx2 = node2
1632 else:
1621 else:
1633 ctx2 = self[node2]
1622 ctx2 = self[node2]
1634
1623
1635 working = ctx2.rev() is None
1624 working = ctx2.rev() is None
1636 parentworking = working and ctx1 == self['.']
1625 parentworking = working and ctx1 == self['.']
1637 match = match or matchmod.always(self.root, self.getcwd())
1626 match = match or matchmod.always(self.root, self.getcwd())
1638 listignored, listclean, listunknown = ignored, clean, unknown
1627 listignored, listclean, listunknown = ignored, clean, unknown
1639
1628
1640 # load earliest manifest first for caching reasons
1629 # load earliest manifest first for caching reasons
1641 if not working and ctx2.rev() < ctx1.rev():
1630 if not working and ctx2.rev() < ctx1.rev():
1642 ctx2.manifest()
1631 ctx2.manifest()
1643
1632
1644 if not parentworking:
1633 if not parentworking:
1645 def bad(f, msg):
1634 def bad(f, msg):
1646 # 'f' may be a directory pattern from 'match.files()',
1635 # 'f' may be a directory pattern from 'match.files()',
1647 # so 'f not in ctx1' is not enough
1636 # so 'f not in ctx1' is not enough
1648 if f not in ctx1 and f not in ctx1.dirs():
1637 if f not in ctx1 and f not in ctx1.dirs():
1649 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1638 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1650 match.bad = bad
1639 match.bad = bad
1651
1640
1652 if working: # we need to scan the working dir
1641 if working: # we need to scan the working dir
1653 subrepos = []
1642 subrepos = []
1654 if '.hgsub' in self.dirstate:
1643 if '.hgsub' in self.dirstate:
1655 subrepos = ctx2.substate.keys()
1644 subrepos = ctx2.substate.keys()
1656 s = self.dirstate.status(match, subrepos, listignored,
1645 s = self.dirstate.status(match, subrepos, listignored,
1657 listclean, listunknown)
1646 listclean, listunknown)
1658 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1647 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1659
1648
1660 # check for any possibly clean files
1649 # check for any possibly clean files
1661 if parentworking and cmp:
1650 if parentworking and cmp:
1662 fixup = []
1651 fixup = []
1663 # do a full compare of any files that might have changed
1652 # do a full compare of any files that might have changed
1664 for f in sorted(cmp):
1653 for f in sorted(cmp):
1665 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1654 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1666 or ctx1[f].cmp(ctx2[f])):
1655 or ctx1[f].cmp(ctx2[f])):
1667 modified.append(f)
1656 modified.append(f)
1668 else:
1657 else:
1669 fixup.append(f)
1658 fixup.append(f)
1670
1659
1671 # update dirstate for files that are actually clean
1660 # update dirstate for files that are actually clean
1672 if fixup:
1661 if fixup:
1673 if listclean:
1662 if listclean:
1674 clean += fixup
1663 clean += fixup
1675
1664
1676 try:
1665 try:
1677 # updating the dirstate is optional
1666 # updating the dirstate is optional
1678 # so we don't wait on the lock
1667 # so we don't wait on the lock
1679 wlock = self.wlock(False)
1668 wlock = self.wlock(False)
1680 try:
1669 try:
1681 for f in fixup:
1670 for f in fixup:
1682 self.dirstate.normal(f)
1671 self.dirstate.normal(f)
1683 finally:
1672 finally:
1684 wlock.release()
1673 wlock.release()
1685 except error.LockError:
1674 except error.LockError:
1686 pass
1675 pass
1687
1676
1688 if not parentworking:
        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

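    # Illustrative usage sketch (not part of this file; `repo` is assumed
    # to be an open localrepository): the tuple built above is what callers
    # of status() unpack, in this fixed order:
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, ignored=True, clean=True)
    #   for f in modified:
    #       print 'M %s' % f
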
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

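    # Illustrative sketch (assumes an open localrepository `repo`): both
    # helpers return lists of head nodes ordered newest-first:
    #
    #   allheads = repo.heads()                # every topological head
    #   dheads = repo.branchheads('default')   # open heads of one branch
    #   both = repo.branchheads('default', closed=True)  # include closed
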
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

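    # Worked example (illustrative): the `i == f` test with f doubling means
    # the loop records the nodes at distances 1, 2, 4, 8, ... below `top`,
    # a logarithmic sample of each range. With top at rev 100 and bottom at
    # rev 90 in a linear history:
    #
    #   repo.between([(repo[100].node(), repo[90].node())])
    #   # -> [[<node of 99>, <node of 98>, <node of 96>, <node of 92>]]
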
    def pull(self, remote, heads=None, force=False):
        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible,
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset,
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
                    self.invalidatevolatilesets()
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

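    # Usage sketch (illustrative; assumes the hg.repository/hg.peer helpers
    # of this era). This mirrors what the pull command does:
    #
    #   from mercurial import hg, ui as uimod
    #   u = uimod.ui()
    #   repo = hg.repository(u, '/path/to/local')
    #   other = hg.peer(u, {}, 'http://example.com/repo')
    #   repo.pull(other)                    # pull everything new
    #   repo.pull(other, heads=[somenode])  # only ancestors of somenode
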
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        unfi = self.unfiltered()
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(unfi, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(unfi, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete
                        # then, skip the iteration
                        if unfi.obsstore:
                            # these messages are defined here to stay within
                            # the 80-char limit
                            mso = _("push includes obsolete changeset: %s!")
                            msu = _("push includes unstable changeset: %s!")
                            msb = _("push includes bumped changeset: %s!")
                            msd = _("push includes divergent changeset: %s!")
                            # If we are pushing and there is at least one
                            # obsolete or unstable changeset in missing, at
                            # least one of the missing heads will be obsolete
                            # or unstable. So checking heads only is ok.
                            for node in outgoing.missingheads:
                                ctx = unfi[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(msu % ctx)
                                elif ctx.bumped():
                                    raise util.Abort(msb % ctx)
                                elif ctx.divergent():
                                    raise util.Abort(msd % ctx)
                        discovery.checkheads(unfi, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # all-out push failed; synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads) )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads that are parents of missing
                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly a root; we may want to ensure it is, but
                    # XXX that is costly.
                    outdated = unfi.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in unfi._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in unfi:
                    cr = unfi[nr]
                    cl = unfi[nl]
                    if bookmarks.validdest(unfi, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

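    # Illustrative sketch of interpreting the return value documented in
    # the docstring above (assumes `repo` and a peer `other` as in the pull
    # sketch):
    #
    #   ret = repo.push(other, newbranch=True)
    #   if ret is None:
    #       print 'no outgoing changesets'
    #   elif ret == 0:
    #       print 'push failed with an HTTP error'
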
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = cl.ancestors([cl.rev(n) for n in bases])
        return self._changegroupsubset(common, csets, heads, source)

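    # Illustrative call (node values assumed): bundle everything that
    # descends from `base` and is an ancestor of `head`; per the docstring,
    # the result is read as successive raw changegroup chunks:
    #
    #   cg = repo.changegroupsubset([base], [head], 'bundle')
    #   chunk = cg.read(4096)
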
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            hasnode = cl.hasnode
            common = [n for n in common if hasnode(n)]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

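    # Illustrative sketch: this is the wire-protocol entry point; `heads`
    # and `common` are node lists produced by discovery, so the bundle
    # covers roughly ancestors(heads) - ancestors(common) (nodes assumed):
    #
    #   cg = repo.getbundle('serve', heads=[h1, h2], common=[c1])
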
    @unfilteredmethod
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

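    # Note on the 'bundle.reorder' knob read above: besides 'auto' (let each
    # revlog decide), it accepts anything util.parsebool understands, set
    # from a user's hgrc, e.g.:
    #
    #   [bundle]
    #   reorder = false
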
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    @unfilteredmethod
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

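    # Illustrative sketch (assumes the changegroup.writebundle helper of
    # this era): the chunkbuffer returned here is what `hg bundle`
    # serializes to disk:
    #
    #   cg = repo._changegroup(nodes, 'bundle')
    #   changegroup.writebundle(cg, 'out.hg', 'HG10BZ')
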
    @unfilteredmethod
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            self.invalidatevolatilesets()

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                self.updatebranchcache()
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

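    # Worked example (illustrative) of the return convention above: adding
    # a changegroup that creates two extra heads returns 1 + 2 = 3; losing
    # one head returns -1 - 1 = -2; an unchanged head count returns 1, and
    # 0 is reserved for "nothing changed or no source":
    #
    #   result = repo.addchangegroup(cg, 'pull', other.url())
    #   if result > 1:
    #       print '%d new heads' % (result - 1)
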
2559 def stream_in(self, remote, requirements):
2548 def stream_in(self, remote, requirements):
2560 lock = self.lock()
2549 lock = self.lock()
2561 try:
2550 try:
2562 # Save remote branchmap. We will use it later
2551 # Save remote branchmap. We will use it later
2563 # to speed up branchcache creation
2552 # to speed up branchcache creation
2564 rbranchmap = None
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                self.branchcache = rbranchmap
                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
-                   self._writebranchcache(self.branchcache,
-                                          self[rtiprev].node(), rtiprev)
+                   branchmap.write(self, self.branchcache,
+                                   self[rtiprev].node(), rtiprev)
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

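For context on what stream_in parses above: the stream body is a status line holding a single integer, a "total_files total_bytes" summary line, and then, for each file, a "name\0size" header line followed by exactly size raw bytes. A minimal producer sketch of that layout (illustrative only, not Mercurial's actual streamclone server code):

    def emit_stream(files):
        # files: dict mapping store path -> raw revlog bytes (str)
        yield '0\n'                                # status line: 0 means OK
        total = sum(len(d) for d in files.itervalues())
        yield '%d %d\n' % (len(files), total)      # "total_files total_bytes"
        for name, data in files.iteritems():
            yield '%s\0%d\n' % (name, len(data))   # per-file header
            yield data                             # body: exactly len(data) bytes

stream_in consumes this in lockstep: one fp.readline() per header, then util.filechunkiter(fp, limit=size) for each body.
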
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

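The set-difference test in clone is the entire negotiation: the server advertises its store requirements in the 'streamreqs' capability as a comma-separated list, and the client streams only if it understands every one of them. A small worked example (the capability value and client formats here are hypothetical):

    streamreqs = set('revlogv1,generaldelta'.split(','))       # server's advertised formats
    supported = set(['revlogv1', 'generaldelta', 'fncache'])   # client's supportedformats
    can_stream = not streamreqs - supported   # empty difference -> safe to stream
    # can_stream is True here; remove 'generaldelta' from supported and the
    # difference is non-empty, so clone falls back to self.pull(remote, heads)
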
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

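Both wrappers delegate to the pushkey module, which dispatches on the namespace; 'bookmarks' and 'phases' are among the namespaces registered in this era. A hedged usage sketch (repo and newhex are assumed to exist; return values are namespace-dependent):

    marks = repo.listkeys('bookmarks')   # {bookmark name: hex node}
    # compare-and-swap a bookmark: passing the expected old value lets the
    # server reject a racing update; pushkey returns a truthy value on success
    ok = repo.pushkey('bookmarks', 'stable', marks.get('stable', ''), newhex)
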
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

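savecommitmessage is the recovery hook for interrupted commits: the text lands in .hg/last-message.txt and the return value is that path made relative for display. A usage sketch (variable names are illustrative):

    msgpath = repo.savecommitmessage('branchmap: extract write logic')
    # msgpath names .hg/last-message.txt relative to the cwd, suitable for
    # an abort hint such as "commit message saved in %s" % msgpath
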
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

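aftertrans and undoname cooperate around transaction teardown: the journal files written while a transaction is open are renamed to their undo counterparts when it closes, which is what later makes rollback possible. A small pairing sketch using the two helpers above (the file names are the conventional ones, shown for illustration):

    pairs = [(j, undoname(j)) for j in ('.hg/store/journal',
                                        '.hg/journal.dirstate')]
    # undoname('.hg/store/journal') == '.hg/store/undo'
    onclose = aftertrans(pairs)  # a plain closure, deliberately holding no
                                 # repo reference so destructors still run
    onclose()                    # performs the renames, ignoring missing files
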
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
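
instance and islocal are the module's entry points for repository dispatch: higher layers keep a table from URL scheme to module and call these two functions on whichever module matches. A hypothetical dispatcher in that spirit (not Mercurial's actual hg.py):

    import localrepo

    schemes = {'file': localrepo}  # the real table maps many schemes; this is a sketch

    def open_repo(ui, path, create=False):
        scheme = path.split('://', 1)[0] if '://' in path else 'file'
        module = schemes[scheme]
        # islocal tells the caller whether a direct repository object
        # (rather than a wire peer) can be built for this path
        if not module.islocal(path):
            raise Exception('%s is not locally reachable' % path)
        return module.instance(ui, path, create)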