branchmap: _updatebranchmap does not need to be filtered...
Pierre-Yves David, r18119:5264464b, branch: default
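This revision removes the `@unfilteredmethod` decorator from `_updatebranchcache` (old lines 733-734 become new line 733): per the commit message, the update helper does not need to be re-dispatched to the unfiltered repository, presumably because it only fills in the `partial` map from whatever contexts its callers hand it. For orientation, `unfilteredmethod` is defined near the top of this file (lines 55-59). A minimal sketch of the pattern it implements, with `examplemethod` as a hypothetical stand-in for a real decorated method:

    def unfilteredmethod(orig):
        # re-dispatch every call to the unfiltered repository
        def wrapper(repo, *args, **kwargs):
            return orig(repo.unfiltered(), *args, **kwargs)
        return wrapper

    class localrepository(object):
        def unfiltered(self):
            # the repoview proxy built by filtered() overrides this to
            # return the underlying unfiltered repository
            return self

        @unfilteredmethod
        def examplemethod(self):
            # hypothetical; always sees the full, unfiltered changelog
            pass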
@@ -1,2680 +1,2679 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import lock, transaction, store, encoding, base85
11 import lock, transaction, store, encoding, base85
12 import scmutil, util, extensions, hook, error, revset
12 import scmutil, util, extensions, hook, error, revset
13 import match as matchmod
13 import match as matchmod
14 import merge as mergemod
14 import merge as mergemod
15 import tags as tagsmod
15 import tags as tagsmod
16 from lock import release
16 from lock import release
17 import weakref, errno, os, time, inspect
17 import weakref, errno, os, time, inspect
18 import branchmap
18 import branchmap
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
22 class repofilecache(filecache):
22 class repofilecache(filecache):
23 """All filecache usage on repo are done for logic that should be unfiltered
23 """All filecache usage on repo are done for logic that should be unfiltered
24 """
24 """
25
25
26 def __get__(self, repo, type=None):
26 def __get__(self, repo, type=None):
27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 def __set__(self, repo, value):
28 def __set__(self, repo, value):
29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 def __delete__(self, repo):
30 def __delete__(self, repo):
31 return super(repofilecache, self).__delete__(repo.unfiltered())
31 return super(repofilecache, self).__delete__(repo.unfiltered())
32
32
33 class storecache(repofilecache):
33 class storecache(repofilecache):
34 """filecache for files in the store"""
34 """filecache for files in the store"""
35 def join(self, obj, fname):
35 def join(self, obj, fname):
36 return obj.sjoin(fname)
36 return obj.sjoin(fname)
37
37
38 class unfilteredpropertycache(propertycache):
38 class unfilteredpropertycache(propertycache):
39 """propertycache that apply to unfiltered repo only"""
39 """propertycache that apply to unfiltered repo only"""
40
40
41 def __get__(self, repo, type=None):
41 def __get__(self, repo, type=None):
42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43
43
44 class filteredpropertycache(propertycache):
44 class filteredpropertycache(propertycache):
45 """propertycache that must take filtering in account"""
45 """propertycache that must take filtering in account"""
46
46
47 def cachevalue(self, obj, value):
47 def cachevalue(self, obj, value):
48 object.__setattr__(obj, self.name, value)
48 object.__setattr__(obj, self.name, value)
49
49
50
50
51 def hasunfilteredcache(repo, name):
51 def hasunfilteredcache(repo, name):
52 """check if an repo and a unfilteredproperty cached value for <name>"""
52 """check if an repo and a unfilteredproperty cached value for <name>"""
53 return name in vars(repo.unfiltered())
53 return name in vars(repo.unfiltered())
54
54
55 def unfilteredmethod(orig):
55 def unfilteredmethod(orig):
56 """decorate method that always need to be run on unfiltered version"""
56 """decorate method that always need to be run on unfiltered version"""
57 def wrapper(repo, *args, **kwargs):
57 def wrapper(repo, *args, **kwargs):
58 return orig(repo.unfiltered(), *args, **kwargs)
58 return orig(repo.unfiltered(), *args, **kwargs)
59 return wrapper
59 return wrapper
60
60
61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63
63
64 class localpeer(peer.peerrepository):
64 class localpeer(peer.peerrepository):
65 '''peer for a local repo; reflects only the most recent API'''
65 '''peer for a local repo; reflects only the most recent API'''
66
66
67 def __init__(self, repo, caps=MODERNCAPS):
67 def __init__(self, repo, caps=MODERNCAPS):
68 peer.peerrepository.__init__(self)
68 peer.peerrepository.__init__(self)
69 self._repo = repo
69 self._repo = repo
70 self.ui = repo.ui
70 self.ui = repo.ui
71 self._caps = repo._restrictcapabilities(caps)
71 self._caps = repo._restrictcapabilities(caps)
72 self.requirements = repo.requirements
72 self.requirements = repo.requirements
73 self.supportedformats = repo.supportedformats
73 self.supportedformats = repo.supportedformats
74
74
75 def close(self):
75 def close(self):
76 self._repo.close()
76 self._repo.close()
77
77
78 def _capabilities(self):
78 def _capabilities(self):
79 return self._caps
79 return self._caps
80
80
81 def local(self):
81 def local(self):
82 return self._repo
82 return self._repo
83
83
84 def canpush(self):
84 def canpush(self):
85 return True
85 return True
86
86
87 def url(self):
87 def url(self):
88 return self._repo.url()
88 return self._repo.url()
89
89
90 def lookup(self, key):
90 def lookup(self, key):
91 return self._repo.lookup(key)
91 return self._repo.lookup(key)
92
92
93 def branchmap(self):
93 def branchmap(self):
94 return discovery.visiblebranchmap(self._repo)
94 return discovery.visiblebranchmap(self._repo)
95
95
96 def heads(self):
96 def heads(self):
97 return discovery.visibleheads(self._repo)
97 return discovery.visibleheads(self._repo)
98
98
99 def known(self, nodes):
99 def known(self, nodes):
100 return self._repo.known(nodes)
100 return self._repo.known(nodes)
101
101
102 def getbundle(self, source, heads=None, common=None):
102 def getbundle(self, source, heads=None, common=None):
103 return self._repo.getbundle(source, heads=heads, common=common)
103 return self._repo.getbundle(source, heads=heads, common=common)
104
104
105 # TODO We might want to move the next two calls into legacypeer and add
105 # TODO We might want to move the next two calls into legacypeer and add
106 # unbundle instead.
106 # unbundle instead.
107
107
108 def lock(self):
108 def lock(self):
109 return self._repo.lock()
109 return self._repo.lock()
110
110
111 def addchangegroup(self, cg, source, url):
111 def addchangegroup(self, cg, source, url):
112 return self._repo.addchangegroup(cg, source, url)
112 return self._repo.addchangegroup(cg, source, url)
113
113
114 def pushkey(self, namespace, key, old, new):
114 def pushkey(self, namespace, key, old, new):
115 return self._repo.pushkey(namespace, key, old, new)
115 return self._repo.pushkey(namespace, key, old, new)
116
116
117 def listkeys(self, namespace):
117 def listkeys(self, namespace):
118 return self._repo.listkeys(namespace)
118 return self._repo.listkeys(namespace)
119
119
120 def debugwireargs(self, one, two, three=None, four=None, five=None):
120 def debugwireargs(self, one, two, three=None, four=None, five=None):
121 '''used to test argument passing over the wire'''
121 '''used to test argument passing over the wire'''
122 return "%s %s %s %s %s" % (one, two, three, four, five)
122 return "%s %s %s %s %s" % (one, two, three, four, five)
123
123
124 class locallegacypeer(localpeer):
124 class locallegacypeer(localpeer):
125 '''peer extension which implements legacy methods too; used for tests with
125 '''peer extension which implements legacy methods too; used for tests with
126 restricted capabilities'''
126 restricted capabilities'''
127
127
128 def __init__(self, repo):
128 def __init__(self, repo):
129 localpeer.__init__(self, repo, caps=LEGACYCAPS)
129 localpeer.__init__(self, repo, caps=LEGACYCAPS)
130
130
131 def branches(self, nodes):
131 def branches(self, nodes):
132 return self._repo.branches(nodes)
132 return self._repo.branches(nodes)
133
133
134 def between(self, pairs):
134 def between(self, pairs):
135 return self._repo.between(pairs)
135 return self._repo.between(pairs)
136
136
137 def changegroup(self, basenodes, source):
137 def changegroup(self, basenodes, source):
138 return self._repo.changegroup(basenodes, source)
138 return self._repo.changegroup(basenodes, source)
139
139
140 def changegroupsubset(self, bases, heads, source):
140 def changegroupsubset(self, bases, heads, source):
141 return self._repo.changegroupsubset(bases, heads, source)
141 return self._repo.changegroupsubset(bases, heads, source)
142
142
143 class localrepository(object):
143 class localrepository(object):
144
144
145 supportedformats = set(('revlogv1', 'generaldelta'))
145 supportedformats = set(('revlogv1', 'generaldelta'))
146 supported = supportedformats | set(('store', 'fncache', 'shared',
146 supported = supportedformats | set(('store', 'fncache', 'shared',
147 'dotencode'))
147 'dotencode'))
148 openerreqs = set(('revlogv1', 'generaldelta'))
148 openerreqs = set(('revlogv1', 'generaldelta'))
149 requirements = ['revlogv1']
149 requirements = ['revlogv1']
150
150
151 def _baserequirements(self, create):
151 def _baserequirements(self, create):
152 return self.requirements[:]
152 return self.requirements[:]
153
153
154 def __init__(self, baseui, path=None, create=False):
154 def __init__(self, baseui, path=None, create=False):
155 self.wvfs = scmutil.vfs(path, expand=True)
155 self.wvfs = scmutil.vfs(path, expand=True)
156 self.wopener = self.wvfs
156 self.wopener = self.wvfs
157 self.root = self.wvfs.base
157 self.root = self.wvfs.base
158 self.path = self.wvfs.join(".hg")
158 self.path = self.wvfs.join(".hg")
159 self.origroot = path
159 self.origroot = path
160 self.auditor = scmutil.pathauditor(self.root, self._checknested)
160 self.auditor = scmutil.pathauditor(self.root, self._checknested)
161 self.vfs = scmutil.vfs(self.path)
161 self.vfs = scmutil.vfs(self.path)
162 self.opener = self.vfs
162 self.opener = self.vfs
163 self.baseui = baseui
163 self.baseui = baseui
164 self.ui = baseui.copy()
164 self.ui = baseui.copy()
165 # A list of callback to shape the phase if no data were found.
165 # A list of callback to shape the phase if no data were found.
166 # Callback are in the form: func(repo, roots) --> processed root.
166 # Callback are in the form: func(repo, roots) --> processed root.
167 # This list it to be filled by extension during repo setup
167 # This list it to be filled by extension during repo setup
168 self._phasedefaults = []
168 self._phasedefaults = []
169 try:
169 try:
170 self.ui.readconfig(self.join("hgrc"), self.root)
170 self.ui.readconfig(self.join("hgrc"), self.root)
171 extensions.loadall(self.ui)
171 extensions.loadall(self.ui)
172 except IOError:
172 except IOError:
173 pass
173 pass
174
174
175 if not self.vfs.isdir():
175 if not self.vfs.isdir():
176 if create:
176 if create:
177 if not self.wvfs.exists():
177 if not self.wvfs.exists():
178 self.wvfs.makedirs()
178 self.wvfs.makedirs()
179 self.vfs.makedir(notindexed=True)
179 self.vfs.makedir(notindexed=True)
180 requirements = self._baserequirements(create)
180 requirements = self._baserequirements(create)
181 if self.ui.configbool('format', 'usestore', True):
181 if self.ui.configbool('format', 'usestore', True):
182 self.vfs.mkdir("store")
182 self.vfs.mkdir("store")
183 requirements.append("store")
183 requirements.append("store")
184 if self.ui.configbool('format', 'usefncache', True):
184 if self.ui.configbool('format', 'usefncache', True):
185 requirements.append("fncache")
185 requirements.append("fncache")
186 if self.ui.configbool('format', 'dotencode', True):
186 if self.ui.configbool('format', 'dotencode', True):
187 requirements.append('dotencode')
187 requirements.append('dotencode')
188 # create an invalid changelog
188 # create an invalid changelog
189 self.vfs.append(
189 self.vfs.append(
190 "00changelog.i",
190 "00changelog.i",
191 '\0\0\0\2' # represents revlogv2
191 '\0\0\0\2' # represents revlogv2
192 ' dummy changelog to prevent using the old repo layout'
192 ' dummy changelog to prevent using the old repo layout'
193 )
193 )
194 if self.ui.configbool('format', 'generaldelta', False):
194 if self.ui.configbool('format', 'generaldelta', False):
195 requirements.append("generaldelta")
195 requirements.append("generaldelta")
196 requirements = set(requirements)
196 requirements = set(requirements)
197 else:
197 else:
198 raise error.RepoError(_("repository %s not found") % path)
198 raise error.RepoError(_("repository %s not found") % path)
199 elif create:
199 elif create:
200 raise error.RepoError(_("repository %s already exists") % path)
200 raise error.RepoError(_("repository %s already exists") % path)
201 else:
201 else:
202 try:
202 try:
203 requirements = scmutil.readrequires(self.vfs, self.supported)
203 requirements = scmutil.readrequires(self.vfs, self.supported)
204 except IOError, inst:
204 except IOError, inst:
205 if inst.errno != errno.ENOENT:
205 if inst.errno != errno.ENOENT:
206 raise
206 raise
207 requirements = set()
207 requirements = set()
208
208
209 self.sharedpath = self.path
209 self.sharedpath = self.path
210 try:
210 try:
211 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
211 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
212 if not os.path.exists(s):
212 if not os.path.exists(s):
213 raise error.RepoError(
213 raise error.RepoError(
214 _('.hg/sharedpath points to nonexistent directory %s') % s)
214 _('.hg/sharedpath points to nonexistent directory %s') % s)
215 self.sharedpath = s
215 self.sharedpath = s
216 except IOError, inst:
216 except IOError, inst:
217 if inst.errno != errno.ENOENT:
217 if inst.errno != errno.ENOENT:
218 raise
218 raise
219
219
220 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
220 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
221 self.spath = self.store.path
221 self.spath = self.store.path
222 self.svfs = self.store.vfs
222 self.svfs = self.store.vfs
223 self.sopener = self.svfs
223 self.sopener = self.svfs
224 self.sjoin = self.store.join
224 self.sjoin = self.store.join
225 self.vfs.createmode = self.store.createmode
225 self.vfs.createmode = self.store.createmode
226 self._applyrequirements(requirements)
226 self._applyrequirements(requirements)
227 if create:
227 if create:
228 self._writerequirements()
228 self._writerequirements()
229
229
230
230
231 self._branchcache = None
231 self._branchcache = None
232 self._branchcachetip = None
232 self._branchcachetip = None
233 self.filterpats = {}
233 self.filterpats = {}
234 self._datafilters = {}
234 self._datafilters = {}
235 self._transref = self._lockref = self._wlockref = None
235 self._transref = self._lockref = self._wlockref = None
236
236
237 # A cache for various files under .hg/ that tracks file changes,
237 # A cache for various files under .hg/ that tracks file changes,
238 # (used by the filecache decorator)
238 # (used by the filecache decorator)
239 #
239 #
240 # Maps a property name to its util.filecacheentry
240 # Maps a property name to its util.filecacheentry
241 self._filecache = {}
241 self._filecache = {}
242
242
243 # hold sets of revision to be filtered
243 # hold sets of revision to be filtered
244 # should be cleared when something might have changed the filter value:
244 # should be cleared when something might have changed the filter value:
245 # - new changesets,
245 # - new changesets,
246 # - phase change,
246 # - phase change,
247 # - new obsolescence marker,
247 # - new obsolescence marker,
248 # - working directory parent change,
248 # - working directory parent change,
249 # - bookmark changes
249 # - bookmark changes
250 self.filteredrevcache = {}
250 self.filteredrevcache = {}
251
251
252 def close(self):
252 def close(self):
253 pass
253 pass
254
254
255 def _restrictcapabilities(self, caps):
255 def _restrictcapabilities(self, caps):
256 return caps
256 return caps
257
257
258 def _applyrequirements(self, requirements):
258 def _applyrequirements(self, requirements):
259 self.requirements = requirements
259 self.requirements = requirements
260 self.sopener.options = dict((r, 1) for r in requirements
260 self.sopener.options = dict((r, 1) for r in requirements
261 if r in self.openerreqs)
261 if r in self.openerreqs)
262
262
263 def _writerequirements(self):
263 def _writerequirements(self):
264 reqfile = self.opener("requires", "w")
264 reqfile = self.opener("requires", "w")
265 for r in self.requirements:
265 for r in self.requirements:
266 reqfile.write("%s\n" % r)
266 reqfile.write("%s\n" % r)
267 reqfile.close()
267 reqfile.close()
268
268
269 def _checknested(self, path):
269 def _checknested(self, path):
270 """Determine if path is a legal nested repository."""
270 """Determine if path is a legal nested repository."""
271 if not path.startswith(self.root):
271 if not path.startswith(self.root):
272 return False
272 return False
273 subpath = path[len(self.root) + 1:]
273 subpath = path[len(self.root) + 1:]
274 normsubpath = util.pconvert(subpath)
274 normsubpath = util.pconvert(subpath)
275
275
276 # XXX: Checking against the current working copy is wrong in
276 # XXX: Checking against the current working copy is wrong in
277 # the sense that it can reject things like
277 # the sense that it can reject things like
278 #
278 #
279 # $ hg cat -r 10 sub/x.txt
279 # $ hg cat -r 10 sub/x.txt
280 #
280 #
281 # if sub/ is no longer a subrepository in the working copy
281 # if sub/ is no longer a subrepository in the working copy
282 # parent revision.
282 # parent revision.
283 #
283 #
284 # However, it can of course also allow things that would have
284 # However, it can of course also allow things that would have
285 # been rejected before, such as the above cat command if sub/
285 # been rejected before, such as the above cat command if sub/
286 # is a subrepository now, but was a normal directory before.
286 # is a subrepository now, but was a normal directory before.
287 # The old path auditor would have rejected by mistake since it
287 # The old path auditor would have rejected by mistake since it
288 # panics when it sees sub/.hg/.
288 # panics when it sees sub/.hg/.
289 #
289 #
290 # All in all, checking against the working copy seems sensible
290 # All in all, checking against the working copy seems sensible
291 # since we want to prevent access to nested repositories on
291 # since we want to prevent access to nested repositories on
292 # the filesystem *now*.
292 # the filesystem *now*.
293 ctx = self[None]
293 ctx = self[None]
294 parts = util.splitpath(subpath)
294 parts = util.splitpath(subpath)
295 while parts:
295 while parts:
296 prefix = '/'.join(parts)
296 prefix = '/'.join(parts)
297 if prefix in ctx.substate:
297 if prefix in ctx.substate:
298 if prefix == normsubpath:
298 if prefix == normsubpath:
299 return True
299 return True
300 else:
300 else:
301 sub = ctx.sub(prefix)
301 sub = ctx.sub(prefix)
302 return sub.checknested(subpath[len(prefix) + 1:])
302 return sub.checknested(subpath[len(prefix) + 1:])
303 else:
303 else:
304 parts.pop()
304 parts.pop()
305 return False
305 return False
306
306
307 def peer(self):
307 def peer(self):
308 return localpeer(self) # not cached to avoid reference cycle
308 return localpeer(self) # not cached to avoid reference cycle
309
309
310 def unfiltered(self):
310 def unfiltered(self):
311 """Return unfiltered version of the repository
311 """Return unfiltered version of the repository
312
312
313 Intended to be ovewritten by filtered repo."""
313 Intended to be ovewritten by filtered repo."""
314 return self
314 return self
315
315
316 def filtered(self, name):
316 def filtered(self, name):
317 """Return a filtered version of a repository"""
317 """Return a filtered version of a repository"""
318 # build a new class with the mixin and the current class
318 # build a new class with the mixin and the current class
319 # (possibily subclass of the repo)
319 # (possibily subclass of the repo)
320 class proxycls(repoview.repoview, self.unfiltered().__class__):
320 class proxycls(repoview.repoview, self.unfiltered().__class__):
321 pass
321 pass
322 return proxycls(self, name)
322 return proxycls(self, name)
323
323
324 @repofilecache('bookmarks')
324 @repofilecache('bookmarks')
325 def _bookmarks(self):
325 def _bookmarks(self):
326 return bookmarks.bmstore(self)
326 return bookmarks.bmstore(self)
327
327
328 @repofilecache('bookmarks.current')
328 @repofilecache('bookmarks.current')
329 def _bookmarkcurrent(self):
329 def _bookmarkcurrent(self):
330 return bookmarks.readcurrent(self)
330 return bookmarks.readcurrent(self)
331
331
332 def bookmarkheads(self, bookmark):
332 def bookmarkheads(self, bookmark):
333 name = bookmark.split('@', 1)[0]
333 name = bookmark.split('@', 1)[0]
334 heads = []
334 heads = []
335 for mark, n in self._bookmarks.iteritems():
335 for mark, n in self._bookmarks.iteritems():
336 if mark.split('@', 1)[0] == name:
336 if mark.split('@', 1)[0] == name:
337 heads.append(n)
337 heads.append(n)
338 return heads
338 return heads
339
339
340 @storecache('phaseroots')
340 @storecache('phaseroots')
341 def _phasecache(self):
341 def _phasecache(self):
342 return phases.phasecache(self, self._phasedefaults)
342 return phases.phasecache(self, self._phasedefaults)
343
343
344 @storecache('obsstore')
344 @storecache('obsstore')
345 def obsstore(self):
345 def obsstore(self):
346 store = obsolete.obsstore(self.sopener)
346 store = obsolete.obsstore(self.sopener)
347 if store and not obsolete._enabled:
347 if store and not obsolete._enabled:
348 # message is rare enough to not be translated
348 # message is rare enough to not be translated
349 msg = 'obsolete feature not enabled but %i markers found!\n'
349 msg = 'obsolete feature not enabled but %i markers found!\n'
350 self.ui.warn(msg % len(list(store)))
350 self.ui.warn(msg % len(list(store)))
351 return store
351 return store
352
352
353 @unfilteredpropertycache
353 @unfilteredpropertycache
354 def hiddenrevs(self):
354 def hiddenrevs(self):
355 """hiddenrevs: revs that should be hidden by command and tools
355 """hiddenrevs: revs that should be hidden by command and tools
356
356
357 This set is carried on the repo to ease initialization and lazy
357 This set is carried on the repo to ease initialization and lazy
358 loading; it'll probably move back to changelog for efficiency and
358 loading; it'll probably move back to changelog for efficiency and
359 consistency reasons.
359 consistency reasons.
360
360
361 Note that the hiddenrevs will needs invalidations when
361 Note that the hiddenrevs will needs invalidations when
362 - a new changesets is added (possible unstable above extinct)
362 - a new changesets is added (possible unstable above extinct)
363 - a new obsolete marker is added (possible new extinct changeset)
363 - a new obsolete marker is added (possible new extinct changeset)
364
364
365 hidden changesets cannot have non-hidden descendants
365 hidden changesets cannot have non-hidden descendants
366 """
366 """
367 hidden = set()
367 hidden = set()
368 if self.obsstore:
368 if self.obsstore:
369 ### hide extinct changeset that are not accessible by any mean
369 ### hide extinct changeset that are not accessible by any mean
370 hiddenquery = 'extinct() - ::(. + bookmark())'
370 hiddenquery = 'extinct() - ::(. + bookmark())'
371 hidden.update(self.revs(hiddenquery))
371 hidden.update(self.revs(hiddenquery))
372 return hidden
372 return hidden
373
373
374 @storecache('00changelog.i')
374 @storecache('00changelog.i')
375 def changelog(self):
375 def changelog(self):
376 c = changelog.changelog(self.sopener)
376 c = changelog.changelog(self.sopener)
377 if 'HG_PENDING' in os.environ:
377 if 'HG_PENDING' in os.environ:
378 p = os.environ['HG_PENDING']
378 p = os.environ['HG_PENDING']
379 if p.startswith(self.root):
379 if p.startswith(self.root):
380 c.readpending('00changelog.i.a')
380 c.readpending('00changelog.i.a')
381 return c
381 return c
382
382
383 @storecache('00manifest.i')
383 @storecache('00manifest.i')
384 def manifest(self):
384 def manifest(self):
385 return manifest.manifest(self.sopener)
385 return manifest.manifest(self.sopener)
386
386
387 @repofilecache('dirstate')
387 @repofilecache('dirstate')
388 def dirstate(self):
388 def dirstate(self):
389 warned = [0]
389 warned = [0]
390 def validate(node):
390 def validate(node):
391 try:
391 try:
392 self.changelog.rev(node)
392 self.changelog.rev(node)
393 return node
393 return node
394 except error.LookupError:
394 except error.LookupError:
395 if not warned[0]:
395 if not warned[0]:
396 warned[0] = True
396 warned[0] = True
397 self.ui.warn(_("warning: ignoring unknown"
397 self.ui.warn(_("warning: ignoring unknown"
398 " working parent %s!\n") % short(node))
398 " working parent %s!\n") % short(node))
399 return nullid
399 return nullid
400
400
401 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
401 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
402
402
403 def __getitem__(self, changeid):
403 def __getitem__(self, changeid):
404 if changeid is None:
404 if changeid is None:
405 return context.workingctx(self)
405 return context.workingctx(self)
406 return context.changectx(self, changeid)
406 return context.changectx(self, changeid)
407
407
408 def __contains__(self, changeid):
408 def __contains__(self, changeid):
409 try:
409 try:
410 return bool(self.lookup(changeid))
410 return bool(self.lookup(changeid))
411 except error.RepoLookupError:
411 except error.RepoLookupError:
412 return False
412 return False
413
413
414 def __nonzero__(self):
414 def __nonzero__(self):
415 return True
415 return True
416
416
417 def __len__(self):
417 def __len__(self):
418 return len(self.changelog)
418 return len(self.changelog)
419
419
420 def __iter__(self):
420 def __iter__(self):
421 return iter(self.changelog)
421 return iter(self.changelog)
422
422
423 def revs(self, expr, *args):
423 def revs(self, expr, *args):
424 '''Return a list of revisions matching the given revset'''
424 '''Return a list of revisions matching the given revset'''
425 expr = revset.formatspec(expr, *args)
425 expr = revset.formatspec(expr, *args)
426 m = revset.match(None, expr)
426 m = revset.match(None, expr)
427 return [r for r in m(self, list(self))]
427 return [r for r in m(self, list(self))]
428
428
429 def set(self, expr, *args):
429 def set(self, expr, *args):
430 '''
430 '''
431 Yield a context for each matching revision, after doing arg
431 Yield a context for each matching revision, after doing arg
432 replacement via revset.formatspec
432 replacement via revset.formatspec
433 '''
433 '''
434 for r in self.revs(expr, *args):
434 for r in self.revs(expr, *args):
435 yield self[r]
435 yield self[r]
436
436
437 def url(self):
437 def url(self):
438 return 'file:' + self.root
438 return 'file:' + self.root
439
439
440 def hook(self, name, throw=False, **args):
440 def hook(self, name, throw=False, **args):
441 return hook.hook(self.ui, self, name, throw, **args)
441 return hook.hook(self.ui, self, name, throw, **args)
442
442
443 @unfilteredmethod
443 @unfilteredmethod
444 def _tag(self, names, node, message, local, user, date, extra={}):
444 def _tag(self, names, node, message, local, user, date, extra={}):
445 if isinstance(names, str):
445 if isinstance(names, str):
446 names = (names,)
446 names = (names,)
447
447
448 branches = self.branchmap()
448 branches = self.branchmap()
449 for name in names:
449 for name in names:
450 self.hook('pretag', throw=True, node=hex(node), tag=name,
450 self.hook('pretag', throw=True, node=hex(node), tag=name,
451 local=local)
451 local=local)
452 if name in branches:
452 if name in branches:
453 self.ui.warn(_("warning: tag %s conflicts with existing"
453 self.ui.warn(_("warning: tag %s conflicts with existing"
454 " branch name\n") % name)
454 " branch name\n") % name)
455
455
456 def writetags(fp, names, munge, prevtags):
456 def writetags(fp, names, munge, prevtags):
457 fp.seek(0, 2)
457 fp.seek(0, 2)
458 if prevtags and prevtags[-1] != '\n':
458 if prevtags and prevtags[-1] != '\n':
459 fp.write('\n')
459 fp.write('\n')
460 for name in names:
460 for name in names:
461 m = munge and munge(name) or name
461 m = munge and munge(name) or name
462 if (self._tagscache.tagtypes and
462 if (self._tagscache.tagtypes and
463 name in self._tagscache.tagtypes):
463 name in self._tagscache.tagtypes):
464 old = self.tags().get(name, nullid)
464 old = self.tags().get(name, nullid)
465 fp.write('%s %s\n' % (hex(old), m))
465 fp.write('%s %s\n' % (hex(old), m))
466 fp.write('%s %s\n' % (hex(node), m))
466 fp.write('%s %s\n' % (hex(node), m))
467 fp.close()
467 fp.close()
468
468
469 prevtags = ''
469 prevtags = ''
470 if local:
470 if local:
471 try:
471 try:
472 fp = self.opener('localtags', 'r+')
472 fp = self.opener('localtags', 'r+')
473 except IOError:
473 except IOError:
474 fp = self.opener('localtags', 'a')
474 fp = self.opener('localtags', 'a')
475 else:
475 else:
476 prevtags = fp.read()
476 prevtags = fp.read()
477
477
478 # local tags are stored in the current charset
478 # local tags are stored in the current charset
479 writetags(fp, names, None, prevtags)
479 writetags(fp, names, None, prevtags)
480 for name in names:
480 for name in names:
481 self.hook('tag', node=hex(node), tag=name, local=local)
481 self.hook('tag', node=hex(node), tag=name, local=local)
482 return
482 return
483
483
484 try:
484 try:
485 fp = self.wfile('.hgtags', 'rb+')
485 fp = self.wfile('.hgtags', 'rb+')
486 except IOError, e:
486 except IOError, e:
487 if e.errno != errno.ENOENT:
487 if e.errno != errno.ENOENT:
488 raise
488 raise
489 fp = self.wfile('.hgtags', 'ab')
489 fp = self.wfile('.hgtags', 'ab')
490 else:
490 else:
491 prevtags = fp.read()
491 prevtags = fp.read()
492
492
493 # committed tags are stored in UTF-8
493 # committed tags are stored in UTF-8
494 writetags(fp, names, encoding.fromlocal, prevtags)
494 writetags(fp, names, encoding.fromlocal, prevtags)
495
495
496 fp.close()
496 fp.close()
497
497
498 self.invalidatecaches()
498 self.invalidatecaches()
499
499
500 if '.hgtags' not in self.dirstate:
500 if '.hgtags' not in self.dirstate:
501 self[None].add(['.hgtags'])
501 self[None].add(['.hgtags'])
502
502
503 m = matchmod.exact(self.root, '', ['.hgtags'])
503 m = matchmod.exact(self.root, '', ['.hgtags'])
504 tagnode = self.commit(message, user, date, extra=extra, match=m)
504 tagnode = self.commit(message, user, date, extra=extra, match=m)
505
505
506 for name in names:
506 for name in names:
507 self.hook('tag', node=hex(node), tag=name, local=local)
507 self.hook('tag', node=hex(node), tag=name, local=local)
508
508
509 return tagnode
509 return tagnode
510
510
511 def tag(self, names, node, message, local, user, date):
511 def tag(self, names, node, message, local, user, date):
512 '''tag a revision with one or more symbolic names.
512 '''tag a revision with one or more symbolic names.
513
513
514 names is a list of strings or, when adding a single tag, names may be a
514 names is a list of strings or, when adding a single tag, names may be a
515 string.
515 string.
516
516
517 if local is True, the tags are stored in a per-repository file.
517 if local is True, the tags are stored in a per-repository file.
518 otherwise, they are stored in the .hgtags file, and a new
518 otherwise, they are stored in the .hgtags file, and a new
519 changeset is committed with the change.
519 changeset is committed with the change.
520
520
521 keyword arguments:
521 keyword arguments:
522
522
523 local: whether to store tags in non-version-controlled file
523 local: whether to store tags in non-version-controlled file
524 (default False)
524 (default False)
525
525
526 message: commit message to use if committing
526 message: commit message to use if committing
527
527
528 user: name of user to use if committing
528 user: name of user to use if committing
529
529
530 date: date tuple to use if committing'''
530 date: date tuple to use if committing'''
531
531
532 if not local:
532 if not local:
533 for x in self.status()[:5]:
533 for x in self.status()[:5]:
534 if '.hgtags' in x:
534 if '.hgtags' in x:
535 raise util.Abort(_('working copy of .hgtags is changed '
535 raise util.Abort(_('working copy of .hgtags is changed '
536 '(please commit .hgtags manually)'))
536 '(please commit .hgtags manually)'))
537
537
538 self.tags() # instantiate the cache
538 self.tags() # instantiate the cache
539 self._tag(names, node, message, local, user, date)
539 self._tag(names, node, message, local, user, date)
540
540
541 @filteredpropertycache
541 @filteredpropertycache
542 def _tagscache(self):
542 def _tagscache(self):
543 '''Returns a tagscache object that contains various tags related
543 '''Returns a tagscache object that contains various tags related
544 caches.'''
544 caches.'''
545
545
546 # This simplifies its cache management by having one decorated
546 # This simplifies its cache management by having one decorated
547 # function (this one) and the rest simply fetch things from it.
547 # function (this one) and the rest simply fetch things from it.
548 class tagscache(object):
548 class tagscache(object):
549 def __init__(self):
549 def __init__(self):
550 # These two define the set of tags for this repository. tags
550 # These two define the set of tags for this repository. tags
551 # maps tag name to node; tagtypes maps tag name to 'global' or
551 # maps tag name to node; tagtypes maps tag name to 'global' or
552 # 'local'. (Global tags are defined by .hgtags across all
552 # 'local'. (Global tags are defined by .hgtags across all
553 # heads, and local tags are defined in .hg/localtags.)
553 # heads, and local tags are defined in .hg/localtags.)
554 # They constitute the in-memory cache of tags.
554 # They constitute the in-memory cache of tags.
555 self.tags = self.tagtypes = None
555 self.tags = self.tagtypes = None
556
556
557 self.nodetagscache = self.tagslist = None
557 self.nodetagscache = self.tagslist = None
558
558
559 cache = tagscache()
559 cache = tagscache()
560 cache.tags, cache.tagtypes = self._findtags()
560 cache.tags, cache.tagtypes = self._findtags()
561
561
562 return cache
562 return cache
563
563
564 def tags(self):
564 def tags(self):
565 '''return a mapping of tag to node'''
565 '''return a mapping of tag to node'''
566 t = {}
566 t = {}
567 if self.changelog.filteredrevs:
567 if self.changelog.filteredrevs:
568 tags, tt = self._findtags()
568 tags, tt = self._findtags()
569 else:
569 else:
570 tags = self._tagscache.tags
570 tags = self._tagscache.tags
571 for k, v in tags.iteritems():
571 for k, v in tags.iteritems():
572 try:
572 try:
573 # ignore tags to unknown nodes
573 # ignore tags to unknown nodes
574 self.changelog.rev(v)
574 self.changelog.rev(v)
575 t[k] = v
575 t[k] = v
576 except (error.LookupError, ValueError):
576 except (error.LookupError, ValueError):
577 pass
577 pass
578 return t
578 return t
579
579
580 def _findtags(self):
580 def _findtags(self):
581 '''Do the hard work of finding tags. Return a pair of dicts
581 '''Do the hard work of finding tags. Return a pair of dicts
582 (tags, tagtypes) where tags maps tag name to node, and tagtypes
582 (tags, tagtypes) where tags maps tag name to node, and tagtypes
583 maps tag name to a string like \'global\' or \'local\'.
583 maps tag name to a string like \'global\' or \'local\'.
584 Subclasses or extensions are free to add their own tags, but
584 Subclasses or extensions are free to add their own tags, but
585 should be aware that the returned dicts will be retained for the
585 should be aware that the returned dicts will be retained for the
586 duration of the localrepo object.'''
586 duration of the localrepo object.'''
587
587
588 # XXX what tagtype should subclasses/extensions use? Currently
588 # XXX what tagtype should subclasses/extensions use? Currently
589 # mq and bookmarks add tags, but do not set the tagtype at all.
589 # mq and bookmarks add tags, but do not set the tagtype at all.
590 # Should each extension invent its own tag type? Should there
590 # Should each extension invent its own tag type? Should there
591 # be one tagtype for all such "virtual" tags? Or is the status
591 # be one tagtype for all such "virtual" tags? Or is the status
592 # quo fine?
592 # quo fine?
593
593
594 alltags = {} # map tag name to (node, hist)
594 alltags = {} # map tag name to (node, hist)
595 tagtypes = {}
595 tagtypes = {}
596
596
597 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
597 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
598 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
598 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
599
599
600 # Build the return dicts. Have to re-encode tag names because
600 # Build the return dicts. Have to re-encode tag names because
601 # the tags module always uses UTF-8 (in order not to lose info
601 # the tags module always uses UTF-8 (in order not to lose info
602 # writing to the cache), but the rest of Mercurial wants them in
602 # writing to the cache), but the rest of Mercurial wants them in
603 # local encoding.
603 # local encoding.
604 tags = {}
604 tags = {}
605 for (name, (node, hist)) in alltags.iteritems():
605 for (name, (node, hist)) in alltags.iteritems():
606 if node != nullid:
606 if node != nullid:
607 tags[encoding.tolocal(name)] = node
607 tags[encoding.tolocal(name)] = node
608 tags['tip'] = self.changelog.tip()
608 tags['tip'] = self.changelog.tip()
609 tagtypes = dict([(encoding.tolocal(name), value)
609 tagtypes = dict([(encoding.tolocal(name), value)
610 for (name, value) in tagtypes.iteritems()])
610 for (name, value) in tagtypes.iteritems()])
611 return (tags, tagtypes)
611 return (tags, tagtypes)
612
612
613 def tagtype(self, tagname):
613 def tagtype(self, tagname):
614 '''
614 '''
615 return the type of the given tag. result can be:
615 return the type of the given tag. result can be:
616
616
617 'local' : a local tag
617 'local' : a local tag
618 'global' : a global tag
618 'global' : a global tag
619 None : tag does not exist
619 None : tag does not exist
620 '''
620 '''
621
621
622 return self._tagscache.tagtypes.get(tagname)
622 return self._tagscache.tagtypes.get(tagname)
623
623
624 def tagslist(self):
624 def tagslist(self):
625 '''return a list of tags ordered by revision'''
625 '''return a list of tags ordered by revision'''
626 if not self._tagscache.tagslist:
626 if not self._tagscache.tagslist:
627 l = []
627 l = []
628 for t, n in self.tags().iteritems():
628 for t, n in self.tags().iteritems():
629 r = self.changelog.rev(n)
629 r = self.changelog.rev(n)
630 l.append((r, t, n))
630 l.append((r, t, n))
631 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
631 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
632
632
633 return self._tagscache.tagslist
633 return self._tagscache.tagslist
634
634
635 def nodetags(self, node):
635 def nodetags(self, node):
636 '''return the tags associated with a node'''
636 '''return the tags associated with a node'''
637 if not self._tagscache.nodetagscache:
637 if not self._tagscache.nodetagscache:
638 nodetagscache = {}
638 nodetagscache = {}
639 for t, n in self._tagscache.tags.iteritems():
639 for t, n in self._tagscache.tags.iteritems():
640 nodetagscache.setdefault(n, []).append(t)
640 nodetagscache.setdefault(n, []).append(t)
641 for tags in nodetagscache.itervalues():
641 for tags in nodetagscache.itervalues():
642 tags.sort()
642 tags.sort()
643 self._tagscache.nodetagscache = nodetagscache
643 self._tagscache.nodetagscache = nodetagscache
644 return self._tagscache.nodetagscache.get(node, [])
644 return self._tagscache.nodetagscache.get(node, [])
645
645
646 def nodebookmarks(self, node):
646 def nodebookmarks(self, node):
647 marks = []
647 marks = []
648 for bookmark, n in self._bookmarks.iteritems():
648 for bookmark, n in self._bookmarks.iteritems():
649 if n == node:
649 if n == node:
650 marks.append(bookmark)
650 marks.append(bookmark)
651 return sorted(marks)
651 return sorted(marks)
652
652
653 def _cacheabletip(self):
653 def _cacheabletip(self):
654 """tip-most revision stable enought to used in persistent cache
654 """tip-most revision stable enought to used in persistent cache
655
655
656 This function is overwritten by MQ to ensure we do not write cache for
656 This function is overwritten by MQ to ensure we do not write cache for
657 a part of the history that will likely change.
657 a part of the history that will likely change.
658
658
659 Efficient handling of filtered revision in branchcache should offer a
659 Efficient handling of filtered revision in branchcache should offer a
660 better alternative. But we are using this approach until it is ready.
660 better alternative. But we are using this approach until it is ready.
661 """
661 """
662 cl = self.changelog
662 cl = self.changelog
663 return cl.rev(cl.tip())
663 return cl.rev(cl.tip())
664
664
665 @unfilteredmethod # Until we get a smarter cache management
665 @unfilteredmethod # Until we get a smarter cache management
666 def updatebranchcache(self):
666 def updatebranchcache(self):
667 cl = self.changelog
667 cl = self.changelog
668 tip = cl.tip()
668 tip = cl.tip()
669 if self._branchcache is not None and self._branchcachetip == tip:
669 if self._branchcache is not None and self._branchcachetip == tip:
670 return
670 return
671
671
672 oldtip = self._branchcachetip
672 oldtip = self._branchcachetip
673 if oldtip is None or oldtip not in cl.nodemap:
673 if oldtip is None or oldtip not in cl.nodemap:
674 partial, last, lrev = branchmap.read(self)
674 partial, last, lrev = branchmap.read(self)
675 else:
675 else:
676 lrev = cl.rev(oldtip)
676 lrev = cl.rev(oldtip)
677 partial = self._branchcache
677 partial = self._branchcache
678
678
679 catip = self._cacheabletip()
679 catip = self._cacheabletip()
680 # if lrev == catip: cache is already up to date
680 # if lrev == catip: cache is already up to date
681 # if lrev > catip: we have uncachable element in `partial` can't write
681 # if lrev > catip: we have uncachable element in `partial` can't write
682 # on disk
682 # on disk
683 if lrev < catip:
683 if lrev < catip:
684 ctxgen = (self[r] for r in cl.revs(lrev + 1, catip))
684 ctxgen = (self[r] for r in cl.revs(lrev + 1, catip))
685 self._updatebranchcache(partial, ctxgen)
685 self._updatebranchcache(partial, ctxgen)
686 branchmap.write(self, partial, cl.node(catip), catip)
686 branchmap.write(self, partial, cl.node(catip), catip)
687 lrev = catip
687 lrev = catip
688 # If cacheable tip were lower than actual tip, we need to update the
688 # If cacheable tip were lower than actual tip, we need to update the
689 # cache up to tip. This update (from cacheable to actual tip) is not
689 # cache up to tip. This update (from cacheable to actual tip) is not
690 # written to disk since it's not cacheable.
690 # written to disk since it's not cacheable.
691 tiprev = len(self) - 1
691 tiprev = len(self) - 1
692 if lrev < tiprev:
692 if lrev < tiprev:
693 ctxgen = (self[r] for r in cl.revs(lrev + 1, tiprev))
693 ctxgen = (self[r] for r in cl.revs(lrev + 1, tiprev))
694 self._updatebranchcache(partial, ctxgen)
694 self._updatebranchcache(partial, ctxgen)
695 self._branchcache = partial
695 self._branchcache = partial
696 self._branchcachetip = tip
696 self._branchcachetip = tip
697
697
698 def branchmap(self):
698 def branchmap(self):
699 '''returns a dictionary {branch: [branchheads]}'''
699 '''returns a dictionary {branch: [branchheads]}'''
700 if self.changelog.filteredrevs:
700 if self.changelog.filteredrevs:
701 # some changeset are excluded we can't use the cache
701 # some changeset are excluded we can't use the cache
702 branchmap = {}
702 branchmap = {}
703 self._updatebranchcache(branchmap, (self[r] for r in self))
703 self._updatebranchcache(branchmap, (self[r] for r in self))
704 return branchmap
704 return branchmap
705 else:
705 else:
706 self.updatebranchcache()
706 self.updatebranchcache()
707 return self._branchcache
707 return self._branchcache
708
708
709
709
710 def _branchtip(self, heads):
710 def _branchtip(self, heads):
711 '''return the tipmost branch head in heads'''
711 '''return the tipmost branch head in heads'''
712 tip = heads[-1]
712 tip = heads[-1]
713 for h in reversed(heads):
713 for h in reversed(heads):
714 if not self[h].closesbranch():
714 if not self[h].closesbranch():
715 tip = h
715 tip = h
716 break
716 break
717 return tip
717 return tip
718
718
719 def branchtip(self, branch):
719 def branchtip(self, branch):
720 '''return the tip node for a given branch'''
720 '''return the tip node for a given branch'''
721 if branch not in self.branchmap():
721 if branch not in self.branchmap():
722 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
722 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
723 return self._branchtip(self.branchmap()[branch])
723 return self._branchtip(self.branchmap()[branch])
724
724
725 def branchtags(self):
725 def branchtags(self):
726 '''return a dict where branch names map to the tipmost head of
726 '''return a dict where branch names map to the tipmost head of
727 the branch, open heads come before closed'''
727 the branch, open heads come before closed'''
728 bt = {}
728 bt = {}
729 for bn, heads in self.branchmap().iteritems():
729 for bn, heads in self.branchmap().iteritems():
730 bt[bn] = self._branchtip(heads)
730 bt[bn] = self._branchtip(heads)
731 return bt
731 return bt
732
732
733 @unfilteredmethod # Until we get a smarter cache management
734 def _updatebranchcache(self, partial, ctxgen):
733 def _updatebranchcache(self, partial, ctxgen):
735 """Given a branchhead cache, partial, that may have extra nodes or be
734 """Given a branchhead cache, partial, that may have extra nodes or be
736 missing heads, and a generator of nodes that are at least a superset of
735 missing heads, and a generator of nodes that are at least a superset of
737 heads missing, this function updates partial to be correct.
736 heads missing, this function updates partial to be correct.
738 """
737 """
739 # collect new branch entries
738 # collect new branch entries
740 newbranches = {}
739 newbranches = {}
741 for c in ctxgen:
740 for c in ctxgen:
742 newbranches.setdefault(c.branch(), []).append(c.node())
741 newbranches.setdefault(c.branch(), []).append(c.node())
743 # if older branchheads are reachable from new ones, they aren't
742 # if older branchheads are reachable from new ones, they aren't
744 # really branchheads. Note checking parents is insufficient:
743 # really branchheads. Note checking parents is insufficient:
745 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
744 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
746 for branch, newnodes in newbranches.iteritems():
745 for branch, newnodes in newbranches.iteritems():
747 bheads = partial.setdefault(branch, [])
746 bheads = partial.setdefault(branch, [])
748 # Remove candidate heads that no longer are in the repo (e.g., as
747 # Remove candidate heads that no longer are in the repo (e.g., as
749 # the result of a strip that just happened). Avoid using 'node in
748 # the result of a strip that just happened). Avoid using 'node in
750 # self' here because that dives down into branchcache code somewhat
749 # self' here because that dives down into branchcache code somewhat
751 # recursively.
750 # recursively.
752 bheadrevs = [self.changelog.rev(node) for node in bheads
751 bheadrevs = [self.changelog.rev(node) for node in bheads
753 if self.changelog.hasnode(node)]
752 if self.changelog.hasnode(node)]
754 newheadrevs = [self.changelog.rev(node) for node in newnodes
753 newheadrevs = [self.changelog.rev(node) for node in newnodes
755 if self.changelog.hasnode(node)]
754 if self.changelog.hasnode(node)]
756 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
755 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
757 # Remove duplicates - nodes that are in newheadrevs and are already
756 # Remove duplicates - nodes that are in newheadrevs and are already
758 # in bheadrevs. This can happen if you strip a node whose parent
757 # in bheadrevs. This can happen if you strip a node whose parent
759 # was already a head (because they're on different branches).
758 # was already a head (because they're on different branches).
760 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
759 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
761
760
762 # Starting from tip means fewer passes over reachable. If we know
761 # Starting from tip means fewer passes over reachable. If we know
763 # the new candidates are not ancestors of existing heads, we don't
762 # the new candidates are not ancestors of existing heads, we don't
764 # have to examine ancestors of existing heads
763 # have to examine ancestors of existing heads
765 if ctxisnew:
764 if ctxisnew:
766 iterrevs = sorted(newheadrevs)
765 iterrevs = sorted(newheadrevs)
767 else:
766 else:
768 iterrevs = list(bheadrevs)
767 iterrevs = list(bheadrevs)
769
768
770 # This loop prunes out two kinds of heads - heads that are
769 # This loop prunes out two kinds of heads - heads that are
771 # superseded by a head in newheadrevs, and newheadrevs that are not
770 # superseded by a head in newheadrevs, and newheadrevs that are not
772 # heads because an existing head is their descendant.
771 # heads because an existing head is their descendant.
773 while iterrevs:
772 while iterrevs:
774 latest = iterrevs.pop()
773 latest = iterrevs.pop()
775 if latest not in bheadrevs:
774 if latest not in bheadrevs:
776 continue
775 continue
777 ancestors = set(self.changelog.ancestors([latest],
776 ancestors = set(self.changelog.ancestors([latest],
778 bheadrevs[0]))
777 bheadrevs[0]))
779 if ancestors:
778 if ancestors:
780 bheadrevs = [b for b in bheadrevs if b not in ancestors]
779 bheadrevs = [b for b in bheadrevs if b not in ancestors]
781 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
780 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
782
781
783 # There may be branches that cease to exist when the last commit in the
782 # There may be branches that cease to exist when the last commit in the
784 # branch was stripped. This code filters them out. Note that the
783 # branch was stripped. This code filters them out. Note that the
785 # branch that ceased to exist may not be in newbranches because
784 # branch that ceased to exist may not be in newbranches because
786 # newbranches is the set of candidate heads, which when you strip the
785 # newbranches is the set of candidate heads, which when you strip the
787 # last commit in a branch will be the parent branch.
786 # last commit in a branch will be the parent branch.
788 for branch in partial.keys():
787 for branch in partial.keys():
789 nodes = [head for head in partial[branch]
788 nodes = [head for head in partial[branch]
790 if self.changelog.hasnode(head)]
789 if self.changelog.hasnode(head)]
791 if not nodes:
790 if not nodes:
792 del partial[branch]
791 del partial[branch]
793
792
794 def lookup(self, key):
793 def lookup(self, key):
795 return self[key].node()
794 return self[key].node()
796
795
797 def lookupbranch(self, key, remote=None):
796 def lookupbranch(self, key, remote=None):
798 repo = remote or self
797 repo = remote or self
799 if key in repo.branchmap():
798 if key in repo.branchmap():
800 return key
799 return key
801
800
802 repo = (remote and remote.local()) and remote or self
801 repo = (remote and remote.local()) and remote or self
803 return repo[key].branch()
802 return repo[key].branch()
804
803
805 def known(self, nodes):
804 def known(self, nodes):
806 nm = self.changelog.nodemap
805 nm = self.changelog.nodemap
807 pc = self._phasecache
806 pc = self._phasecache
808 result = []
807 result = []
809 for n in nodes:
808 for n in nodes:
810 r = nm.get(n)
809 r = nm.get(n)
811 resp = not (r is None or pc.phase(self, r) >= phases.secret)
810 resp = not (r is None or pc.phase(self, r) >= phases.secret)
812 result.append(resp)
811 result.append(resp)
813 return result
812 return result
814
813
815 def local(self):
814 def local(self):
816 return self
815 return self
817
816
818 def cancopy(self):
817 def cancopy(self):
819 return self.local() # so statichttprepo's override of local() works
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records: the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

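    # Illustrative sketch (editorial, not part of the original file): the
    # 'encode' and 'decode' sections that _loadfilter reads come from hgrc,
    # mapping a file pattern to a shell command or a filter registered via
    # adddatafilter(); '!' disables a pattern. For example:
    #
    #   [encode]
    #   *.txt = dos2unix
    #   [decode]
    #   *.txt = unix2dos
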
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

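    # Editorial note: filter direction follows the store's point of view --
    # wread() runs the 'encode' filters on data read from the working
    # directory, while wwrite()/wwritedata() run the 'decode' filters on
    # data headed back out to the working directory.
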
    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

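    # Usage sketch (editorial, assuming the caller already holds the store
    # lock): nested calls simply join the transaction already running.
    #
    #   tr = repo.transaction('my-operation')
    #   try:
    #       # ... write store data through tr ...
    #       tr.close()      # commit the journal
    #   finally:
    #       tr.release()    # roll back if close() was never reached
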
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcache = None # in UTF-8
        self.unfiltered()._branchcachetip = None
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        if 'hiddenrevs' in vars(self):
            del self.hiddenrevs

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has been.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

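    # Editorial note: commit() below is a concrete in-tree user of
    # _afterlock -- it defers firing the 'commit' hook until the lock is
    # released, so hook scripts observe a fully consistent repository.
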
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

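    # Usage sketch (editorial): store-level operations take the lock first
    # and open a transaction inside it -- pull() below follows this shape.
    #
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('add-changesets')
    #       try:
    #           # ... mutate the store ...
    #           tr.close()
    #       finally:
    #           tr.release()
    #   finally:
    #       lock.release()
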
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            #    0 --- 1 --- 3   rev1 changes file foo
            #      \       /     rev2 renames foo to bar and changes it
            #       \- 2 -/      rev3 should have bar with all changes and
            #                         should record that bar descends from
            #                         bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            #    0 --- 1 --- 3   rev4 reverts the content change from rev2
            #      \       /     merging rev3 and rev4 should use bar@rev2
            #       \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

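    # Editorial note: the status tuple indexed throughout commit() is
    # (modified, added, removed, deleted, unknown, ignored, clean), so
    # changes[0..3] above are the modified, added, removed and deleted
    # lists. A caller needs no explicit locking; commit() takes the
    # wlock itself, e.g.
    #
    #   node = repo.commit(text='automated checkpoint', user='tool')
    #   if node is None:
    #       pass  # nothing changed, nothing committed
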
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets; if a parent has a higher phase, the
                # resulting phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheadcache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerate it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            branchmap.write(self, self._branchcache, self.changelog.tip(),
                            tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

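    # Usage sketch (editorial): callers unpack the seven lists positionally,
    # in the order returned above, e.g.
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, ignored=True, clean=True)
    #   for f in modified:
    #       repo.ui.write('M %s\n' % f)
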
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

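    # Usage sketch (editorial): list the open heads of the 'default' branch,
    # newest first, as short hashes (short() is imported at the top of this
    # module).
    #
    #   for h in repo.branchheads('default'):
    #       repo.ui.write('%s\n' % short(h))
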
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

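    # between() samples the first-parent chain at exponentially growing
    # distances below 'top': it records a node whenever the step counter i
    # reaches f, then doubles f. A minimal sketch of the same idea on plain
    # integers (illustrative only, not part of the wire protocol code):
    #
    #   >>> def sample(top, bottom):
    #   ...     n, l, i, f = top, [], 0, 1
    #   ...     while n != bottom:
    #   ...         if i == f:
    #   ...             l.append(n)
    #   ...             f *= 2
    #   ...         n -= 1  # stand-in for following the first parent
    #   ...         i += 1
    #   ...     return l
    #   >>> sample(10, 0)
    #   [9, 8, 6, 2]
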
    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible,
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset,
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
                    self.invalidatevolatilesets()
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

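    # Sketch of how pull() is typically driven (illustrative; the URL and
    # the 'other' peer are hypothetical, not defined in this module):
    #
    #   >>> from mercurial import ui, hg
    #   >>> u = ui.ui()
    #   >>> repo = hg.repository(u, '.')
    #   >>> other = hg.peer(u, {}, 'http://example.com/repo')
    #   >>> repo.pull(other)                 # full pull
    #   >>> repo.pull(other, heads=[node])   # partial pull; 'node' is a
    #   ...                                  # placeholder binary node id
    #
    # The return value is whatever addchangegroup() returned (see its
    # docstring below), or 0 when nothing was fetched.
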
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

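    # A hedged sketch of how an extension might hook checkpush(), using the
    # common reposetup class-swizzling pattern (the extension module and its
    # policy are hypothetical):
    #
    #   def reposetup(ui, repo):
    #       class checkedrepo(repo.__class__):
    #           def checkpush(self, force, revs):
    #               super(checkedrepo, self).checkpush(force, revs)
    #               if not force and revs is None:
    #                   raise util.Abort('refusing to push everything; '
    #                                    'use --force or -r')
    #       repo.__class__ = checkedrepo
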
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to a remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        unfi = self.unfiltered()
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(unfi, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(unfi, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete
                        # then, save the iteration
                        if unfi.obsstore:
                            # these messages are kept short to stay within
                            # the 80-char line limit
                            mso = _("push includes obsolete changeset: %s!")
                            msu = _("push includes unstable changeset: %s!")
                            msb = _("push includes bumped changeset: %s!")
                            msd = _("push includes divergent changeset: %s!")
                            # If there is at least one obsolete or unstable
                            # changeset in missing, at least one of the
                            # missing heads will be obsolete or unstable.
                            # So checking heads only is ok.
                            for node in outgoing.missingheads:
                                ctx = unfi[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(msu % ctx)
                                elif ctx.bumped():
                                    raise util.Abort(msb % ctx)
                                elif ctx.divergent():
                                    raise util.Abort(msd % ctx)
                        discovery.checkheads(unfi, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed; synchronize on all common heads
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on remote but
                    # public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly
                    outdated = unfi.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in unfi._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in unfi:
                    cr = unfi[nr]
                    cl = unfi[nl]
                    if bookmarks.validdest(unfi, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

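    # The integer contract in the docstring above means callers usually
    # reduce push()'s result to a success flag roughly like this
    # (illustrative sketch; the branches are placeholders):
    #
    #   ret = repo.push(other)
    #   if ret == 0:
    #       ...   # HTTP error talking to the remote
    #   elif ret is None or ret == 1:
    #       ...   # nothing pushed, or pushed with head count unchanged
    #   else:
    #       ...   # an addchangegroup()-style head-count delta
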
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = cl.ancestors([cl.rev(n) for n in bases])
        return self._changegroupsubset(common, csets, heads, source)

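    # In revset terms, changegroupsubset(bases, heads, ...) bundles roughly
    # "bases::heads": descendants of any base that are also ancestors of any
    # head, bases included. A hedged usage sketch (basenode/headnode are
    # placeholder binary node ids):
    #
    #   >>> cg = repo.changegroupsubset([basenode], [headnode], 'bundle')
    #   >>> chunk = cg.read(4096)   # chunkbuffer: stream the changegroup out
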
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            hasnode = cl.hasnode
            common = [n for n in common if hasnode(n)]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

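    # Relationship sketch: getbundle() builds the discovery.outgoing object
    # itself, while getlocalbundle() expects one precomputed. Roughly,
    # assuming every node in cs is known locally:
    #
    #   repo.getbundle('pull', heads=hs, common=cs)
    # behaves like:
    #   repo.getlocalbundle('pull', discovery.outgoing(repo.changelog, cs, hs))
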
    @unfilteredmethod
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

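    # The generator above emits a bundle10-format stream: first the
    # changelog group, then the manifest group, then one group per touched
    # file, each preceded by bundler.fileheader(fname), and finally
    # bundler.close(). Schematically (not literal bytes):
    #
    #   [changelog chunks] [manifest chunks]
    #   [fileheader("a") + chunks] [fileheader("b") + chunks] ... [close]
    #
    # The lookup() callback is what ties each revlog node back to the
    # changeset (its "linkrev") that introduced it while groups are built.
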
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    @unfilteredmethod
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    @unfilteredmethod
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            self.invalidatevolatilesets()

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret.
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                self.updatebranchcache()
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

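    # Worked example of the return encoding: with 2 heads before and 4
    # after, dh == 2 and the function returns 3 ("more heads than before:
    # 1+added heads"). With 3 heads before and 2 after, dh == -1 and it
    # returns -2. An unchanged head count returns 1, so 0 stays reserved
    # for "nothing changed or no source".
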
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save the remote branchmap. We will use it later
            # to speed up branchcache creation.
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                self.branchcache = rbranchmap
                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    branchmap.write(self, self.branchcache,
                                    self[rtiprev].node(), rtiprev)
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

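    # The stream_out wire format consumed above, sketched line by line as
    # this method parses it (a reading aid, not a normative spec):
    #
    #   "0\n"                            # status: 0 ok, 1 forbidden,
    #                                    # 2 remote lock failed
    #   "<total_files> <total_bytes>\n"
    #   then, repeated total_files times:
    #   "<store-encoded name>\0<size>\n" followed by exactly <size> raw bytes
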
2600 def clone(self, remote, heads=[], stream=False):
2599 def clone(self, remote, heads=[], stream=False):
2601 '''clone remote repository.
2600 '''clone remote repository.
2602
2601
2603 keyword arguments:
2602 keyword arguments:
2604 heads: list of revs to clone (forces use of pull)
2603 heads: list of revs to clone (forces use of pull)
2605 stream: use streaming clone if possible'''
2604 stream: use streaming clone if possible'''
2606
2605
2607 # now, all clients that can request uncompressed clones can
2606 # now, all clients that can request uncompressed clones can
2608 # read repo formats supported by all servers that can serve
2607 # read repo formats supported by all servers that can serve
2609 # them.
2608 # them.
2610
2609
2611 # if revlog format changes, client will have to check version
2610 # if revlog format changes, client will have to check version
2612 # and format flags on "stream" capability, and use
2611 # and format flags on "stream" capability, and use
2613 # uncompressed only if compatible.
2612 # uncompressed only if compatible.
2614
2613
2615 if not stream:
2614 if not stream:
2616 # if the server explicitly prefers to stream (for fast LANs)
2615 # if the server explicitly prefers to stream (for fast LANs)
2617 stream = remote.capable('stream-preferred')
2616 stream = remote.capable('stream-preferred')
2618
2617
2619 if stream and not heads:
2618 if stream and not heads:
2620 # 'stream' means remote revlog format is revlogv1 only
2619 # 'stream' means remote revlog format is revlogv1 only
2621 if remote.capable('stream'):
2620 if remote.capable('stream'):
2622 return self.stream_in(remote, set(('revlogv1',)))
2621 return self.stream_in(remote, set(('revlogv1',)))
2623 # otherwise, 'streamreqs' contains the remote revlog format
2622 # otherwise, 'streamreqs' contains the remote revlog format
2624 streamreqs = remote.capable('streamreqs')
2623 streamreqs = remote.capable('streamreqs')
2625 if streamreqs:
2624 if streamreqs:
2626 streamreqs = set(streamreqs.split(','))
2625 streamreqs = set(streamreqs.split(','))
2627 # if we support it, stream in and adjust our requirements
2626 # if we support it, stream in and adjust our requirements
2628 if not streamreqs - self.supportedformats:
2627 if not streamreqs - self.supportedformats:
2629 return self.stream_in(remote, streamreqs)
2628 return self.stream_in(remote, streamreqs)
2630 return self.pull(remote, heads)
2629 return self.pull(remote, heads)
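    # Sketch of the decision above (hypothetical server): a peer that
    # advertises 'stream-preferred' is streamed from even if the caller
    # passed stream=False, while an explicit heads list always forces a
    # pull, because a streaming clone can only copy a whole repository.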

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret
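    # Example (values hypothetical): a bookmark push arrives here as
    #   repo.pushkey('bookmarks', 'feature-x', '', hex(newnode))
    # where old='' asserts the key was previously unset; the result of
    # pushkey.push is reported back over the wire.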

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
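    # Example (sketch): repo.listkeys('bookmarks') returns a dict mapping
    # bookmark names to hex nodes, and repo.listkeys('namespaces') lists
    # the pushkey namespaces this repository exposes.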

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])
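    # The message lands in .hg/last-message.txt (self.opener is rooted at
    # the .hg directory), and the relative path returned here is what the
    # "commit message saved in ..." hint shows after a failed commit.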

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
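# Sketch (hypothetical call site): the returned closure is installed as a
# transaction's post-close callback, e.g.
#   cb = aftertrans([('journal', 'undo')])
#   cb()  # renames 'journal' to 'undo'; a missing journal is ignored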

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
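# e.g. (POSIX paths) undoname('/repo/.hg/journal.bookmarks') returns
# '/repo/.hg/undo.bookmarks'; only the first 'journal' is replaced.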

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)
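# instance() above and islocal() below form the small module-level protocol
# that mercurial.hg uses to select a repository class for a path; any module
# exposing these two callables can act as a repo type.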

def islocal(path):
    return True