cache: group obscache and revsfiltercache invalidation in a single function...
Pierre-Yves David - r18105:312262eb default
@@ -1,2703 +1,2705 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on a repo is done for logic that should be
    unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        return super(unfilteredpropertycache, self).__get__(repo.unfiltered())

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered
    version of the repository"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

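# [editor's sketch, not part of localrepo.py] The helpers above all reroute
# work to repo.unfiltered(). A minimal standalone illustration of that
# delegation pattern; the toy class and names below are hypothetical:
def _sketch_unfiltered_only(orig):
    # same shape as unfilteredmethod: run orig against obj.unfiltered()
    def wrapper(obj, *args, **kwargs):
        return orig(obj.unfiltered(), *args, **kwargs)
    return wrapper

class _sketchtoy(object):
    def unfiltered(self):
        return self  # a filtered repoview would return the base repo here
    @_sketch_unfiltered_only
    def work(self):
        return 'ran unfiltered'
# _sketchtoy().work() -> 'ran unfiltered'
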
MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return discovery.visiblebranchmap(self._repo)

    def heads(self):
        return discovery.visibleheads(self._repo)

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expand=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

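# [editor's sketch, not part of localrepo.py] _writerequirements above defines
# the on-disk format of .hg/requires: one requirement token per line. A reader
# mirroring it (upstream, scmutil.readrequires plays this role; the default
# path below is a hypothetical example):
def _sketch_readrequires(path='.hg/requires'):
    with open(path) as fp:
        return set(line.strip() for line in fp if line.strip())
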
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return the unfiltered version of the repository

        Intended to be overwritten by the filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly a subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

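# [editor's sketch, not part of localrepo.py] filtered() builds its proxy
# class at runtime so a view inherits both repoview behaviour and whatever
# concrete repo subclass is in use. The same trick in isolation, with
# hypothetical view/factory names:
class _sketchview(object):
    def __init__(self, base, name):
        self._base, self._name = base, name

def _sketch_filtered(obj, name):
    # mix the view class with obj's (possibly subclassed) concrete class
    class proxycls(_sketchview, obj.__class__):
        pass
    return proxycls(obj, name)
# _sketch_filtered(object(), 'visible') is both a _sketchview and an object
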
    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @unfilteredpropertycache
    def hiddenrevs(self):
        """hiddenrevs: revs that should be hidden by commands and tools

        This set is carried on the repo to ease initialization and lazy
        loading; it'll probably move back to changelog for efficiency and
        consistency reasons.

        Note that hiddenrevs needs to be invalidated when
        - a new changeset is added (possibly unstable above extinct)
        - a new obsolescence marker is added (possibly a new extinct changeset)

        hidden changesets cannot have non-hidden descendants
        """
        hidden = set()
        if self.obsstore:
            ### hide extinct changesets that are not accessible by any means
            hiddenquery = 'extinct() - ::(. + bookmark())'
            hidden.update(self.revs(hiddenquery))
        return hidden

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

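# [editor's sketch, not part of localrepo.py] Typical use of the two revset
# helpers above, assuming an already-open repo object; the '%d' placeholder
# is expanded by revset.formatspec:
#
#   revs = repo.revs('children(%d)', 42)    # list of integer revisions
#   for ctx in repo.set('head() and not closed()'):
#       print(ctx.hex())                    # contexts, lazily yielded
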
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

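# [editor's sketch, not part of localrepo.py] nodetags and nodebookmarks both
# answer reverse lookups (node -> names) over a name -> node mapping; the core
# inversion, standalone and with hypothetical names:
def _sketch_invert(nametonode):
    bynode = {}
    for name, node in nametonode.items():
        bynode.setdefault(node, []).append(name)
    for names in bynode.values():
        names.sort()  # deterministic order, as nodetags guarantees
    return bynode
# _sketch_invert({'tip': 'n1', 'v1.0': 'n1', 'book': 'n2'})
# -> {'n1': ['tip', 'v1.0'], 'n2': ['book']}
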
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    @unfilteredmethod # Until we get smarter cache management
    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        if self.changelog.filteredrevs:
            # some changesets are excluded, so we can't use the cache
            branchmap = {}
            self._updatebranchcache(branchmap, (self[r] for r in self))
            return branchmap
        else:
            self.updatebranchcache()
            return self._branchcache


    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    @unfilteredmethod # Until we get smarter cache management
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if node not in self:
                    raise ValueError('invalidating branch cache because node '
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    @unfilteredmethod # Until we get smarter cache management
    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    @unfilteredmethod # Until we get smarter cache management
    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

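# [editor's sketch, not part of localrepo.py] The pruning loop above, reduced
# to its essentials: visit candidate head revs from highest to lowest and drop
# every candidate reachable from a surviving one. ancestors() here is a
# hypothetical stand-in for changelog.ancestors(), returning strict ancestors.
def _sketch_pruneheads(candidates, ancestors):
    heads = sorted(set(candidates))
    for latest in reversed(list(heads)):
        if latest not in heads:
            continue  # already pruned by a later head
        heads = [h for h in heads if h not in ancestors(latest)]
    return heads
# linear DAG 0 -> 1 -> 2: _sketch_pruneheads([0, 2], lambda r: set(range(r)))
# -> [2]
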
821 def lookup(self, key):
821 def lookup(self, key):
822 return self[key].node()
822 return self[key].node()
823
823
824 def lookupbranch(self, key, remote=None):
824 def lookupbranch(self, key, remote=None):
825 repo = remote or self
825 repo = remote or self
826 if key in repo.branchmap():
826 if key in repo.branchmap():
827 return key
827 return key
828
828
829 repo = (remote and remote.local()) and remote or self
829 repo = (remote and remote.local()) and remote or self
830 return repo[key].branch()
830 return repo[key].branch()
831
831
832 def known(self, nodes):
832 def known(self, nodes):
833 nm = self.changelog.nodemap
833 nm = self.changelog.nodemap
834 pc = self._phasecache
834 pc = self._phasecache
835 result = []
835 result = []
836 for n in nodes:
836 for n in nodes:
837 r = nm.get(n)
837 r = nm.get(n)
838 resp = not (r is None or pc.phase(self, r) >= phases.secret)
838 resp = not (r is None or pc.phase(self, r) >= phases.secret)
839 result.append(resp)
839 result.append(resp)
840 return result
840 return result
841
841
842 def local(self):
842 def local(self):
843 return self
843 return self
844
844
845 def cancopy(self):
845 def cancopy(self):
846 return self.local() # so statichttprepo's override of local() works
846 return self.local() # so statichttprepo's override of local() works
847
847
848 def join(self, f):
848 def join(self, f):
849 return os.path.join(self.path, f)
849 return os.path.join(self.path, f)
850
850
851 def wjoin(self, f):
851 def wjoin(self, f):
852 return os.path.join(self.root, f)
852 return os.path.join(self.root, f)
853
853
854 def file(self, f):
854 def file(self, f):
855 if f[0] == '/':
855 if f[0] == '/':
856 f = f[1:]
856 f = f[1:]
857 return filelog.filelog(self.sopener, f)
857 return filelog.filelog(self.sopener, f)
858
858
859 def changectx(self, changeid):
859 def changectx(self, changeid):
860 return self[changeid]
860 return self[changeid]
861
861
862 def parents(self, changeid=None):
862 def parents(self, changeid=None):
863 '''get list of changectxs for parents of changeid'''
863 '''get list of changectxs for parents of changeid'''
864 return self[changeid].parents()
864 return self[changeid].parents()
865
865
866 def setparents(self, p1, p2=nullid):
866 def setparents(self, p1, p2=nullid):
867 copies = self.dirstate.setparents(p1, p2)
867 copies = self.dirstate.setparents(p1, p2)
868 if copies:
868 if copies:
869 # Adjust copy records, the dirstate cannot do it, it
869 # Adjust copy records, the dirstate cannot do it, it
870 # requires access to parents manifests. Preserve them
870 # requires access to parents manifests. Preserve them
871 # only for entries added to first parent.
871 # only for entries added to first parent.
872 pctx = self[p1]
872 pctx = self[p1]
873 for f in copies:
873 for f in copies:
874 if f not in pctx and copies[f] in pctx:
874 if f not in pctx and copies[f] in pctx:
875 self.dirstate.copy(copies[f], f)
875 self.dirstate.copy(copies[f], f)
876
876
877 def filectx(self, path, changeid=None, fileid=None):
877 def filectx(self, path, changeid=None, fileid=None):
878 """changeid can be a changeset revision, node, or tag.
878 """changeid can be a changeset revision, node, or tag.
879 fileid can be a file revision or node."""
879 fileid can be a file revision or node."""
880 return context.filectx(self, path, changeid, fileid)
880 return context.filectx(self, path, changeid, fileid)
881
881
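A minimal usage sketch for filectx(), assuming a repository opened with
mercurial.hg.repository (the path, file name and revision below are
illustrative, not taken from this change):

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '/path/to/repo')
    # changeid may be a revision number, node, or tag, per the docstring
    fctx = repo.filectx('README', changeid='tip')
    data = fctx.data()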
882 def getcwd(self):
882 def getcwd(self):
883 return self.dirstate.getcwd()
883 return self.dirstate.getcwd()
884
884
885 def pathto(self, f, cwd=None):
885 def pathto(self, f, cwd=None):
886 return self.dirstate.pathto(f, cwd)
886 return self.dirstate.pathto(f, cwd)
887
887
888 def wfile(self, f, mode='r'):
888 def wfile(self, f, mode='r'):
889 return self.wopener(f, mode)
889 return self.wopener(f, mode)
890
890
891 def _link(self, f):
891 def _link(self, f):
892 return os.path.islink(self.wjoin(f))
892 return os.path.islink(self.wjoin(f))
893
893
894 def _loadfilter(self, filter):
894 def _loadfilter(self, filter):
895 if filter not in self.filterpats:
895 if filter not in self.filterpats:
896 l = []
896 l = []
897 for pat, cmd in self.ui.configitems(filter):
897 for pat, cmd in self.ui.configitems(filter):
898 if cmd == '!':
898 if cmd == '!':
899 continue
899 continue
900 mf = matchmod.match(self.root, '', [pat])
900 mf = matchmod.match(self.root, '', [pat])
901 fn = None
901 fn = None
902 params = cmd
902 params = cmd
903 for name, filterfn in self._datafilters.iteritems():
903 for name, filterfn in self._datafilters.iteritems():
904 if cmd.startswith(name):
904 if cmd.startswith(name):
905 fn = filterfn
905 fn = filterfn
906 params = cmd[len(name):].lstrip()
906 params = cmd[len(name):].lstrip()
907 break
907 break
908 if not fn:
908 if not fn:
909 fn = lambda s, c, **kwargs: util.filter(s, c)
909 fn = lambda s, c, **kwargs: util.filter(s, c)
910 # Wrap old filters not supporting keyword arguments
910 # Wrap old filters not supporting keyword arguments
911 if not inspect.getargspec(fn)[2]:
911 if not inspect.getargspec(fn)[2]:
912 oldfn = fn
912 oldfn = fn
913 fn = lambda s, c, **kwargs: oldfn(s, c)
913 fn = lambda s, c, **kwargs: oldfn(s, c)
914 l.append((mf, fn, params))
914 l.append((mf, fn, params))
915 self.filterpats[filter] = l
915 self.filterpats[filter] = l
916 return self.filterpats[filter]
916 return self.filterpats[filter]
917
917
918 def _filter(self, filterpats, filename, data):
918 def _filter(self, filterpats, filename, data):
919 for mf, fn, cmd in filterpats:
919 for mf, fn, cmd in filterpats:
920 if mf(filename):
920 if mf(filename):
921 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
921 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
922 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
922 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
923 break
923 break
924
924
925 return data
925 return data
926
926
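The filter machinery above is driven by [encode]/[decode] sections in hgrc;
a sketch of the configuration shape it consumes (the pattern and commands
are illustrative; 'pipe:' is one of the built-in command prefixes understood
by util.filter):

    [encode]
    # decompress gzip files on checkin so revlog deltas stay small
    *.gz = pipe: gunzip

    [decode]
    # recompress them when writing to the working directory
    *.gz = pipe: gzip

Named filters registered through adddatafilter() (as the win32text extension
does) are matched instead when the configured command starts with the
filter's name.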
927 @unfilteredpropertycache
927 @unfilteredpropertycache
928 def _encodefilterpats(self):
928 def _encodefilterpats(self):
929 return self._loadfilter('encode')
929 return self._loadfilter('encode')
930
930
931 @unfilteredpropertycache
931 @unfilteredpropertycache
932 def _decodefilterpats(self):
932 def _decodefilterpats(self):
933 return self._loadfilter('decode')
933 return self._loadfilter('decode')
934
934
935 def adddatafilter(self, name, filter):
935 def adddatafilter(self, name, filter):
936 self._datafilters[name] = filter
936 self._datafilters[name] = filter
937
937
938 def wread(self, filename):
938 def wread(self, filename):
939 if self._link(filename):
939 if self._link(filename):
940 data = os.readlink(self.wjoin(filename))
940 data = os.readlink(self.wjoin(filename))
941 else:
941 else:
942 data = self.wopener.read(filename)
942 data = self.wopener.read(filename)
943 return self._filter(self._encodefilterpats, filename, data)
943 return self._filter(self._encodefilterpats, filename, data)
944
944
945 def wwrite(self, filename, data, flags):
945 def wwrite(self, filename, data, flags):
946 data = self._filter(self._decodefilterpats, filename, data)
946 data = self._filter(self._decodefilterpats, filename, data)
947 if 'l' in flags:
947 if 'l' in flags:
948 self.wopener.symlink(data, filename)
948 self.wopener.symlink(data, filename)
949 else:
949 else:
950 self.wopener.write(filename, data)
950 self.wopener.write(filename, data)
951 if 'x' in flags:
951 if 'x' in flags:
952 util.setflags(self.wjoin(filename), False, True)
952 util.setflags(self.wjoin(filename), False, True)
953
953
954 def wwritedata(self, filename, data):
954 def wwritedata(self, filename, data):
955 return self._filter(self._decodefilterpats, filename, data)
955 return self._filter(self._decodefilterpats, filename, data)
956
956
957 def transaction(self, desc):
957 def transaction(self, desc):
958 tr = self._transref and self._transref() or None
958 tr = self._transref and self._transref() or None
959 if tr and tr.running():
959 if tr and tr.running():
960 return tr.nest()
960 return tr.nest()
961
961
962 # abort here if the journal already exists
962 # abort here if the journal already exists
963 if os.path.exists(self.sjoin("journal")):
963 if os.path.exists(self.sjoin("journal")):
964 raise error.RepoError(
964 raise error.RepoError(
965 _("abandoned transaction found - run hg recover"))
965 _("abandoned transaction found - run hg recover"))
966
966
967 self._writejournal(desc)
967 self._writejournal(desc)
968 renames = [(x, undoname(x)) for x in self._journalfiles()]
968 renames = [(x, undoname(x)) for x in self._journalfiles()]
969
969
970 tr = transaction.transaction(self.ui.warn, self.sopener,
970 tr = transaction.transaction(self.ui.warn, self.sopener,
971 self.sjoin("journal"),
971 self.sjoin("journal"),
972 aftertrans(renames),
972 aftertrans(renames),
973 self.store.createmode)
973 self.store.createmode)
974 self._transref = weakref.ref(tr)
974 self._transref = weakref.ref(tr)
975 return tr
975 return tr
976
976
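A sketch of the conventional caller pattern for transaction(), assuming an
already-open repo object: close() commits the journal, while release()
aborts the transaction if close() was never reached:

    l = repo.lock()
    try:
        tr = repo.transaction('example')
        try:
            # ... append to revlogs and other store files through tr ...
            tr.close()
        finally:
            tr.release()
    finally:
        l.release()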
977 def _journalfiles(self):
977 def _journalfiles(self):
978 return (self.sjoin('journal'), self.join('journal.dirstate'),
978 return (self.sjoin('journal'), self.join('journal.dirstate'),
979 self.join('journal.branch'), self.join('journal.desc'),
979 self.join('journal.branch'), self.join('journal.desc'),
980 self.join('journal.bookmarks'),
980 self.join('journal.bookmarks'),
981 self.sjoin('journal.phaseroots'))
981 self.sjoin('journal.phaseroots'))
982
982
983 def undofiles(self):
983 def undofiles(self):
984 return [undoname(x) for x in self._journalfiles()]
984 return [undoname(x) for x in self._journalfiles()]
985
985
986 def _writejournal(self, desc):
986 def _writejournal(self, desc):
987 self.opener.write("journal.dirstate",
987 self.opener.write("journal.dirstate",
988 self.opener.tryread("dirstate"))
988 self.opener.tryread("dirstate"))
989 self.opener.write("journal.branch",
989 self.opener.write("journal.branch",
990 encoding.fromlocal(self.dirstate.branch()))
990 encoding.fromlocal(self.dirstate.branch()))
991 self.opener.write("journal.desc",
991 self.opener.write("journal.desc",
992 "%d\n%s\n" % (len(self), desc))
992 "%d\n%s\n" % (len(self), desc))
993 self.opener.write("journal.bookmarks",
993 self.opener.write("journal.bookmarks",
994 self.opener.tryread("bookmarks"))
994 self.opener.tryread("bookmarks"))
995 self.sopener.write("journal.phaseroots",
995 self.sopener.write("journal.phaseroots",
996 self.sopener.tryread("phaseroots"))
996 self.sopener.tryread("phaseroots"))
997
997
998 def recover(self):
998 def recover(self):
999 lock = self.lock()
999 lock = self.lock()
1000 try:
1000 try:
1001 if os.path.exists(self.sjoin("journal")):
1001 if os.path.exists(self.sjoin("journal")):
1002 self.ui.status(_("rolling back interrupted transaction\n"))
1002 self.ui.status(_("rolling back interrupted transaction\n"))
1003 transaction.rollback(self.sopener, self.sjoin("journal"),
1003 transaction.rollback(self.sopener, self.sjoin("journal"),
1004 self.ui.warn)
1004 self.ui.warn)
1005 self.invalidate()
1005 self.invalidate()
1006 return True
1006 return True
1007 else:
1007 else:
1008 self.ui.warn(_("no interrupted transaction available\n"))
1008 self.ui.warn(_("no interrupted transaction available\n"))
1009 return False
1009 return False
1010 finally:
1010 finally:
1011 lock.release()
1011 lock.release()
1012
1012
1013 def rollback(self, dryrun=False, force=False):
1013 def rollback(self, dryrun=False, force=False):
1014 wlock = lock = None
1014 wlock = lock = None
1015 try:
1015 try:
1016 wlock = self.wlock()
1016 wlock = self.wlock()
1017 lock = self.lock()
1017 lock = self.lock()
1018 if os.path.exists(self.sjoin("undo")):
1018 if os.path.exists(self.sjoin("undo")):
1019 return self._rollback(dryrun, force)
1019 return self._rollback(dryrun, force)
1020 else:
1020 else:
1021 self.ui.warn(_("no rollback information available\n"))
1021 self.ui.warn(_("no rollback information available\n"))
1022 return 1
1022 return 1
1023 finally:
1023 finally:
1024 release(lock, wlock)
1024 release(lock, wlock)
1025
1025
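A small sketch of the dry-run support, assuming an open repo object; with
dryrun=True, _rollback() only reports what would be undone and returns 0:

    if repo.rollback(dryrun=True) == 0:
        # undo information exists; this second call performs the rollback
        repo.rollback()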
1026 @unfilteredmethod # Until we get smarter cache management
1026 @unfilteredmethod # Until we get smarter cache management
1027 def _rollback(self, dryrun, force):
1027 def _rollback(self, dryrun, force):
1028 ui = self.ui
1028 ui = self.ui
1029 try:
1029 try:
1030 args = self.opener.read('undo.desc').splitlines()
1030 args = self.opener.read('undo.desc').splitlines()
1031 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1031 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1032 if len(args) >= 3:
1032 if len(args) >= 3:
1033 detail = args[2]
1033 detail = args[2]
1034 oldtip = oldlen - 1
1034 oldtip = oldlen - 1
1035
1035
1036 if detail and ui.verbose:
1036 if detail and ui.verbose:
1037 msg = (_('repository tip rolled back to revision %s'
1037 msg = (_('repository tip rolled back to revision %s'
1038 ' (undo %s: %s)\n')
1038 ' (undo %s: %s)\n')
1039 % (oldtip, desc, detail))
1039 % (oldtip, desc, detail))
1040 else:
1040 else:
1041 msg = (_('repository tip rolled back to revision %s'
1041 msg = (_('repository tip rolled back to revision %s'
1042 ' (undo %s)\n')
1042 ' (undo %s)\n')
1043 % (oldtip, desc))
1043 % (oldtip, desc))
1044 except IOError:
1044 except IOError:
1045 msg = _('rolling back unknown transaction\n')
1045 msg = _('rolling back unknown transaction\n')
1046 desc = None
1046 desc = None
1047
1047
1048 if not force and self['.'] != self['tip'] and desc == 'commit':
1048 if not force and self['.'] != self['tip'] and desc == 'commit':
1049 raise util.Abort(
1049 raise util.Abort(
1050 _('rollback of last commit while not checked out '
1050 _('rollback of last commit while not checked out '
1051 'may lose data'), hint=_('use -f to force'))
1051 'may lose data'), hint=_('use -f to force'))
1052
1052
1053 ui.status(msg)
1053 ui.status(msg)
1054 if dryrun:
1054 if dryrun:
1055 return 0
1055 return 0
1056
1056
1057 parents = self.dirstate.parents()
1057 parents = self.dirstate.parents()
1058 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
1058 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
1059 if os.path.exists(self.join('undo.bookmarks')):
1059 if os.path.exists(self.join('undo.bookmarks')):
1060 util.rename(self.join('undo.bookmarks'),
1060 util.rename(self.join('undo.bookmarks'),
1061 self.join('bookmarks'))
1061 self.join('bookmarks'))
1062 if os.path.exists(self.sjoin('undo.phaseroots')):
1062 if os.path.exists(self.sjoin('undo.phaseroots')):
1063 util.rename(self.sjoin('undo.phaseroots'),
1063 util.rename(self.sjoin('undo.phaseroots'),
1064 self.sjoin('phaseroots'))
1064 self.sjoin('phaseroots'))
1065 self.invalidate()
1065 self.invalidate()
1066
1066
1067 # Discard all cache entries to force reloading everything.
1067 # Discard all cache entries to force reloading everything.
1068 self._filecache.clear()
1068 self._filecache.clear()
1069
1069
1070 parentgone = (parents[0] not in self.changelog.nodemap or
1070 parentgone = (parents[0] not in self.changelog.nodemap or
1071 parents[1] not in self.changelog.nodemap)
1071 parents[1] not in self.changelog.nodemap)
1072 if parentgone:
1072 if parentgone:
1073 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1073 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1074 try:
1074 try:
1075 branch = self.opener.read('undo.branch')
1075 branch = self.opener.read('undo.branch')
1076 self.dirstate.setbranch(encoding.tolocal(branch))
1076 self.dirstate.setbranch(encoding.tolocal(branch))
1077 except IOError:
1077 except IOError:
1078 ui.warn(_('named branch could not be reset: '
1078 ui.warn(_('named branch could not be reset: '
1079 'current branch is still \'%s\'\n')
1079 'current branch is still \'%s\'\n')
1080 % self.dirstate.branch())
1080 % self.dirstate.branch())
1081
1081
1082 self.dirstate.invalidate()
1082 self.dirstate.invalidate()
1083 parents = tuple([p.rev() for p in self.parents()])
1083 parents = tuple([p.rev() for p in self.parents()])
1084 if len(parents) > 1:
1084 if len(parents) > 1:
1085 ui.status(_('working directory now based on '
1085 ui.status(_('working directory now based on '
1086 'revisions %d and %d\n') % parents)
1086 'revisions %d and %d\n') % parents)
1087 else:
1087 else:
1088 ui.status(_('working directory now based on '
1088 ui.status(_('working directory now based on '
1089 'revision %d\n') % parents)
1089 'revision %d\n') % parents)
1090 # TODO: if we know which new heads may result from this rollback, pass
1090 # TODO: if we know which new heads may result from this rollback, pass
1091 # them to destroy(), which will prevent the branchhead cache from being
1091 # them to destroy(), which will prevent the branchhead cache from being
1092 # invalidated.
1092 # invalidated.
1093 self.destroyed()
1093 self.destroyed()
1094 return 0
1094 return 0
1095
1095
1096 def invalidatecaches(self):
1096 def invalidatecaches(self):
1097
1097
1098 if '_tagscache' in vars(self):
1098 if '_tagscache' in vars(self):
1099 # can't use delattr on proxy
1099 # can't use delattr on proxy
1100 del self.__dict__['_tagscache']
1100 del self.__dict__['_tagscache']
1101
1101
1102 self.unfiltered()._branchcache = None # in UTF-8
1102 self.unfiltered()._branchcache = None # in UTF-8
1103 self.unfiltered()._branchcachetip = None
1103 self.unfiltered()._branchcachetip = None
1104 obsolete.clearobscaches(self)
1105 self.filteredrevcache.clear()
1106
1104 self.invalidatevolatilesets()
1105
1106 def invalidatevolatilesets(self):
1107 self.filteredrevcache.clear()
1108 obsolete.clearobscaches(self)
1109
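This hunk is the point of the change: the obsolescence-marker caches and the
filtered-revision cache are both "volatile" (derived from mutable history),
so they are now dropped together through one helper. A hypothetical caller,
e.g. code that has just recorded an obsolescence marker (argument names
illustrative):

    repo.obsstore.create(tr, precursor, (successor,))
    # one call now invalidates both the obscache and filteredrevcache
    repo.invalidatevolatilesets()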
1107 def invalidatedirstate(self):
1110 def invalidatedirstate(self):
1108 '''Invalidates the dirstate, causing the next call to dirstate
1111 '''Invalidates the dirstate, causing the next call to dirstate
1109 to check if it was modified since the last time it was read,
1112 to check if it was modified since the last time it was read,
1110 rereading it if it has.
1113 rereading it if it has.
1111
1114
1112 This is different from dirstate.invalidate() in that it doesn't
1115 This is different from dirstate.invalidate() in that it doesn't
1113 always reread the dirstate. Use dirstate.invalidate() if you want to
1116 always reread the dirstate. Use dirstate.invalidate() if you want to
1114 explicitly read the dirstate again (i.e. restoring it to a previous
1117 explicitly read the dirstate again (i.e. restoring it to a previous
1115 known good state).'''
1118 known good state).'''
1116 if hasunfilteredcache(self, 'dirstate'):
1119 if hasunfilteredcache(self, 'dirstate'):
1117 for k in self.dirstate._filecache:
1120 for k in self.dirstate._filecache:
1118 try:
1121 try:
1119 delattr(self.dirstate, k)
1122 delattr(self.dirstate, k)
1120 except AttributeError:
1123 except AttributeError:
1121 pass
1124 pass
1122 delattr(self.unfiltered(), 'dirstate')
1125 delattr(self.unfiltered(), 'dirstate')
1123
1126
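A two-line sketch of the contrast drawn in the docstring above (repo is an
assumed open repository object):

    repo.invalidatedirstate()     # lazy: re-read only if the file changed
    repo.dirstate.invalidate()    # eager: unconditionally re-read next time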
1124 def invalidate(self):
1127 def invalidate(self):
1125 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
1128 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
1126 for k in self._filecache:
1129 for k in self._filecache:
1127 # dirstate is invalidated separately in invalidatedirstate()
1130 # dirstate is invalidated separately in invalidatedirstate()
1128 if k == 'dirstate':
1131 if k == 'dirstate':
1129 continue
1132 continue
1130
1133
1131 try:
1134 try:
1132 delattr(unfiltered, k)
1135 delattr(unfiltered, k)
1133 except AttributeError:
1136 except AttributeError:
1134 pass
1137 pass
1135 self.invalidatecaches()
1138 self.invalidatecaches()
1136
1139
1137 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1140 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1138 try:
1141 try:
1139 l = lock.lock(lockname, 0, releasefn, desc=desc)
1142 l = lock.lock(lockname, 0, releasefn, desc=desc)
1140 except error.LockHeld, inst:
1143 except error.LockHeld, inst:
1141 if not wait:
1144 if not wait:
1142 raise
1145 raise
1143 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1146 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1144 (desc, inst.locker))
1147 (desc, inst.locker))
1145 # default to 600 seconds timeout
1148 # default to 600 seconds timeout
1146 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1149 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1147 releasefn, desc=desc)
1150 releasefn, desc=desc)
1148 if acquirefn:
1151 if acquirefn:
1149 acquirefn()
1152 acquirefn()
1150 return l
1153 return l
1151
1154
1152 def _afterlock(self, callback):
1155 def _afterlock(self, callback):
1153 """add a callback to the current repository lock.
1156 """add a callback to the current repository lock.
1154
1157
1155 The callback will be executed on lock release."""
1158 The callback will be executed on lock release."""
1156 l = self._lockref and self._lockref()
1159 l = self._lockref and self._lockref()
1157 if l:
1160 if l:
1158 l.postrelease.append(callback)
1161 l.postrelease.append(callback)
1159 else:
1162 else:
1160 callback()
1163 callback()
1161
1164
1162 def lock(self, wait=True):
1165 def lock(self, wait=True):
1163 '''Lock the repository store (.hg/store) and return a weak reference
1166 '''Lock the repository store (.hg/store) and return a weak reference
1164 to the lock. Use this before modifying the store (e.g. committing or
1167 to the lock. Use this before modifying the store (e.g. committing or
1165 stripping). If you are opening a transaction, get a lock as well.'''
1168 stripping). If you are opening a transaction, get a lock as well.'''
1166 l = self._lockref and self._lockref()
1169 l = self._lockref and self._lockref()
1167 if l is not None and l.held:
1170 if l is not None and l.held:
1168 l.lock()
1171 l.lock()
1169 return l
1172 return l
1170
1173
1171 def unlock():
1174 def unlock():
1172 self.store.write()
1175 self.store.write()
1173 if hasunfilteredcache(self, '_phasecache'):
1176 if hasunfilteredcache(self, '_phasecache'):
1174 self._phasecache.write()
1177 self._phasecache.write()
1175 for k, ce in self._filecache.items():
1178 for k, ce in self._filecache.items():
1176 if k == 'dirstate':
1179 if k == 'dirstate':
1177 continue
1180 continue
1178 ce.refresh()
1181 ce.refresh()
1179
1182
1180 l = self._lock(self.sjoin("lock"), wait, unlock,
1183 l = self._lock(self.sjoin("lock"), wait, unlock,
1181 self.invalidate, _('repository %s') % self.origroot)
1184 self.invalidate, _('repository %s') % self.origroot)
1182 self._lockref = weakref.ref(l)
1185 self._lockref = weakref.ref(l)
1183 return l
1186 return l
1184
1187
1185 def wlock(self, wait=True):
1188 def wlock(self, wait=True):
1186 '''Lock the non-store parts of the repository (everything under
1189 '''Lock the non-store parts of the repository (everything under
1187 .hg except .hg/store) and return a weak reference to the lock.
1190 .hg except .hg/store) and return a weak reference to the lock.
1188 Use this before modifying files in .hg.'''
1191 Use this before modifying files in .hg.'''
1189 l = self._wlockref and self._wlockref()
1192 l = self._wlockref and self._wlockref()
1190 if l is not None and l.held:
1193 if l is not None and l.held:
1191 l.lock()
1194 l.lock()
1192 return l
1195 return l
1193
1196
1194 def unlock():
1197 def unlock():
1195 self.dirstate.write()
1198 self.dirstate.write()
1196 ce = self._filecache.get('dirstate')
1199 ce = self._filecache.get('dirstate')
1197 if ce:
1200 if ce:
1198 ce.refresh()
1201 ce.refresh()
1199
1202
1200 l = self._lock(self.join("wlock"), wait, unlock,
1203 l = self._lock(self.join("wlock"), wait, unlock,
1201 self.invalidatedirstate, _('working directory of %s') %
1204 self.invalidatedirstate, _('working directory of %s') %
1202 self.origroot)
1205 self.origroot)
1203 self._wlockref = weakref.ref(l)
1206 self._wlockref = weakref.ref(l)
1204 return l
1207 return l
1205
1208
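The conventional lock order, as used by commit() below: take wlock before
lock and release in reverse (a sketch assuming an open repo object and the
release() helper imported from mercurial.lock):

    wlock = lock = None
    try:
        wlock = repo.wlock()    # non-store parts of .hg
        lock = repo.lock()      # .hg/store
        # ... modify working directory and store ...
    finally:
        release(lock, wlock)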
1206 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1209 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1207 """
1210 """
1208 commit an individual file as part of a larger transaction
1211 commit an individual file as part of a larger transaction
1209 """
1212 """
1210
1213
1211 fname = fctx.path()
1214 fname = fctx.path()
1212 text = fctx.data()
1215 text = fctx.data()
1213 flog = self.file(fname)
1216 flog = self.file(fname)
1214 fparent1 = manifest1.get(fname, nullid)
1217 fparent1 = manifest1.get(fname, nullid)
1215 fparent2 = fparent2o = manifest2.get(fname, nullid)
1218 fparent2 = fparent2o = manifest2.get(fname, nullid)
1216
1219
1217 meta = {}
1220 meta = {}
1218 copy = fctx.renamed()
1221 copy = fctx.renamed()
1219 if copy and copy[0] != fname:
1222 if copy and copy[0] != fname:
1220 # Mark the new revision of this file as a copy of another
1223 # Mark the new revision of this file as a copy of another
1221 # file. This copy data will effectively act as a parent
1224 # file. This copy data will effectively act as a parent
1222 # of this new revision. If this is a merge, the first
1225 # of this new revision. If this is a merge, the first
1223 # parent will be the nullid (meaning "look up the copy data")
1226 # parent will be the nullid (meaning "look up the copy data")
1224 # and the second one will be the other parent. For example:
1227 # and the second one will be the other parent. For example:
1225 #
1228 #
1226 # 0 --- 1 --- 3 rev1 changes file foo
1229 # 0 --- 1 --- 3 rev1 changes file foo
1227 # \ / rev2 renames foo to bar and changes it
1230 # \ / rev2 renames foo to bar and changes it
1228 # \- 2 -/ rev3 should have bar with all changes and
1231 # \- 2 -/ rev3 should have bar with all changes and
1229 # should record that bar descends from
1232 # should record that bar descends from
1230 # bar in rev2 and foo in rev1
1233 # bar in rev2 and foo in rev1
1231 #
1234 #
1232 # this allows this merge to succeed:
1235 # this allows this merge to succeed:
1233 #
1236 #
1234 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1237 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1235 # \ / merging rev3 and rev4 should use bar@rev2
1238 # \ / merging rev3 and rev4 should use bar@rev2
1236 # \- 2 --- 4 as the merge base
1239 # \- 2 --- 4 as the merge base
1237 #
1240 #
1238
1241
1239 cfname = copy[0]
1242 cfname = copy[0]
1240 crev = manifest1.get(cfname)
1243 crev = manifest1.get(cfname)
1241 newfparent = fparent2
1244 newfparent = fparent2
1242
1245
1243 if manifest2: # branch merge
1246 if manifest2: # branch merge
1244 if fparent2 == nullid or crev is None: # copied on remote side
1247 if fparent2 == nullid or crev is None: # copied on remote side
1245 if cfname in manifest2:
1248 if cfname in manifest2:
1246 crev = manifest2[cfname]
1249 crev = manifest2[cfname]
1247 newfparent = fparent1
1250 newfparent = fparent1
1248
1251
1249 # find source in nearest ancestor if we've lost track
1252 # find source in nearest ancestor if we've lost track
1250 if not crev:
1253 if not crev:
1251 self.ui.debug(" %s: searching for copy revision for %s\n" %
1254 self.ui.debug(" %s: searching for copy revision for %s\n" %
1252 (fname, cfname))
1255 (fname, cfname))
1253 for ancestor in self[None].ancestors():
1256 for ancestor in self[None].ancestors():
1254 if cfname in ancestor:
1257 if cfname in ancestor:
1255 crev = ancestor[cfname].filenode()
1258 crev = ancestor[cfname].filenode()
1256 break
1259 break
1257
1260
1258 if crev:
1261 if crev:
1259 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1262 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1260 meta["copy"] = cfname
1263 meta["copy"] = cfname
1261 meta["copyrev"] = hex(crev)
1264 meta["copyrev"] = hex(crev)
1262 fparent1, fparent2 = nullid, newfparent
1265 fparent1, fparent2 = nullid, newfparent
1263 else:
1266 else:
1264 self.ui.warn(_("warning: can't find ancestor for '%s' "
1267 self.ui.warn(_("warning: can't find ancestor for '%s' "
1265 "copied from '%s'!\n") % (fname, cfname))
1268 "copied from '%s'!\n") % (fname, cfname))
1266
1269
1267 elif fparent2 != nullid:
1270 elif fparent2 != nullid:
1268 # is one parent an ancestor of the other?
1271 # is one parent an ancestor of the other?
1269 fparentancestor = flog.ancestor(fparent1, fparent2)
1272 fparentancestor = flog.ancestor(fparent1, fparent2)
1270 if fparentancestor == fparent1:
1273 if fparentancestor == fparent1:
1271 fparent1, fparent2 = fparent2, nullid
1274 fparent1, fparent2 = fparent2, nullid
1272 elif fparentancestor == fparent2:
1275 elif fparentancestor == fparent2:
1273 fparent2 = nullid
1276 fparent2 = nullid
1274
1277
1275 # is the file changed?
1278 # is the file changed?
1276 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1279 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1277 changelist.append(fname)
1280 changelist.append(fname)
1278 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1281 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1279
1282
1280 # are just the flags changed during merge?
1283 # are just the flags changed during merge?
1281 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1284 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1282 changelist.append(fname)
1285 changelist.append(fname)
1283
1286
1284 return fparent1
1287 return fparent1
1285
1288
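For reference, the rename branch above ends up writing filelog metadata of
this shape, with fparent1 forced to nullid so readers know to consult the
copy record rather than the first parent (hash value illustrative):

    meta = {
        'copy': 'foo',          # source path of the rename
        'copyrev': '1' * 40,    # hex filenode of the source revision
    }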
1286 @unfilteredmethod
1289 @unfilteredmethod
1287 def commit(self, text="", user=None, date=None, match=None, force=False,
1290 def commit(self, text="", user=None, date=None, match=None, force=False,
1288 editor=False, extra={}):
1291 editor=False, extra={}):
1289 """Add a new revision to current repository.
1292 """Add a new revision to current repository.
1290
1293
1291 Revision information is gathered from the working directory,
1294 Revision information is gathered from the working directory,
1292 match can be used to filter the committed files. If editor is
1295 match can be used to filter the committed files. If editor is
1293 supplied, it is called to get a commit message.
1296 supplied, it is called to get a commit message.
1294 """
1297 """
1295
1298
1296 def fail(f, msg):
1299 def fail(f, msg):
1297 raise util.Abort('%s: %s' % (f, msg))
1300 raise util.Abort('%s: %s' % (f, msg))
1298
1301
1299 if not match:
1302 if not match:
1300 match = matchmod.always(self.root, '')
1303 match = matchmod.always(self.root, '')
1301
1304
1302 if not force:
1305 if not force:
1303 vdirs = []
1306 vdirs = []
1304 match.dir = vdirs.append
1307 match.dir = vdirs.append
1305 match.bad = fail
1308 match.bad = fail
1306
1309
1307 wlock = self.wlock()
1310 wlock = self.wlock()
1308 try:
1311 try:
1309 wctx = self[None]
1312 wctx = self[None]
1310 merge = len(wctx.parents()) > 1
1313 merge = len(wctx.parents()) > 1
1311
1314
1312 if (not force and merge and match and
1315 if (not force and merge and match and
1313 (match.files() or match.anypats())):
1316 (match.files() or match.anypats())):
1314 raise util.Abort(_('cannot partially commit a merge '
1317 raise util.Abort(_('cannot partially commit a merge '
1315 '(do not specify files or patterns)'))
1318 '(do not specify files or patterns)'))
1316
1319
1317 changes = self.status(match=match, clean=force)
1320 changes = self.status(match=match, clean=force)
1318 if force:
1321 if force:
1319 changes[0].extend(changes[6]) # mq may commit unchanged files
1322 changes[0].extend(changes[6]) # mq may commit unchanged files
1320
1323
1321 # check subrepos
1324 # check subrepos
1322 subs = []
1325 subs = []
1323 commitsubs = set()
1326 commitsubs = set()
1324 newstate = wctx.substate.copy()
1327 newstate = wctx.substate.copy()
1325 # only manage subrepos and .hgsubstate if .hgsub is present
1328 # only manage subrepos and .hgsubstate if .hgsub is present
1326 if '.hgsub' in wctx:
1329 if '.hgsub' in wctx:
1327 # we'll decide whether to track this ourselves, thanks
1330 # we'll decide whether to track this ourselves, thanks
1328 if '.hgsubstate' in changes[0]:
1331 if '.hgsubstate' in changes[0]:
1329 changes[0].remove('.hgsubstate')
1332 changes[0].remove('.hgsubstate')
1330 if '.hgsubstate' in changes[2]:
1333 if '.hgsubstate' in changes[2]:
1331 changes[2].remove('.hgsubstate')
1334 changes[2].remove('.hgsubstate')
1332
1335
1333 # compare current state to last committed state
1336 # compare current state to last committed state
1334 # build new substate based on last committed state
1337 # build new substate based on last committed state
1335 oldstate = wctx.p1().substate
1338 oldstate = wctx.p1().substate
1336 for s in sorted(newstate.keys()):
1339 for s in sorted(newstate.keys()):
1337 if not match(s):
1340 if not match(s):
1338 # ignore working copy, use old state if present
1341 # ignore working copy, use old state if present
1339 if s in oldstate:
1342 if s in oldstate:
1340 newstate[s] = oldstate[s]
1343 newstate[s] = oldstate[s]
1341 continue
1344 continue
1342 if not force:
1345 if not force:
1343 raise util.Abort(
1346 raise util.Abort(
1344 _("commit with new subrepo %s excluded") % s)
1347 _("commit with new subrepo %s excluded") % s)
1345 if wctx.sub(s).dirty(True):
1348 if wctx.sub(s).dirty(True):
1346 if not self.ui.configbool('ui', 'commitsubrepos'):
1349 if not self.ui.configbool('ui', 'commitsubrepos'):
1347 raise util.Abort(
1350 raise util.Abort(
1348 _("uncommitted changes in subrepo %s") % s,
1351 _("uncommitted changes in subrepo %s") % s,
1349 hint=_("use --subrepos for recursive commit"))
1352 hint=_("use --subrepos for recursive commit"))
1350 subs.append(s)
1353 subs.append(s)
1351 commitsubs.add(s)
1354 commitsubs.add(s)
1352 else:
1355 else:
1353 bs = wctx.sub(s).basestate()
1356 bs = wctx.sub(s).basestate()
1354 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1357 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1355 if oldstate.get(s, (None, None, None))[1] != bs:
1358 if oldstate.get(s, (None, None, None))[1] != bs:
1356 subs.append(s)
1359 subs.append(s)
1357
1360
1358 # check for removed subrepos
1361 # check for removed subrepos
1359 for p in wctx.parents():
1362 for p in wctx.parents():
1360 r = [s for s in p.substate if s not in newstate]
1363 r = [s for s in p.substate if s not in newstate]
1361 subs += [s for s in r if match(s)]
1364 subs += [s for s in r if match(s)]
1362 if subs:
1365 if subs:
1363 if (not match('.hgsub') and
1366 if (not match('.hgsub') and
1364 '.hgsub' in (wctx.modified() + wctx.added())):
1367 '.hgsub' in (wctx.modified() + wctx.added())):
1365 raise util.Abort(
1368 raise util.Abort(
1366 _("can't commit subrepos without .hgsub"))
1369 _("can't commit subrepos without .hgsub"))
1367 changes[0].insert(0, '.hgsubstate')
1370 changes[0].insert(0, '.hgsubstate')
1368
1371
1369 elif '.hgsub' in changes[2]:
1372 elif '.hgsub' in changes[2]:
1370 # clean up .hgsubstate when .hgsub is removed
1373 # clean up .hgsubstate when .hgsub is removed
1371 if ('.hgsubstate' in wctx and
1374 if ('.hgsubstate' in wctx and
1372 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1375 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1373 changes[2].insert(0, '.hgsubstate')
1376 changes[2].insert(0, '.hgsubstate')
1374
1377
1375 # make sure all explicit patterns are matched
1378 # make sure all explicit patterns are matched
1376 if not force and match.files():
1379 if not force and match.files():
1377 matched = set(changes[0] + changes[1] + changes[2])
1380 matched = set(changes[0] + changes[1] + changes[2])
1378
1381
1379 for f in match.files():
1382 for f in match.files():
1380 f = self.dirstate.normalize(f)
1383 f = self.dirstate.normalize(f)
1381 if f == '.' or f in matched or f in wctx.substate:
1384 if f == '.' or f in matched or f in wctx.substate:
1382 continue
1385 continue
1383 if f in changes[3]: # missing
1386 if f in changes[3]: # missing
1384 fail(f, _('file not found!'))
1387 fail(f, _('file not found!'))
1385 if f in vdirs: # visited directory
1388 if f in vdirs: # visited directory
1386 d = f + '/'
1389 d = f + '/'
1387 for mf in matched:
1390 for mf in matched:
1388 if mf.startswith(d):
1391 if mf.startswith(d):
1389 break
1392 break
1390 else:
1393 else:
1391 fail(f, _("no match under directory!"))
1394 fail(f, _("no match under directory!"))
1392 elif f not in self.dirstate:
1395 elif f not in self.dirstate:
1393 fail(f, _("file not tracked!"))
1396 fail(f, _("file not tracked!"))
1394
1397
1395 if (not force and not extra.get("close") and not merge
1398 if (not force and not extra.get("close") and not merge
1396 and not (changes[0] or changes[1] or changes[2])
1399 and not (changes[0] or changes[1] or changes[2])
1397 and wctx.branch() == wctx.p1().branch()):
1400 and wctx.branch() == wctx.p1().branch()):
1398 return None
1401 return None
1399
1402
1400 if merge and changes[3]:
1403 if merge and changes[3]:
1401 raise util.Abort(_("cannot commit merge with missing files"))
1404 raise util.Abort(_("cannot commit merge with missing files"))
1402
1405
1403 ms = mergemod.mergestate(self)
1406 ms = mergemod.mergestate(self)
1404 for f in changes[0]:
1407 for f in changes[0]:
1405 if f in ms and ms[f] == 'u':
1408 if f in ms and ms[f] == 'u':
1406 raise util.Abort(_("unresolved merge conflicts "
1409 raise util.Abort(_("unresolved merge conflicts "
1407 "(see hg help resolve)"))
1410 "(see hg help resolve)"))
1408
1411
1409 cctx = context.workingctx(self, text, user, date, extra, changes)
1412 cctx = context.workingctx(self, text, user, date, extra, changes)
1410 if editor:
1413 if editor:
1411 cctx._text = editor(self, cctx, subs)
1414 cctx._text = editor(self, cctx, subs)
1412 edited = (text != cctx._text)
1415 edited = (text != cctx._text)
1413
1416
1414 # commit subs and write new state
1417 # commit subs and write new state
1415 if subs:
1418 if subs:
1416 for s in sorted(commitsubs):
1419 for s in sorted(commitsubs):
1417 sub = wctx.sub(s)
1420 sub = wctx.sub(s)
1418 self.ui.status(_('committing subrepository %s\n') %
1421 self.ui.status(_('committing subrepository %s\n') %
1419 subrepo.subrelpath(sub))
1422 subrepo.subrelpath(sub))
1420 sr = sub.commit(cctx._text, user, date)
1423 sr = sub.commit(cctx._text, user, date)
1421 newstate[s] = (newstate[s][0], sr)
1424 newstate[s] = (newstate[s][0], sr)
1422 subrepo.writestate(self, newstate)
1425 subrepo.writestate(self, newstate)
1423
1426
1424 # Save commit message in case this transaction gets rolled back
1427 # Save commit message in case this transaction gets rolled back
1425 # (e.g. by a pretxncommit hook). Leave the content alone on
1428 # (e.g. by a pretxncommit hook). Leave the content alone on
1426 # the assumption that the user will use the same editor again.
1429 # the assumption that the user will use the same editor again.
1427 msgfn = self.savecommitmessage(cctx._text)
1430 msgfn = self.savecommitmessage(cctx._text)
1428
1431
1429 p1, p2 = self.dirstate.parents()
1432 p1, p2 = self.dirstate.parents()
1430 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1433 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1431 try:
1434 try:
1432 self.hook("precommit", throw=True, parent1=hookp1,
1435 self.hook("precommit", throw=True, parent1=hookp1,
1433 parent2=hookp2)
1436 parent2=hookp2)
1434 ret = self.commitctx(cctx, True)
1437 ret = self.commitctx(cctx, True)
1435 except: # re-raises
1438 except: # re-raises
1436 if edited:
1439 if edited:
1437 self.ui.write(
1440 self.ui.write(
1438 _('note: commit message saved in %s\n') % msgfn)
1441 _('note: commit message saved in %s\n') % msgfn)
1439 raise
1442 raise
1440
1443
1441 # update bookmarks, dirstate and mergestate
1444 # update bookmarks, dirstate and mergestate
1442 bookmarks.update(self, [p1, p2], ret)
1445 bookmarks.update(self, [p1, p2], ret)
1443 for f in changes[0] + changes[1]:
1446 for f in changes[0] + changes[1]:
1444 self.dirstate.normal(f)
1447 self.dirstate.normal(f)
1445 for f in changes[2]:
1448 for f in changes[2]:
1446 self.dirstate.drop(f)
1449 self.dirstate.drop(f)
1447 self.dirstate.setparents(ret)
1450 self.dirstate.setparents(ret)
1448 ms.reset()
1451 ms.reset()
1449 finally:
1452 finally:
1450 wlock.release()
1453 wlock.release()
1451
1454
1452 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1455 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1453 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1456 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1454 self._afterlock(commithook)
1457 self._afterlock(commithook)
1455 return ret
1458 return ret
1456
1459
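A sketch of a scripted commit restricted to one file, assuming an open repo
object (path, message, and user are illustrative):

    from mercurial import match as matchmod

    m = matchmod.match(repo.root, '', ['src/foo.py'])
    node = repo.commit(text='fix foo', user='alice <a@example.com>', match=m)
    if node is None:
        repo.ui.status('nothing changed\n')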
1457 @unfilteredmethod
1460 @unfilteredmethod
1458 def commitctx(self, ctx, error=False):
1461 def commitctx(self, ctx, error=False):
1459 """Add a new revision to current repository.
1462 """Add a new revision to current repository.
1460 Revision information is passed via the context argument.
1463 Revision information is passed via the context argument.
1461 """
1464 """
1462
1465
1463 tr = lock = None
1466 tr = lock = None
1464 removed = list(ctx.removed())
1467 removed = list(ctx.removed())
1465 p1, p2 = ctx.p1(), ctx.p2()
1468 p1, p2 = ctx.p1(), ctx.p2()
1466 user = ctx.user()
1469 user = ctx.user()
1467
1470
1468 lock = self.lock()
1471 lock = self.lock()
1469 try:
1472 try:
1470 tr = self.transaction("commit")
1473 tr = self.transaction("commit")
1471 trp = weakref.proxy(tr)
1474 trp = weakref.proxy(tr)
1472
1475
1473 if ctx.files():
1476 if ctx.files():
1474 m1 = p1.manifest().copy()
1477 m1 = p1.manifest().copy()
1475 m2 = p2.manifest()
1478 m2 = p2.manifest()
1476
1479
1477 # check in files
1480 # check in files
1478 new = {}
1481 new = {}
1479 changed = []
1482 changed = []
1480 linkrev = len(self)
1483 linkrev = len(self)
1481 for f in sorted(ctx.modified() + ctx.added()):
1484 for f in sorted(ctx.modified() + ctx.added()):
1482 self.ui.note(f + "\n")
1485 self.ui.note(f + "\n")
1483 try:
1486 try:
1484 fctx = ctx[f]
1487 fctx = ctx[f]
1485 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1488 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1486 changed)
1489 changed)
1487 m1.set(f, fctx.flags())
1490 m1.set(f, fctx.flags())
1488 except OSError, inst:
1491 except OSError, inst:
1489 self.ui.warn(_("trouble committing %s!\n") % f)
1492 self.ui.warn(_("trouble committing %s!\n") % f)
1490 raise
1493 raise
1491 except IOError, inst:
1494 except IOError, inst:
1492 errcode = getattr(inst, 'errno', errno.ENOENT)
1495 errcode = getattr(inst, 'errno', errno.ENOENT)
1493 if error or errcode and errcode != errno.ENOENT:
1496 if error or errcode and errcode != errno.ENOENT:
1494 self.ui.warn(_("trouble committing %s!\n") % f)
1497 self.ui.warn(_("trouble committing %s!\n") % f)
1495 raise
1498 raise
1496 else:
1499 else:
1497 removed.append(f)
1500 removed.append(f)
1498
1501
1499 # update manifest
1502 # update manifest
1500 m1.update(new)
1503 m1.update(new)
1501 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1504 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1502 drop = [f for f in removed if f in m1]
1505 drop = [f for f in removed if f in m1]
1503 for f in drop:
1506 for f in drop:
1504 del m1[f]
1507 del m1[f]
1505 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1508 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1506 p2.manifestnode(), (new, drop))
1509 p2.manifestnode(), (new, drop))
1507 files = changed + removed
1510 files = changed + removed
1508 else:
1511 else:
1509 mn = p1.manifestnode()
1512 mn = p1.manifestnode()
1510 files = []
1513 files = []
1511
1514
1512 # update changelog
1515 # update changelog
1513 self.changelog.delayupdate()
1516 self.changelog.delayupdate()
1514 n = self.changelog.add(mn, files, ctx.description(),
1517 n = self.changelog.add(mn, files, ctx.description(),
1515 trp, p1.node(), p2.node(),
1518 trp, p1.node(), p2.node(),
1516 user, ctx.date(), ctx.extra().copy())
1519 user, ctx.date(), ctx.extra().copy())
1517 p = lambda: self.changelog.writepending() and self.root or ""
1520 p = lambda: self.changelog.writepending() and self.root or ""
1518 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1521 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1519 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1522 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1520 parent2=xp2, pending=p)
1523 parent2=xp2, pending=p)
1521 self.changelog.finalize(trp)
1524 self.changelog.finalize(trp)
1522 # set the new commit in its proper phase
1525 # set the new commit in its proper phase
1523 targetphase = phases.newcommitphase(self.ui)
1526 targetphase = phases.newcommitphase(self.ui)
1524 if targetphase:
1527 if targetphase:
1525 # retracting the boundary does not alter parent changesets;
1528 # retracting the boundary does not alter parent changesets;
1526 # if a parent has a higher phase, the resulting phase will
1529 # if a parent has a higher phase, the resulting phase will
1527 # be compliant anyway
1530 # be compliant anyway
1528 #
1531 #
1529 # if minimal phase was 0 we don't need to retract anything
1532 # if minimal phase was 0 we don't need to retract anything
1530 phases.retractboundary(self, targetphase, [n])
1533 phases.retractboundary(self, targetphase, [n])
1531 tr.close()
1534 tr.close()
1532 self.updatebranchcache()
1535 self.updatebranchcache()
1533 return n
1536 return n
1534 finally:
1537 finally:
1535 if tr:
1538 if tr:
1536 tr.release()
1539 tr.release()
1537 lock.release()
1540 lock.release()
1538
1541
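commitctx() is also the entry point for synthetic commits built with
context.memctx; a sketch under the memctx/memfilectx signatures of this
Mercurial era (file name and content are illustrative):

    from mercurial import context
    from mercurial.node import nullid

    def getfilectx(repo, memctx, path):
        return context.memfilectx(path, 'hello\n')

    base = repo['tip'].node()
    ctx = context.memctx(repo, (base, nullid), 'automated commit',
                         ['hello.txt'], getfilectx, user='bot')
    node = repo.commitctx(ctx)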
1539 @unfilteredmethod
1542 @unfilteredmethod
1540 def destroyed(self, newheadnodes=None):
1543 def destroyed(self, newheadnodes=None):
1541 '''Inform the repository that nodes have been destroyed.
1544 '''Inform the repository that nodes have been destroyed.
1542 Intended for use by strip and rollback, so there's a common
1545 Intended for use by strip and rollback, so there's a common
1543 place for anything that has to be done after destroying history.
1546 place for anything that has to be done after destroying history.
1544
1547
1545 If you know the branchheads cache was up to date before nodes were removed
1548 If you know the branchheads cache was up to date before nodes were removed
1546 and you also know the set of candidate new heads that may have resulted
1549 and you also know the set of candidate new heads that may have resulted
1547 from the destruction, you can set newheadnodes. This will enable the
1550 from the destruction, you can set newheadnodes. This will enable the
1548 code to update the branchheads cache, rather than having future code
1551 code to update the branchheads cache, rather than having future code
1549 decide it's invalid and regenerate it from scratch.
1552 decide it's invalid and regenerate it from scratch.
1550 '''
1553 '''
1551 # If we have info, newheadnodes, on how to update the branch cache, do
1554 # If we have info, newheadnodes, on how to update the branch cache, do
1552 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1555 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1553 # will be caught the next time it is read.
1556 # will be caught the next time it is read.
1554 if newheadnodes:
1557 if newheadnodes:
1555 tiprev = len(self) - 1
1558 tiprev = len(self) - 1
1556 ctxgen = (self[node] for node in newheadnodes
1559 ctxgen = (self[node] for node in newheadnodes
1557 if self.changelog.hasnode(node))
1560 if self.changelog.hasnode(node))
1558 self._updatebranchcache(self._branchcache, ctxgen)
1561 self._updatebranchcache(self._branchcache, ctxgen)
1559 self._writebranchcache(self._branchcache, self.changelog.tip(),
1562 self._writebranchcache(self._branchcache, self.changelog.tip(),
1560 tiprev)
1563 tiprev)
1561
1564
1562 # Ensure the persistent tag cache is updated. Doing it now
1565 # Ensure the persistent tag cache is updated. Doing it now
1563 # means that the tag cache only has to worry about destroyed
1566 # means that the tag cache only has to worry about destroyed
1564 # heads immediately after a strip/rollback. That in turn
1567 # heads immediately after a strip/rollback. That in turn
1565 # guarantees that "cachetip == currenttip" (comparing both rev
1568 # guarantees that "cachetip == currenttip" (comparing both rev
1566 # and node) always means no nodes have been added or destroyed.
1569 # and node) always means no nodes have been added or destroyed.
1567
1570
1568 # XXX this is suboptimal when qrefresh'ing: we strip the current
1571 # XXX this is suboptimal when qrefresh'ing: we strip the current
1569 # head, refresh the tag cache, then immediately add a new head.
1572 # head, refresh the tag cache, then immediately add a new head.
1570 # But I think doing it this way is necessary for the "instant
1573 # But I think doing it this way is necessary for the "instant
1571 # tag cache retrieval" case to work.
1574 # tag cache retrieval" case to work.
1572 self.invalidatecaches()
1575 self.invalidatecaches()
1573
1576
1574 # Discard all cache entries to force reloading everything.
1577 # Discard all cache entries to force reloading everything.
1575 self._filecache.clear()
1578 self._filecache.clear()
1576
1579
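A hypothetical strip-style caller of destroyed(): the parents of the
stripped nodes are the candidate new heads (strippednodes is an assumed
name for the set of removed nodes):

    candidates = set()
    for n in strippednodes:
        candidates.update(repo.changelog.parents(n))
    repo.destroyed(newheadnodes=candidates)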
1577 def walk(self, match, node=None):
1580 def walk(self, match, node=None):
1578 '''
1581 '''
1579 walk recursively through the directory tree or a given
1582 walk recursively through the directory tree or a given
1580 changeset, finding all files matched by the match
1583 changeset, finding all files matched by the match
1581 function
1584 function
1582 '''
1585 '''
1583 return self[node].walk(match)
1586 return self[node].walk(match)
1584
1587
1585 def status(self, node1='.', node2=None, match=None,
1588 def status(self, node1='.', node2=None, match=None,
1586 ignored=False, clean=False, unknown=False,
1589 ignored=False, clean=False, unknown=False,
1587 listsubrepos=False):
1590 listsubrepos=False):
1588 """return status of files between two nodes or node and working
1591 """return status of files between two nodes or node and working
1589 directory.
1592 directory.
1590
1593
1591 If node1 is None, use the first dirstate parent instead.
1594 If node1 is None, use the first dirstate parent instead.
1592 If node2 is None, compare node1 with working directory.
1595 If node2 is None, compare node1 with working directory.
1593 """
1596 """
1594
1597
1595 def mfmatches(ctx):
1598 def mfmatches(ctx):
1596 mf = ctx.manifest().copy()
1599 mf = ctx.manifest().copy()
1597 if match.always():
1600 if match.always():
1598 return mf
1601 return mf
1599 for fn in mf.keys():
1602 for fn in mf.keys():
1600 if not match(fn):
1603 if not match(fn):
1601 del mf[fn]
1604 del mf[fn]
1602 return mf
1605 return mf
1603
1606
1604 if isinstance(node1, context.changectx):
1607 if isinstance(node1, context.changectx):
1605 ctx1 = node1
1608 ctx1 = node1
1606 else:
1609 else:
1607 ctx1 = self[node1]
1610 ctx1 = self[node1]
1608 if isinstance(node2, context.changectx):
1611 if isinstance(node2, context.changectx):
1609 ctx2 = node2
1612 ctx2 = node2
1610 else:
1613 else:
1611 ctx2 = self[node2]
1614 ctx2 = self[node2]
1612
1615
1613 working = ctx2.rev() is None
1616 working = ctx2.rev() is None
1614 parentworking = working and ctx1 == self['.']
1617 parentworking = working and ctx1 == self['.']
1615 match = match or matchmod.always(self.root, self.getcwd())
1618 match = match or matchmod.always(self.root, self.getcwd())
1616 listignored, listclean, listunknown = ignored, clean, unknown
1619 listignored, listclean, listunknown = ignored, clean, unknown
1617
1620
1618 # load earliest manifest first for caching reasons
1621 # load earliest manifest first for caching reasons
1619 if not working and ctx2.rev() < ctx1.rev():
1622 if not working and ctx2.rev() < ctx1.rev():
1620 ctx2.manifest()
1623 ctx2.manifest()
1621
1624
1622 if not parentworking:
1625 if not parentworking:
1623 def bad(f, msg):
1626 def bad(f, msg):
1624 # 'f' may be a directory pattern from 'match.files()',
1627 # 'f' may be a directory pattern from 'match.files()',
1625 # so 'f not in ctx1' is not enough
1628 # so 'f not in ctx1' is not enough
1626 if f not in ctx1 and f not in ctx1.dirs():
1629 if f not in ctx1 and f not in ctx1.dirs():
1627 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1630 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1628 match.bad = bad
1631 match.bad = bad
1629
1632
1630 if working: # we need to scan the working dir
1633 if working: # we need to scan the working dir
1631 subrepos = []
1634 subrepos = []
1632 if '.hgsub' in self.dirstate:
1635 if '.hgsub' in self.dirstate:
1633 subrepos = ctx2.substate.keys()
1636 subrepos = ctx2.substate.keys()
1634 s = self.dirstate.status(match, subrepos, listignored,
1637 s = self.dirstate.status(match, subrepos, listignored,
1635 listclean, listunknown)
1638 listclean, listunknown)
1636 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1639 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1637
1640
1638 # check for any possibly clean files
1641 # check for any possibly clean files
1639 if parentworking and cmp:
1642 if parentworking and cmp:
1640 fixup = []
1643 fixup = []
1641 # do a full compare of any files that might have changed
1644 # do a full compare of any files that might have changed
1642 for f in sorted(cmp):
1645 for f in sorted(cmp):
1643 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1646 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1644 or ctx1[f].cmp(ctx2[f])):
1647 or ctx1[f].cmp(ctx2[f])):
1645 modified.append(f)
1648 modified.append(f)
1646 else:
1649 else:
1647 fixup.append(f)
1650 fixup.append(f)
1648
1651
1649 # update dirstate for files that are actually clean
1652 # update dirstate for files that are actually clean
1650 if fixup:
1653 if fixup:
1651 if listclean:
1654 if listclean:
1652 clean += fixup
1655 clean += fixup
1653
1656
1654 try:
1657 try:
1655 # updating the dirstate is optional
1658 # updating the dirstate is optional
1656 # so we don't wait on the lock
1659 # so we don't wait on the lock
1657 wlock = self.wlock(False)
1660 wlock = self.wlock(False)
1658 try:
1661 try:
1659 for f in fixup:
1662 for f in fixup:
1660 self.dirstate.normal(f)
1663 self.dirstate.normal(f)
1661 finally:
1664 finally:
1662 wlock.release()
1665 wlock.release()
1663 except error.LockError:
1666 except error.LockError:
1664 pass
1667 pass
1665
1668
1666 if not parentworking:
1669 if not parentworking:
1667 mf1 = mfmatches(ctx1)
1670 mf1 = mfmatches(ctx1)
1668 if working:
1671 if working:
1669 # we are comparing working dir against non-parent
1672 # we are comparing working dir against non-parent
1670 # generate a pseudo-manifest for the working dir
1673 # generate a pseudo-manifest for the working dir
1671 mf2 = mfmatches(self['.'])
1674 mf2 = mfmatches(self['.'])
1672 for f in cmp + modified + added:
1675 for f in cmp + modified + added:
1673 mf2[f] = None
1676 mf2[f] = None
1674 mf2.set(f, ctx2.flags(f))
1677 mf2.set(f, ctx2.flags(f))
1675 for f in removed:
1678 for f in removed:
1676 if f in mf2:
1679 if f in mf2:
1677 del mf2[f]
1680 del mf2[f]
1678 else:
1681 else:
1679 # we are comparing two revisions
1682 # we are comparing two revisions
1680 deleted, unknown, ignored = [], [], []
1683 deleted, unknown, ignored = [], [], []
1681 mf2 = mfmatches(ctx2)
1684 mf2 = mfmatches(ctx2)
1682
1685
1683 modified, added, clean = [], [], []
1686 modified, added, clean = [], [], []
1684 withflags = mf1.withflags() | mf2.withflags()
1687 withflags = mf1.withflags() | mf2.withflags()
1685 for fn in mf2:
1688 for fn in mf2:
1686 if fn in mf1:
1689 if fn in mf1:
1687 if (fn not in deleted and
1690 if (fn not in deleted and
1688 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1691 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1689 (mf1[fn] != mf2[fn] and
1692 (mf1[fn] != mf2[fn] and
1690 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1693 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1691 modified.append(fn)
1694 modified.append(fn)
1692 elif listclean:
1695 elif listclean:
1693 clean.append(fn)
1696 clean.append(fn)
1694 del mf1[fn]
1697 del mf1[fn]
1695 elif fn not in deleted:
1698 elif fn not in deleted:
1696 added.append(fn)
1699 added.append(fn)
1697 removed = mf1.keys()
1700 removed = mf1.keys()
1698
1701
1699 if working and modified and not self.dirstate._checklink:
1702 if working and modified and not self.dirstate._checklink:
1700 # Symlink placeholders may get non-symlink-like contents
1703 # Symlink placeholders may get non-symlink-like contents
1701 # via user error or dereferencing by NFS or Samba servers,
1704 # via user error or dereferencing by NFS or Samba servers,
1702 # so we filter out any placeholders that don't look like a
1705 # so we filter out any placeholders that don't look like a
1703 # symlink
1706 # symlink
1704 sane = []
1707 sane = []
1705 for f in modified:
1708 for f in modified:
1706 if ctx2.flags(f) == 'l':
1709 if ctx2.flags(f) == 'l':
1707 d = ctx2[f].data()
1710 d = ctx2[f].data()
1708 if len(d) >= 1024 or '\n' in d or util.binary(d):
1711 if len(d) >= 1024 or '\n' in d or util.binary(d):
1709 self.ui.debug('ignoring suspect symlink placeholder'
1712 self.ui.debug('ignoring suspect symlink placeholder'
1710 ' "%s"\n' % f)
1713 ' "%s"\n' % f)
1711 continue
1714 continue
1712 sane.append(f)
1715 sane.append(f)
1713 modified = sane
1716 modified = sane
1714
1717
1715 r = modified, added, removed, deleted, unknown, ignored, clean
1718 r = modified, added, removed, deleted, unknown, ignored, clean
1716
1719
1717 if listsubrepos:
1720 if listsubrepos:
1718 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1721 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1719 if working:
1722 if working:
1720 rev2 = None
1723 rev2 = None
1721 else:
1724 else:
1722 rev2 = ctx2.substate[subpath][1]
1725 rev2 = ctx2.substate[subpath][1]
1723 try:
1726 try:
1724 submatch = matchmod.narrowmatcher(subpath, match)
1727 submatch = matchmod.narrowmatcher(subpath, match)
1725 s = sub.status(rev2, match=submatch, ignored=listignored,
1728 s = sub.status(rev2, match=submatch, ignored=listignored,
1726 clean=listclean, unknown=listunknown,
1729 clean=listclean, unknown=listunknown,
1727 listsubrepos=True)
1730 listsubrepos=True)
1728 for rfiles, sfiles in zip(r, s):
1731 for rfiles, sfiles in zip(r, s):
1729 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1732 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1730 except error.LookupError:
1733 except error.LookupError:
1731 self.ui.status(_("skipping missing subrepository: %s\n")
1734 self.ui.status(_("skipping missing subrepository: %s\n")
1732 % subpath)
1735 % subpath)
1733
1736
1734 for l in r:
1737 for l in r:
1735 l.sort()
1738 l.sort()
1736 return r
1739 return r
1737
1740
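The return value is always the same seven lists, in this order; ignored,
clean and unknown come back empty unless explicitly requested (a sketch
assuming an open repo object):

    (modified, added, removed, deleted,
     unknown, ignored, clean) = repo.status(unknown=True, ignored=True,
                                            clean=True)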
1738 def heads(self, start=None):
1741 def heads(self, start=None):
1739 heads = self.changelog.heads(start)
1742 heads = self.changelog.heads(start)
1740 # sort the output in rev descending order
1743 # sort the output in rev descending order
1741 return sorted(heads, key=self.changelog.rev, reverse=True)
1744 return sorted(heads, key=self.changelog.rev, reverse=True)
1742
1745
1743 def branchheads(self, branch=None, start=None, closed=False):
1746 def branchheads(self, branch=None, start=None, closed=False):
1744 '''return a (possibly filtered) list of heads for the given branch
1747 '''return a (possibly filtered) list of heads for the given branch
1745
1748
1746 Heads are returned in topological order, from newest to oldest.
1749 Heads are returned in topological order, from newest to oldest.
1747 If branch is None, use the dirstate branch.
1750 If branch is None, use the dirstate branch.
1748 If start is not None, return only heads reachable from start.
1751 If start is not None, return only heads reachable from start.
1749 If closed is True, return heads that are marked as closed as well.
1752 If closed is True, return heads that are marked as closed as well.
1750 '''
1753 '''
1751 if branch is None:
1754 if branch is None:
1752 branch = self[None].branch()
1755 branch = self[None].branch()
1753 branches = self.branchmap()
1756 branches = self.branchmap()
1754 if branch not in branches:
1757 if branch not in branches:
1755 return []
1758 return []
1756 # the cache returns heads ordered lowest to highest
1759 # the cache returns heads ordered lowest to highest
1757 bheads = list(reversed(branches[branch]))
1760 bheads = list(reversed(branches[branch]))
1758 if start is not None:
1761 if start is not None:
1759 # filter out the heads that cannot be reached from startrev
1762 # filter out the heads that cannot be reached from startrev
1760 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1763 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1761 bheads = [h for h in bheads if h in fbheads]
1764 bheads = [h for h in bheads if h in fbheads]
1762 if not closed:
1765 if not closed:
1763 bheads = [h for h in bheads if not self[h].closesbranch()]
1766 bheads = [h for h in bheads if not self[h].closesbranch()]
1764 return bheads
1767 return bheads
1765
1768
1766 def branches(self, nodes):
1769 def branches(self, nodes):
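        # for each starting node, follow first parents until a merge or a
        # root is reached and report (start, end-of-linear-run, p1, p2);
        # this backs the old 'branches' wire-protocol command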
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
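        # for each (top, bottom) pair, walk first parents from top toward
        # bottom and sample nodes at exponentially growing gaps (1, 2, 4,
        # ...); this backs the old 'between' wire-protocol command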
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def pull(self, remote, heads=None, force=False):
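        '''pull changes from remote into the local repository

        Fetch changesets, then synchronize phase and (when enabled)
        obsolescence data. Return 0 if no changes were found, otherwise
        the value returned by addchangegroup().
        '''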
        # don't open a transaction for nothing or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
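                # markers are exchanged as base85-encoded blobs stored in
                # pushkey entries named 'dump0', 'dump1', ...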
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
                    self.invalidatevolatilesets()
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass
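        # A hedged sketch of how an extension might hook this; the helper
        # names are hypothetical, only extensions.wrapfunction is real:
        #
        #   def _checkpush(orig, repo, force, revs):
        #       # extra validation here, then defer to the original
        #       return orig(repo, force, revs)
        #   extensions.wrapfunction(localrepo.localrepository,
        #                           'checkpush', _checkpush)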

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
        - None means nothing to push
        - 0 means HTTP error
        - 1 means we pushed and remote head count is unchanged *or*
          we have outgoing changesets but refused to push
        - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        unfi = self.unfiltered()
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(unfi, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(unfi, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore is empty --> no obsolete markers,
                        # so we can skip the iteration entirely
                        if unfi.obsstore:
                            # these message strings live here to stay within
                            # the 80-char limit
                            mso = _("push includes obsolete changeset: %s!")
                            msu = _("push includes unstable changeset: %s!")
                            msb = _("push includes bumped changeset: %s!")
                            msd = _("push includes divergent changeset: %s!")
                            # If we are pushing and there is at least one
                            # obsolete or unstable changeset in missing, at
                            # least one of the missing heads will be obsolete
                            # or unstable. So checking heads only is ok
                            for node in outgoing.missingheads:
                                ctx = unfi[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(msu % ctx)
                                elif ctx.bumped():
                                    raise util.Abort(msb % ctx)
                                elif ctx.divergent():
                                    raise util.Abort(msd % ctx)
                        discovery.checkheads(unfi, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads)
                    #              )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on remote but
                    # public here.
                    # XXX Beware that the revset breaks if droots are not
                    # XXX strictly roots; we may want to ensure they are,
                    # XXX but it is costly
                    outdated = unfi.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in unfi._bookmarks:
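                # nr: the bookmark's node on the remote (hex string),
                # nl: its node in the local repo (hex string)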
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in unfi:
                    cr = unfi[nr]
                    cl = unfi[nl]
                    if bookmarks.validdest(unfi, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = cl.ancestors([cl.rev(n) for n in bases])
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            hasnode = cl.hasnode
            common = [n for n in common if hasnode(n)]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

    @unfilteredmethod
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
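        # mutable state shared with the lookup() callback below:
        # fstate = [current filename, {filenode: owning clnode}]
        # count = [items done, total items] for progress reporting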
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to look up the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    @unfilteredmethod
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
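            # nodes of this revlog whose introducing changeset (linkrev)
            # is part of the outgoing set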
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    @unfilteredmethod
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
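            # progress callback: the unbundle source calls this once per
            # chunk as the changegroup is read off the stream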
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            self.invalidatevolatilesets()

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as a secret
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                self.updatebranchcache()
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
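            # each entry is framed as '<name>\0<size>\n' followed by
            # exactly <size> bytes of raw store data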
2572 for i in xrange(total_files):
2574 for i in xrange(total_files):
2573 # XXX doesn't support '\n' or '\r' in filenames
2575 # XXX doesn't support '\n' or '\r' in filenames
2574 l = fp.readline()
2576 l = fp.readline()
2575 try:
2577 try:
2576 name, size = l.split('\0', 1)
2578 name, size = l.split('\0', 1)
2577 size = int(size)
2579 size = int(size)
2578 except (ValueError, TypeError):
2580 except (ValueError, TypeError):
2579 raise error.ResponseError(
2581 raise error.ResponseError(
2580 _('unexpected response from remote server:'), l)
2582 _('unexpected response from remote server:'), l)
2581 if self.ui.debugflag:
2583 if self.ui.debugflag:
2582 self.ui.debug('adding %s (%s)\n' %
2584 self.ui.debug('adding %s (%s)\n' %
2583 (name, util.bytecount(size)))
2585 (name, util.bytecount(size)))
2584 # for backwards compat, name was partially encoded
2586 # for backwards compat, name was partially encoded
2585 ofp = self.sopener(store.decodedir(name), 'w')
2587 ofp = self.sopener(store.decodedir(name), 'w')
2586 for chunk in util.filechunkiter(fp, limit=size):
2588 for chunk in util.filechunkiter(fp, limit=size):
2587 handled_bytes += len(chunk)
2589 handled_bytes += len(chunk)
2588 self.ui.progress(_('clone'), handled_bytes,
2590 self.ui.progress(_('clone'), handled_bytes,
2589 total=total_bytes)
2591 total=total_bytes)
2590 ofp.write(chunk)
2592 ofp.write(chunk)
2591 ofp.close()
2593 ofp.close()
2592 elapsed = time.time() - start
2594 elapsed = time.time() - start
2593 if elapsed <= 0:
2595 if elapsed <= 0:
2594 elapsed = 0.001
2596 elapsed = 0.001
2595 self.ui.progress(_('clone'), None)
2597 self.ui.progress(_('clone'), None)
2596 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2598 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2597 (util.bytecount(total_bytes), elapsed,
2599 (util.bytecount(total_bytes), elapsed,
2598 util.bytecount(total_bytes / elapsed)))
2600 util.bytecount(total_bytes / elapsed)))
2599
2601
2600 # new requirements = old non-format requirements +
2602 # new requirements = old non-format requirements +
2601 # new format-related
2603 # new format-related
2602 # requirements from the streamed-in repository
2604 # requirements from the streamed-in repository
2603 requirements.update(set(self.requirements) - self.supportedformats)
2605 requirements.update(set(self.requirements) - self.supportedformats)
2604 self._applyrequirements(requirements)
2606 self._applyrequirements(requirements)
2605 self._writerequirements()
2607 self._writerequirements()
2606
2608
2607 if rbranchmap:
2609 if rbranchmap:
2608 rbheads = []
2610 rbheads = []
2609 for bheads in rbranchmap.itervalues():
2611 for bheads in rbranchmap.itervalues():
2610 rbheads.extend(bheads)
2612 rbheads.extend(bheads)
2611
2613
2612 self.branchcache = rbranchmap
2614 self.branchcache = rbranchmap
2613 if rbheads:
2615 if rbheads:
2614 rtiprev = max((int(self.changelog.rev(node))
2616 rtiprev = max((int(self.changelog.rev(node))
2615 for node in rbheads))
2617 for node in rbheads))
2616 self._writebranchcache(self.branchcache,
2618 self._writebranchcache(self.branchcache,
2617 self[rtiprev].node(), rtiprev)
2619 self[rtiprev].node(), rtiprev)
2618 self.invalidate()
2620 self.invalidate()
2619 return len(self.heads()) + 1
2621 return len(self.heads()) + 1
2620 finally:
2622 finally:
2621 lock.release()
2623 lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
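
The negotiation above can be summarized as a small decision function. This is a hedged sketch with a plain dict standing in for remote.capable(); clonemode and its arguments are illustrative, not Mercurial's peer API:

def clonemode(caps, supportedformats, heads, stream):
    # caps: capability name -> value, a stand-in for remote.capable()
    if not stream:
        stream = 'stream-preferred' in caps
    if stream and not heads:
        if 'stream' in caps:
            return 'stream', set(['revlogv1'])
        streamreqs = caps.get('streamreqs')
        if streamreqs:
            reqs = set(streamreqs.split(','))
            if not reqs - supportedformats:  # we support every remote format
                return 'stream', reqs
    return 'pull', None

print(clonemode({'streamreqs': 'revlogv1,generaldelta'},
                set(['revlogv1', 'generaldelta']), [], True))
# -> ('stream', ...) since both remote formats are supported locally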

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
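
Why tuples: aftertrans deliberately copies the pairs into plain tuples so the returned closure holds no reference back to the repository, letting the repo's destructor run. A usage sketch under that assumption, with a local stand-in so it runs outside Mercurial (paths are illustrative):

import os, tempfile

def _aftertrans(files):  # standalone stand-in using os.rename
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                os.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass
    return a

d = tempfile.mkdtemp()
journal = os.path.join(d, 'journal')
open(journal, 'w').close()
redo = _aftertrans([(journal, os.path.join(d, 'undo'))])
redo()
print(os.path.exists(os.path.join(d, 'undo')))  # True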

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

@@ -1,402 +1,399 @@ mercurial/phases.py

1 """ Mercurial phases support code
1 """ Mercurial phases support code
2
2
3 ---
3 ---
4
4
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 Logilab SA <contact@logilab.fr>
6 Logilab SA <contact@logilab.fr>
7 Augie Fackler <durin42@gmail.com>
7 Augie Fackler <durin42@gmail.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License version 2 or any later version.
10 of the GNU General Public License version 2 or any later version.
11
11
12 ---
12 ---
13
13
14 This module implements most phase logic in mercurial.
14 This module implements most phase logic in mercurial.
15
15
16
16
17 Basic Concept
17 Basic Concept
18 =============
18 =============
19
19
20 A 'changeset phase' is an indicator that tells us how a changeset is
20 A 'changeset phase' is an indicator that tells us how a changeset is
21 manipulated and communicated. The details of each phase is described
21 manipulated and communicated. The details of each phase is described
22 below, here we describe the properties they have in common.
22 below, here we describe the properties they have in common.
23
23
24 Like bookmarks, phases are not stored in history and thus are not
24 Like bookmarks, phases are not stored in history and thus are not
25 permanent and leave no audit trail.
25 permanent and leave no audit trail.
26
26
27 First, no changeset can be in two phases at once. Phases are ordered,
27 First, no changeset can be in two phases at once. Phases are ordered,
28 so they can be considered from lowest to highest. The default, lowest
28 so they can be considered from lowest to highest. The default, lowest
29 phase is 'public' - this is the normal phase of existing changesets. A
29 phase is 'public' - this is the normal phase of existing changesets. A
30 child changeset can not be in a lower phase than its parents.
30 child changeset can not be in a lower phase than its parents.
31
31
32 These phases share a hierarchy of traits:
32 These phases share a hierarchy of traits:
33
33
34 immutable shared
34 immutable shared
35 public: X X
35 public: X X
36 draft: X
36 draft: X
37 secret:
37 secret:
38
38
39 Local commits are draft by default.
39 Local commits are draft by default.
40
40
41 Phase Movement and Exchange
41 Phase Movement and Exchange
42 ===========================
42 ===========================
43
43
44 Phase data is exchanged by pushkey on pull and push. Some servers have
44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 a publish option set, we call such a server a "publishing server".
45 a publish option set, we call such a server a "publishing server".
46 Pushing a draft changeset to a publishing server changes the phase to
46 Pushing a draft changeset to a publishing server changes the phase to
47 public.
47 public.
48
48
49 A small list of fact/rules define the exchange of phase:
49 A small list of fact/rules define the exchange of phase:
50
50
51 * old client never changes server states
51 * old client never changes server states
52 * pull never changes server states
52 * pull never changes server states
53 * publish and old server changesets are seen as public by client
53 * publish and old server changesets are seen as public by client
54 * any secret changeset seen in another repository is lowered to at
54 * any secret changeset seen in another repository is lowered to at
55 least draft
55 least draft
56
56
57 Here is the final table summing up the 49 possible use cases of phase
57 Here is the final table summing up the 49 possible use cases of phase
58 exchange:
58 exchange:
59
59
60 server
60 server
61 old publish non-publish
61 old publish non-publish
62 N X N D P N D P
62 N X N D P N D P
63 old client
63 old client
64 pull
64 pull
65 N - X/X - X/D X/P - X/D X/P
65 N - X/X - X/D X/P - X/D X/P
66 X - X/X - X/D X/P - X/D X/P
66 X - X/X - X/D X/P - X/D X/P
67 push
67 push
68 X X/X X/X X/P X/P X/P X/D X/D X/P
68 X X/X X/X X/P X/P X/P X/D X/D X/P
69 new client
69 new client
70 pull
70 pull
71 N - P/X - P/D P/P - D/D P/P
71 N - P/X - P/D P/P - D/D P/P
72 D - P/X - P/D P/P - D/D P/P
72 D - P/X - P/D P/P - D/D P/P
73 P - P/X - P/D P/P - P/D P/P
73 P - P/X - P/D P/P - P/D P/P
74 push
74 push
75 D P/X P/X P/P P/P P/P D/D D/D P/P
75 D P/X P/X P/P P/P P/P D/D D/D P/P
76 P P/X P/X P/P P/P P/P P/P P/P P/P
76 P P/X P/X P/P P/P P/P P/P P/P P/P
77
77
78 Legend:
78 Legend:
79
79
80 A/B = final state on client / state on server
80 A/B = final state on client / state on server
81
81
82 * N = new/not present,
82 * N = new/not present,
83 * P = public,
83 * P = public,
84 * D = draft,
84 * D = draft,
85 * X = not tracked (i.e., the old client or server has no internal
85 * X = not tracked (i.e., the old client or server has no internal
86 way of recording the phase.)
86 way of recording the phase.)
87
87
88 passive = only pushes
88 passive = only pushes
89
89
90
90
91 A cell here can be read like this:
91 A cell here can be read like this:
92
92
93 "When a new client pushes a draft changeset (D) to a publishing
93 "When a new client pushes a draft changeset (D) to a publishing
94 server where it's not present (N), it's marked public on both
94 server where it's not present (N), it's marked public on both
95 sides (P/P)."
95 sides (P/P)."
96
96
97 Note: old client behave as a publishing server with draft only content
97 Note: old client behave as a publishing server with draft only content
98 - other people see it as public
98 - other people see it as public
99 - content is pushed as draft
99 - content is pushed as draft
100
100
101 """
101 """

import errno
from node import nullid, nullrev, bin, hex, short
from i18n import _
import util, error
-import obsolete

allphases = public, draft, secret = range(3)
trackedphases = allphases[1:]
phasenames = ['public', 'draft', 'secret']

def _filterunknown(ui, changelog, phaseroots):
    """remove unknown nodes from the phase boundary

    Nothing is lost as unknown nodes only hold data for their descendants.
    """
    updated = False
    nodemap = changelog.nodemap # to filter unknown nodes
    for phase, nodes in enumerate(phaseroots):
        missing = [node for node in nodes if node not in nodemap]
        if missing:
            for mnode in missing:
                ui.debug(
                    'removing unknown node %s from %i-phase boundary\n'
                    % (short(mnode), phase))
            nodes.symmetric_difference_update(missing)
            updated = True
    return updated

def _readroots(repo, phasedefaults=None):
    """Read phase roots from disk

    phasedefaults is a list of fn(repo, roots) callables, which are
    executed if the phase roots file does not exist. When phases are
    being initialized on an existing repository, this could be used to
    set selected changesets' phase to something other than public.

    Return (roots, dirty) where dirty is true if roots differ from
    what is being stored.
    """
    repo = repo.unfiltered()
    dirty = False
    roots = [set() for i in allphases]
    try:
        f = repo.sopener('phaseroots')
        try:
            for line in f:
                phase, nh = line.split()
                roots[int(phase)].add(bin(nh))
        finally:
            f.close()
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
        if phasedefaults:
            for f in phasedefaults:
                roots = f(repo, roots)
            dirty = True
    if _filterunknown(repo.ui, repo.changelog, roots):
        dirty = True
    return roots, dirty

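As a hedged illustration of the phasedefaults hook described in the docstring above, a callable like the following could mark every existing changeset draft on first initialization (defaultalldraft is hypothetical, not something Mercurial ships):

def defaultalldraft(repo, roots):
    # seed the draft roots with the repository's root changesets, making
    # every existing changeset draft when phases are first initialized
    roots[draft].update(ctx.node() for ctx in repo.set('roots(all())'))
    return roots
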
class phasecache(object):
    def __init__(self, repo, phasedefaults, _load=True):
        if _load:
            # Cheap trick to allow shallow-copy without copy module
            self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
            self.opener = repo.sopener
            self._phaserevs = None

    def copy(self):
        # Shallow copy meant to ensure isolation in
        # advance/retractboundary(), nothing more.
        ph = phasecache(None, None, _load=False)
        ph.phaseroots = self.phaseroots[:]
        ph.dirty = self.dirty
        ph.opener = self.opener
        ph._phaserevs = self._phaserevs
        return ph

    def replace(self, phcache):
        for a in 'phaseroots dirty opener _phaserevs'.split():
            setattr(self, a, getattr(phcache, a))

    def getphaserevs(self, repo, rebuild=False):
        if rebuild or self._phaserevs is None:
            repo = repo.unfiltered()
            revs = [public] * len(repo.changelog)
            for phase in trackedphases:
                roots = map(repo.changelog.rev, self.phaseroots[phase])
                if roots:
                    for rev in roots:
                        revs[rev] = phase
                    for rev in repo.changelog.descendants(roots):
                        revs[rev] = phase
            self._phaserevs = revs
        return self._phaserevs

    def phase(self, repo, rev):
        # We need a repo argument here to be able to build _phaserevs
        # if necessary. The repository instance is not stored in
        # phasecache to avoid reference cycles. The changelog instance
        # is not stored because it is a filecache() property and can
        # be replaced without us being notified.
        if rev == nullrev:
            return public
        if self._phaserevs is None or rev >= len(self._phaserevs):
            self._phaserevs = self.getphaserevs(repo, rebuild=True)
        return self._phaserevs[rev]

    def write(self):
        if not self.dirty:
            return
        f = self.opener('phaseroots', 'w', atomictemp=True)
        try:
            for phase, roots in enumerate(self.phaseroots):
                for h in roots:
                    f.write('%i %s\n' % (phase, hex(h)))
        finally:
            f.close()
        self.dirty = False

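write() serializes one root per line as '<phase index> <hex node>\n'. A standalone sketch of reading that format back (parsephaseroots is hypothetical; _readroots above is the real reader):

def parsephaseroots(data):
    roots = [set(), set(), set()]  # public, draft, secret
    for line in data.splitlines():
        phase, nh = line.split()
        roots[int(phase)].add(nh)  # real code converts hex to binary nodes
    return roots

print(parsephaseroots('1 d5e2f\n2 91ab3\n'))  # hashes shortened for display
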
    def _updateroots(self, phase, newroots):
        self.phaseroots[phase] = newroots
        self._phaserevs = None
        self.dirty = True

    def advanceboundary(self, repo, targetphase, nodes):
        # Be careful to preserve shallow-copied values: do not update
        # phaseroots values, replace them.

        repo = repo.unfiltered()
        delroots = [] # set of roots deleted by this path
        for phase in xrange(targetphase + 1, len(allphases)):
            # filter nodes that are not in a compatible phase already
            nodes = [n for n in nodes
                     if self.phase(repo, repo[n].rev()) >= phase]
            if not nodes:
                break # no roots to move anymore
            olds = self.phaseroots[phase]
            roots = set(ctx.node() for ctx in repo.set(
                    'roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
            if olds != roots:
                self._updateroots(phase, roots)
                # some roots may need to be declared for lower phases
                delroots.extend(olds - roots)
        # declare deleted roots in the target phase
        if targetphase != 0:
            self.retractboundary(repo, targetphase, delroots)
-        obsolete.clearobscaches(repo)
-        repo.filteredrevcache.clear()
+        repo.invalidatevolatilesets()

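Per the commit message, the two removed calls are folded into a single repo.invalidatevolatilesets() helper. Its definition lives in localrepo.py, outside this hunk, so this is only a sketch of what it presumably groups, inferred from the lines removed here:

    def invalidatevolatilesets(self):
        # group obscache and filtered-revision cache invalidation
        obsolete.clearobscaches(self)
        self.filteredrevcache.clear()
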
    def retractboundary(self, repo, targetphase, nodes):
        # Be careful to preserve shallow-copied values: do not update
        # phaseroots values, replace them.

        repo = repo.unfiltered()
        currentroots = self.phaseroots[targetphase]
        newroots = [n for n in nodes
                    if self.phase(repo, repo[n].rev()) < targetphase]
        if newroots:
            if nullid in newroots:
                raise util.Abort(_('cannot change null revision phase'))
            currentroots = currentroots.copy()
            currentroots.update(newroots)
            ctxs = repo.set('roots(%ln::)', currentroots)
            currentroots.intersection_update(ctx.node() for ctx in ctxs)
            self._updateroots(targetphase, currentroots)
-        obsolete.clearobscaches(repo)
-        repo.filteredrevcache.clear()
+        repo.invalidatevolatilesets()

def advanceboundary(repo, targetphase, nodes):
    """Add nodes to a phase, changing other nodes' phases if necessary.

    This function moves the boundary *forward*: all nodes are set to
    the target phase or kept in a *lower* phase.

    Simplifies the boundary to contain phase roots only."""
    phcache = repo._phasecache.copy()
    phcache.advanceboundary(repo, targetphase, nodes)
    repo._phasecache.replace(phcache)

def retractboundary(repo, targetphase, nodes):
    """Set nodes back to a phase, changing other nodes' phases if
    necessary.

    This function moves the boundary *backward*: all nodes are set to
    the target phase or kept in a *higher* phase.

    Simplifies the boundary to contain phase roots only."""
    phcache = repo._phasecache.copy()
    phcache.retractboundary(repo, targetphase, nodes)
    repo._phasecache.replace(phcache)

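Both wrappers follow the same copy-mutate-replace pattern, so a failure mid-update never leaves a half-modified phasecache on the repo. A usage sketch (repo and node are assumed to exist):

# hypothetical: 'repo' is a local repository, 'node' a known draft changeset
advanceboundary(repo, public, [node])   # draft -> public
retractboundary(repo, draft, [node])    # and back to draft
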
def listphases(repo):
    """List phase roots for serialization over pushkey"""
    keys = {}
    value = '%i' % draft
    for root in repo._phasecache.phaseroots[draft]:
        keys[hex(root)] = value

    if repo.ui.configbool('phases', 'publish', True):
        # Add extra data to let the remote know we are a publishing
        # repo. Publishing repos can't just pretend they are old repos.
        # When pushing to a publishing repo, the client still needs to
        # push the phase boundary.
        #
        # Push does not only push changesets. It also pushes phase data.
        # New phase data may apply to common changesets which won't be
        # pushed (as they are common). Here is a very simple example:
        #
        # 1) repo A pushes changeset X as draft to repo B
        # 2) repo B makes changeset X public
        # 3) repo B pushes to repo A. X is not pushed, but the data that
        #    X is now public should be
        #
        # The server can't handle it on its own as it has no idea of
        # client phase data.
        keys['publishing'] = 'True'
    return keys

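For a publishing repository with a single draft root, the returned mapping would look roughly like this (hash shortened for readability):

# illustrative return value of listphases():
{'6a3d5c...': '1',       # one draft phase root; value is str(draft)
 'publishing': 'True'}   # marker: this server publishes on push
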
def pushphase(repo, nhex, oldphasestr, newphasestr):
    """Update the phase of a single node received over pushkey"""
    repo = repo.unfiltered()
    lock = repo.lock()
    try:
        currentphase = repo[nhex].phase()
        newphase = abs(int(newphasestr)) # let's avoid negative index surprise
        oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
        if currentphase == oldphase and newphase < oldphase:
            advanceboundary(repo, newphase, [bin(nhex)])
            return 1
        elif currentphase == newphase:
            # raced, but got correct result
            return 1
        else:
            return 0
    finally:
        lock.release()

def analyzeremotephases(repo, subset, roots):
    """Compute phase heads and roots in a subset of nodes from a root dict

    * subset is the heads of the subset
    * roots is a {<nodeid> => phase} mapping. key and value are strings.

    Accepts unknown elements in the input.
    """
    repo = repo.unfiltered()
    # build list from dictionary
    draftroots = []
    nodemap = repo.changelog.nodemap # to filter unknown nodes
    for nhex, phase in roots.iteritems():
        if nhex == 'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        phase = int(phase)
        if phase == 0:
            if node != nullid:
                repo.ui.warn(_('ignoring inconsistent public root'
                               ' from remote: %s\n') % nhex)
        elif phase == 1:
            if node in nodemap:
                draftroots.append(node)
        else:
            repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
                         % (phase, nhex))
    # compute heads
    publicheads = newheads(repo, subset, draftroots)
    return publicheads, draftroots

def newheads(repo, heads, roots):
    """compute the new heads of a subset minus another

    * `heads`: defines the first subset
    * `roots`: defines the second set, which we subtract from the first"""
    repo = repo.unfiltered()
    revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
                      heads, roots, roots, heads)
    return [c.node() for c in revset]


def newcommitphase(ui):
    """helper to get the target phase of new commits

    Handles all possible values for the phases.new-commit option.

    """
    v = ui.config('phases', 'new-commit', draft)
    try:
        return phasenames.index(v)
    except ValueError:
        try:
            return int(v)
        except ValueError:
            msg = _("phases.new-commit: not a valid phase name ('%s')")
            raise error.ConfigError(msg % v)

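For example, to make new commits start out secret instead of draft, a user would set the following in hgrc (accepted values are the phase names above or their integer index):

[phases]
new-commit = secret
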
def hassecret(repo):
    """utility function that checks if a repo has any secret changesets."""
    return bool(repo._phasecache.phaseroots[2])