clfilter: ensure unfiltered repo have a filtername attribute too...
Pierre-Yves David
r18186:d336f53c default
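This commit adds a single line to mercurial/localrepo.py: the localrepository class gains a class-level filtername = None. Filtered views of a repository (the repoview proxies returned by filtered(), visible in the diff below) already carry a filtername naming their filter; giving the unfiltered base class the attribute too lets callers read repo.filtername unconditionally, with None meaning "no filter", instead of guarding with hasattr(). A minimal sketch of the pattern, with illustrative class and filter names rather than Mercurial's actual code:

    # Sketch only: 'BaseRepo', 'RepoView' and the 'visible' filter name are
    # illustrative stand-ins for localrepository, repoview.repoview and
    # Mercurial's real filter names.

    class BaseRepo(object):
        filtername = None             # unfiltered repos expose the attribute too

        def unfiltered(self):
            return self

    class RepoView(BaseRepo):
        def __init__(self, base, name):
            self._base = base
            self.filtername = name    # filtered views name their filter

        def unfiltered(self):
            return self._base

    repo = BaseRepo()
    view = RepoView(repo, 'visible')
    assert repo.filtername is None    # no hasattr() guard needed
    assert view.filtername == 'visible'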
@@ -1,2589 +1,2590 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import lock, transaction, store, encoding, base85
11 import lock, transaction, store, encoding, base85
12 import scmutil, util, extensions, hook, error, revset
12 import scmutil, util, extensions, hook, error, revset
13 import match as matchmod
13 import match as matchmod
14 import merge as mergemod
14 import merge as mergemod
15 import tags as tagsmod
15 import tags as tagsmod
16 from lock import release
16 from lock import release
17 import weakref, errno, os, time, inspect
17 import weakref, errno, os, time, inspect
18 import branchmap
18 import branchmap
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
22 class repofilecache(filecache):
22 class repofilecache(filecache):
23 """All filecache usage on repo are done for logic that should be unfiltered
23 """All filecache usage on repo are done for logic that should be unfiltered
24 """
24 """
25
25
26 def __get__(self, repo, type=None):
26 def __get__(self, repo, type=None):
27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 def __set__(self, repo, value):
28 def __set__(self, repo, value):
29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 def __delete__(self, repo):
30 def __delete__(self, repo):
31 return super(repofilecache, self).__delete__(repo.unfiltered())
31 return super(repofilecache, self).__delete__(repo.unfiltered())
32
32
33 class storecache(repofilecache):
33 class storecache(repofilecache):
34 """filecache for files in the store"""
34 """filecache for files in the store"""
35 def join(self, obj, fname):
35 def join(self, obj, fname):
36 return obj.sjoin(fname)
36 return obj.sjoin(fname)
37
37
38 class unfilteredpropertycache(propertycache):
38 class unfilteredpropertycache(propertycache):
39 """propertycache that apply to unfiltered repo only"""
39 """propertycache that apply to unfiltered repo only"""
40
40
41 def __get__(self, repo, type=None):
41 def __get__(self, repo, type=None):
42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43
43
44 class filteredpropertycache(propertycache):
44 class filteredpropertycache(propertycache):
45 """propertycache that must take filtering in account"""
45 """propertycache that must take filtering in account"""
46
46
47 def cachevalue(self, obj, value):
47 def cachevalue(self, obj, value):
48 object.__setattr__(obj, self.name, value)
48 object.__setattr__(obj, self.name, value)
49
49
50
50
51 def hasunfilteredcache(repo, name):
51 def hasunfilteredcache(repo, name):
52 """check if an repo and a unfilteredproperty cached value for <name>"""
52 """check if an repo and a unfilteredproperty cached value for <name>"""
53 return name in vars(repo.unfiltered())
53 return name in vars(repo.unfiltered())
54
54
55 def unfilteredmethod(orig):
55 def unfilteredmethod(orig):
56 """decorate method that always need to be run on unfiltered version"""
56 """decorate method that always need to be run on unfiltered version"""
57 def wrapper(repo, *args, **kwargs):
57 def wrapper(repo, *args, **kwargs):
58 return orig(repo.unfiltered(), *args, **kwargs)
58 return orig(repo.unfiltered(), *args, **kwargs)
59 return wrapper
59 return wrapper
60
60
61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63
63
64 class localpeer(peer.peerrepository):
64 class localpeer(peer.peerrepository):
65 '''peer for a local repo; reflects only the most recent API'''
65 '''peer for a local repo; reflects only the most recent API'''
66
66
67 def __init__(self, repo, caps=MODERNCAPS):
67 def __init__(self, repo, caps=MODERNCAPS):
68 peer.peerrepository.__init__(self)
68 peer.peerrepository.__init__(self)
69 self._repo = repo
69 self._repo = repo
70 self.ui = repo.ui
70 self.ui = repo.ui
71 self._caps = repo._restrictcapabilities(caps)
71 self._caps = repo._restrictcapabilities(caps)
72 self.requirements = repo.requirements
72 self.requirements = repo.requirements
73 self.supportedformats = repo.supportedformats
73 self.supportedformats = repo.supportedformats
74
74
75 def close(self):
75 def close(self):
76 self._repo.close()
76 self._repo.close()
77
77
78 def _capabilities(self):
78 def _capabilities(self):
79 return self._caps
79 return self._caps
80
80
81 def local(self):
81 def local(self):
82 return self._repo
82 return self._repo
83
83
84 def canpush(self):
84 def canpush(self):
85 return True
85 return True
86
86
87 def url(self):
87 def url(self):
88 return self._repo.url()
88 return self._repo.url()
89
89
90 def lookup(self, key):
90 def lookup(self, key):
91 return self._repo.lookup(key)
91 return self._repo.lookup(key)
92
92
93 def branchmap(self):
93 def branchmap(self):
94 return discovery.visiblebranchmap(self._repo)
94 return discovery.visiblebranchmap(self._repo)
95
95
96 def heads(self):
96 def heads(self):
97 return discovery.visibleheads(self._repo)
97 return discovery.visibleheads(self._repo)
98
98
99 def known(self, nodes):
99 def known(self, nodes):
100 return self._repo.known(nodes)
100 return self._repo.known(nodes)
101
101
102 def getbundle(self, source, heads=None, common=None):
102 def getbundle(self, source, heads=None, common=None):
103 return self._repo.getbundle(source, heads=heads, common=common)
103 return self._repo.getbundle(source, heads=heads, common=common)
104
104
105 # TODO We might want to move the next two calls into legacypeer and add
105 # TODO We might want to move the next two calls into legacypeer and add
106 # unbundle instead.
106 # unbundle instead.
107
107
108 def lock(self):
108 def lock(self):
109 return self._repo.lock()
109 return self._repo.lock()
110
110
111 def addchangegroup(self, cg, source, url):
111 def addchangegroup(self, cg, source, url):
112 return self._repo.addchangegroup(cg, source, url)
112 return self._repo.addchangegroup(cg, source, url)
113
113
114 def pushkey(self, namespace, key, old, new):
114 def pushkey(self, namespace, key, old, new):
115 return self._repo.pushkey(namespace, key, old, new)
115 return self._repo.pushkey(namespace, key, old, new)
116
116
117 def listkeys(self, namespace):
117 def listkeys(self, namespace):
118 return self._repo.listkeys(namespace)
118 return self._repo.listkeys(namespace)
119
119
120 def debugwireargs(self, one, two, three=None, four=None, five=None):
120 def debugwireargs(self, one, two, three=None, four=None, five=None):
121 '''used to test argument passing over the wire'''
121 '''used to test argument passing over the wire'''
122 return "%s %s %s %s %s" % (one, two, three, four, five)
122 return "%s %s %s %s %s" % (one, two, three, four, five)
123
123
124 class locallegacypeer(localpeer):
124 class locallegacypeer(localpeer):
125 '''peer extension which implements legacy methods too; used for tests with
125 '''peer extension which implements legacy methods too; used for tests with
126 restricted capabilities'''
126 restricted capabilities'''
127
127
128 def __init__(self, repo):
128 def __init__(self, repo):
129 localpeer.__init__(self, repo, caps=LEGACYCAPS)
129 localpeer.__init__(self, repo, caps=LEGACYCAPS)
130
130
131 def branches(self, nodes):
131 def branches(self, nodes):
132 return self._repo.branches(nodes)
132 return self._repo.branches(nodes)
133
133
134 def between(self, pairs):
134 def between(self, pairs):
135 return self._repo.between(pairs)
135 return self._repo.between(pairs)
136
136
137 def changegroup(self, basenodes, source):
137 def changegroup(self, basenodes, source):
138 return self._repo.changegroup(basenodes, source)
138 return self._repo.changegroup(basenodes, source)
139
139
140 def changegroupsubset(self, bases, heads, source):
140 def changegroupsubset(self, bases, heads, source):
141 return self._repo.changegroupsubset(bases, heads, source)
141 return self._repo.changegroupsubset(bases, heads, source)
142
142
143 class localrepository(object):
143 class localrepository(object):
144
144
145 supportedformats = set(('revlogv1', 'generaldelta'))
145 supportedformats = set(('revlogv1', 'generaldelta'))
146 supported = supportedformats | set(('store', 'fncache', 'shared',
146 supported = supportedformats | set(('store', 'fncache', 'shared',
147 'dotencode'))
147 'dotencode'))
148 openerreqs = set(('revlogv1', 'generaldelta'))
148 openerreqs = set(('revlogv1', 'generaldelta'))
149 requirements = ['revlogv1']
149 requirements = ['revlogv1']
150 filtername = None
150
151
151 def _baserequirements(self, create):
152 def _baserequirements(self, create):
152 return self.requirements[:]
153 return self.requirements[:]
153
154
154 def __init__(self, baseui, path=None, create=False):
155 def __init__(self, baseui, path=None, create=False):
155 self.wvfs = scmutil.vfs(path, expand=True)
156 self.wvfs = scmutil.vfs(path, expand=True)
156 self.wopener = self.wvfs
157 self.wopener = self.wvfs
157 self.root = self.wvfs.base
158 self.root = self.wvfs.base
158 self.path = self.wvfs.join(".hg")
159 self.path = self.wvfs.join(".hg")
159 self.origroot = path
160 self.origroot = path
160 self.auditor = scmutil.pathauditor(self.root, self._checknested)
161 self.auditor = scmutil.pathauditor(self.root, self._checknested)
161 self.vfs = scmutil.vfs(self.path)
162 self.vfs = scmutil.vfs(self.path)
162 self.opener = self.vfs
163 self.opener = self.vfs
163 self.baseui = baseui
164 self.baseui = baseui
164 self.ui = baseui.copy()
165 self.ui = baseui.copy()
165 # A list of callback to shape the phase if no data were found.
166 # A list of callback to shape the phase if no data were found.
166 # Callback are in the form: func(repo, roots) --> processed root.
167 # Callback are in the form: func(repo, roots) --> processed root.
167 # This list it to be filled by extension during repo setup
168 # This list it to be filled by extension during repo setup
168 self._phasedefaults = []
169 self._phasedefaults = []
169 try:
170 try:
170 self.ui.readconfig(self.join("hgrc"), self.root)
171 self.ui.readconfig(self.join("hgrc"), self.root)
171 extensions.loadall(self.ui)
172 extensions.loadall(self.ui)
172 except IOError:
173 except IOError:
173 pass
174 pass
174
175
175 if not self.vfs.isdir():
176 if not self.vfs.isdir():
176 if create:
177 if create:
177 if not self.wvfs.exists():
178 if not self.wvfs.exists():
178 self.wvfs.makedirs()
179 self.wvfs.makedirs()
179 self.vfs.makedir(notindexed=True)
180 self.vfs.makedir(notindexed=True)
180 requirements = self._baserequirements(create)
181 requirements = self._baserequirements(create)
181 if self.ui.configbool('format', 'usestore', True):
182 if self.ui.configbool('format', 'usestore', True):
182 self.vfs.mkdir("store")
183 self.vfs.mkdir("store")
183 requirements.append("store")
184 requirements.append("store")
184 if self.ui.configbool('format', 'usefncache', True):
185 if self.ui.configbool('format', 'usefncache', True):
185 requirements.append("fncache")
186 requirements.append("fncache")
186 if self.ui.configbool('format', 'dotencode', True):
187 if self.ui.configbool('format', 'dotencode', True):
187 requirements.append('dotencode')
188 requirements.append('dotencode')
188 # create an invalid changelog
189 # create an invalid changelog
189 self.vfs.append(
190 self.vfs.append(
190 "00changelog.i",
191 "00changelog.i",
191 '\0\0\0\2' # represents revlogv2
192 '\0\0\0\2' # represents revlogv2
192 ' dummy changelog to prevent using the old repo layout'
193 ' dummy changelog to prevent using the old repo layout'
193 )
194 )
194 if self.ui.configbool('format', 'generaldelta', False):
195 if self.ui.configbool('format', 'generaldelta', False):
195 requirements.append("generaldelta")
196 requirements.append("generaldelta")
196 requirements = set(requirements)
197 requirements = set(requirements)
197 else:
198 else:
198 raise error.RepoError(_("repository %s not found") % path)
199 raise error.RepoError(_("repository %s not found") % path)
199 elif create:
200 elif create:
200 raise error.RepoError(_("repository %s already exists") % path)
201 raise error.RepoError(_("repository %s already exists") % path)
201 else:
202 else:
202 try:
203 try:
203 requirements = scmutil.readrequires(self.vfs, self.supported)
204 requirements = scmutil.readrequires(self.vfs, self.supported)
204 except IOError, inst:
205 except IOError, inst:
205 if inst.errno != errno.ENOENT:
206 if inst.errno != errno.ENOENT:
206 raise
207 raise
207 requirements = set()
208 requirements = set()
208
209
209 self.sharedpath = self.path
210 self.sharedpath = self.path
210 try:
211 try:
211 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
212 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
212 if not os.path.exists(s):
213 if not os.path.exists(s):
213 raise error.RepoError(
214 raise error.RepoError(
214 _('.hg/sharedpath points to nonexistent directory %s') % s)
215 _('.hg/sharedpath points to nonexistent directory %s') % s)
215 self.sharedpath = s
216 self.sharedpath = s
216 except IOError, inst:
217 except IOError, inst:
217 if inst.errno != errno.ENOENT:
218 if inst.errno != errno.ENOENT:
218 raise
219 raise
219
220
220 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
221 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
221 self.spath = self.store.path
222 self.spath = self.store.path
222 self.svfs = self.store.vfs
223 self.svfs = self.store.vfs
223 self.sopener = self.svfs
224 self.sopener = self.svfs
224 self.sjoin = self.store.join
225 self.sjoin = self.store.join
225 self.vfs.createmode = self.store.createmode
226 self.vfs.createmode = self.store.createmode
226 self._applyrequirements(requirements)
227 self._applyrequirements(requirements)
227 if create:
228 if create:
228 self._writerequirements()
229 self._writerequirements()
229
230
230
231
231 self._branchcache = None
232 self._branchcache = None
232 self.filterpats = {}
233 self.filterpats = {}
233 self._datafilters = {}
234 self._datafilters = {}
234 self._transref = self._lockref = self._wlockref = None
235 self._transref = self._lockref = self._wlockref = None
235
236
236 # A cache for various files under .hg/ that tracks file changes,
237 # A cache for various files under .hg/ that tracks file changes,
237 # (used by the filecache decorator)
238 # (used by the filecache decorator)
238 #
239 #
239 # Maps a property name to its util.filecacheentry
240 # Maps a property name to its util.filecacheentry
240 self._filecache = {}
241 self._filecache = {}
241
242
242 # hold sets of revision to be filtered
243 # hold sets of revision to be filtered
243 # should be cleared when something might have changed the filter value:
244 # should be cleared when something might have changed the filter value:
244 # - new changesets,
245 # - new changesets,
245 # - phase change,
246 # - phase change,
246 # - new obsolescence marker,
247 # - new obsolescence marker,
247 # - working directory parent change,
248 # - working directory parent change,
248 # - bookmark changes
249 # - bookmark changes
249 self.filteredrevcache = {}
250 self.filteredrevcache = {}
250
251
251 def close(self):
252 def close(self):
252 pass
253 pass
253
254
254 def _restrictcapabilities(self, caps):
255 def _restrictcapabilities(self, caps):
255 return caps
256 return caps
256
257
257 def _applyrequirements(self, requirements):
258 def _applyrequirements(self, requirements):
258 self.requirements = requirements
259 self.requirements = requirements
259 self.sopener.options = dict((r, 1) for r in requirements
260 self.sopener.options = dict((r, 1) for r in requirements
260 if r in self.openerreqs)
261 if r in self.openerreqs)
261
262
262 def _writerequirements(self):
263 def _writerequirements(self):
263 reqfile = self.opener("requires", "w")
264 reqfile = self.opener("requires", "w")
264 for r in self.requirements:
265 for r in self.requirements:
265 reqfile.write("%s\n" % r)
266 reqfile.write("%s\n" % r)
266 reqfile.close()
267 reqfile.close()
267
268
268 def _checknested(self, path):
269 def _checknested(self, path):
269 """Determine if path is a legal nested repository."""
270 """Determine if path is a legal nested repository."""
270 if not path.startswith(self.root):
271 if not path.startswith(self.root):
271 return False
272 return False
272 subpath = path[len(self.root) + 1:]
273 subpath = path[len(self.root) + 1:]
273 normsubpath = util.pconvert(subpath)
274 normsubpath = util.pconvert(subpath)
274
275
275 # XXX: Checking against the current working copy is wrong in
276 # XXX: Checking against the current working copy is wrong in
276 # the sense that it can reject things like
277 # the sense that it can reject things like
277 #
278 #
278 # $ hg cat -r 10 sub/x.txt
279 # $ hg cat -r 10 sub/x.txt
279 #
280 #
280 # if sub/ is no longer a subrepository in the working copy
281 # if sub/ is no longer a subrepository in the working copy
281 # parent revision.
282 # parent revision.
282 #
283 #
283 # However, it can of course also allow things that would have
284 # However, it can of course also allow things that would have
284 # been rejected before, such as the above cat command if sub/
285 # been rejected before, such as the above cat command if sub/
285 # is a subrepository now, but was a normal directory before.
286 # is a subrepository now, but was a normal directory before.
286 # The old path auditor would have rejected by mistake since it
287 # The old path auditor would have rejected by mistake since it
287 # panics when it sees sub/.hg/.
288 # panics when it sees sub/.hg/.
288 #
289 #
289 # All in all, checking against the working copy seems sensible
290 # All in all, checking against the working copy seems sensible
290 # since we want to prevent access to nested repositories on
291 # since we want to prevent access to nested repositories on
291 # the filesystem *now*.
292 # the filesystem *now*.
292 ctx = self[None]
293 ctx = self[None]
293 parts = util.splitpath(subpath)
294 parts = util.splitpath(subpath)
294 while parts:
295 while parts:
295 prefix = '/'.join(parts)
296 prefix = '/'.join(parts)
296 if prefix in ctx.substate:
297 if prefix in ctx.substate:
297 if prefix == normsubpath:
298 if prefix == normsubpath:
298 return True
299 return True
299 else:
300 else:
300 sub = ctx.sub(prefix)
301 sub = ctx.sub(prefix)
301 return sub.checknested(subpath[len(prefix) + 1:])
302 return sub.checknested(subpath[len(prefix) + 1:])
302 else:
303 else:
303 parts.pop()
304 parts.pop()
304 return False
305 return False
305
306
306 def peer(self):
307 def peer(self):
307 return localpeer(self) # not cached to avoid reference cycle
308 return localpeer(self) # not cached to avoid reference cycle
308
309
309 def unfiltered(self):
310 def unfiltered(self):
310 """Return unfiltered version of the repository
311 """Return unfiltered version of the repository
311
312
312 Intended to be ovewritten by filtered repo."""
313 Intended to be ovewritten by filtered repo."""
313 return self
314 return self
314
315
315 def filtered(self, name):
316 def filtered(self, name):
316 """Return a filtered version of a repository"""
317 """Return a filtered version of a repository"""
317 # build a new class with the mixin and the current class
318 # build a new class with the mixin and the current class
318 # (possibily subclass of the repo)
319 # (possibily subclass of the repo)
319 class proxycls(repoview.repoview, self.unfiltered().__class__):
320 class proxycls(repoview.repoview, self.unfiltered().__class__):
320 pass
321 pass
321 return proxycls(self, name)
322 return proxycls(self, name)
322
323
323 @repofilecache('bookmarks')
324 @repofilecache('bookmarks')
324 def _bookmarks(self):
325 def _bookmarks(self):
325 return bookmarks.bmstore(self)
326 return bookmarks.bmstore(self)
326
327
327 @repofilecache('bookmarks.current')
328 @repofilecache('bookmarks.current')
328 def _bookmarkcurrent(self):
329 def _bookmarkcurrent(self):
329 return bookmarks.readcurrent(self)
330 return bookmarks.readcurrent(self)
330
331
331 def bookmarkheads(self, bookmark):
332 def bookmarkheads(self, bookmark):
332 name = bookmark.split('@', 1)[0]
333 name = bookmark.split('@', 1)[0]
333 heads = []
334 heads = []
334 for mark, n in self._bookmarks.iteritems():
335 for mark, n in self._bookmarks.iteritems():
335 if mark.split('@', 1)[0] == name:
336 if mark.split('@', 1)[0] == name:
336 heads.append(n)
337 heads.append(n)
337 return heads
338 return heads
338
339
339 @storecache('phaseroots')
340 @storecache('phaseroots')
340 def _phasecache(self):
341 def _phasecache(self):
341 return phases.phasecache(self, self._phasedefaults)
342 return phases.phasecache(self, self._phasedefaults)
342
343
343 @storecache('obsstore')
344 @storecache('obsstore')
344 def obsstore(self):
345 def obsstore(self):
345 store = obsolete.obsstore(self.sopener)
346 store = obsolete.obsstore(self.sopener)
346 if store and not obsolete._enabled:
347 if store and not obsolete._enabled:
347 # message is rare enough to not be translated
348 # message is rare enough to not be translated
348 msg = 'obsolete feature not enabled but %i markers found!\n'
349 msg = 'obsolete feature not enabled but %i markers found!\n'
349 self.ui.warn(msg % len(list(store)))
350 self.ui.warn(msg % len(list(store)))
350 return store
351 return store
351
352
352 @unfilteredpropertycache
353 @unfilteredpropertycache
353 def hiddenrevs(self):
354 def hiddenrevs(self):
354 """hiddenrevs: revs that should be hidden by command and tools
355 """hiddenrevs: revs that should be hidden by command and tools
355
356
356 This set is carried on the repo to ease initialization and lazy
357 This set is carried on the repo to ease initialization and lazy
357 loading; it'll probably move back to changelog for efficiency and
358 loading; it'll probably move back to changelog for efficiency and
358 consistency reasons.
359 consistency reasons.
359
360
360 Note that the hiddenrevs will needs invalidations when
361 Note that the hiddenrevs will needs invalidations when
361 - a new changesets is added (possible unstable above extinct)
362 - a new changesets is added (possible unstable above extinct)
362 - a new obsolete marker is added (possible new extinct changeset)
363 - a new obsolete marker is added (possible new extinct changeset)
363
364
364 hidden changesets cannot have non-hidden descendants
365 hidden changesets cannot have non-hidden descendants
365 """
366 """
366 hidden = set()
367 hidden = set()
367 if self.obsstore:
368 if self.obsstore:
368 ### hide extinct changeset that are not accessible by any mean
369 ### hide extinct changeset that are not accessible by any mean
369 hiddenquery = 'extinct() - ::(. + bookmark())'
370 hiddenquery = 'extinct() - ::(. + bookmark())'
370 hidden.update(self.revs(hiddenquery))
371 hidden.update(self.revs(hiddenquery))
371 return hidden
372 return hidden
372
373
373 @storecache('00changelog.i')
374 @storecache('00changelog.i')
374 def changelog(self):
375 def changelog(self):
375 c = changelog.changelog(self.sopener)
376 c = changelog.changelog(self.sopener)
376 if 'HG_PENDING' in os.environ:
377 if 'HG_PENDING' in os.environ:
377 p = os.environ['HG_PENDING']
378 p = os.environ['HG_PENDING']
378 if p.startswith(self.root):
379 if p.startswith(self.root):
379 c.readpending('00changelog.i.a')
380 c.readpending('00changelog.i.a')
380 return c
381 return c
381
382
382 @storecache('00manifest.i')
383 @storecache('00manifest.i')
383 def manifest(self):
384 def manifest(self):
384 return manifest.manifest(self.sopener)
385 return manifest.manifest(self.sopener)
385
386
386 @repofilecache('dirstate')
387 @repofilecache('dirstate')
387 def dirstate(self):
388 def dirstate(self):
388 warned = [0]
389 warned = [0]
389 def validate(node):
390 def validate(node):
390 try:
391 try:
391 self.changelog.rev(node)
392 self.changelog.rev(node)
392 return node
393 return node
393 except error.LookupError:
394 except error.LookupError:
394 if not warned[0]:
395 if not warned[0]:
395 warned[0] = True
396 warned[0] = True
396 self.ui.warn(_("warning: ignoring unknown"
397 self.ui.warn(_("warning: ignoring unknown"
397 " working parent %s!\n") % short(node))
398 " working parent %s!\n") % short(node))
398 return nullid
399 return nullid
399
400
400 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
401 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
401
402
402 def __getitem__(self, changeid):
403 def __getitem__(self, changeid):
403 if changeid is None:
404 if changeid is None:
404 return context.workingctx(self)
405 return context.workingctx(self)
405 return context.changectx(self, changeid)
406 return context.changectx(self, changeid)
406
407
407 def __contains__(self, changeid):
408 def __contains__(self, changeid):
408 try:
409 try:
409 return bool(self.lookup(changeid))
410 return bool(self.lookup(changeid))
410 except error.RepoLookupError:
411 except error.RepoLookupError:
411 return False
412 return False
412
413
413 def __nonzero__(self):
414 def __nonzero__(self):
414 return True
415 return True
415
416
416 def __len__(self):
417 def __len__(self):
417 return len(self.changelog)
418 return len(self.changelog)
418
419
419 def __iter__(self):
420 def __iter__(self):
420 return iter(self.changelog)
421 return iter(self.changelog)
421
422
422 def revs(self, expr, *args):
423 def revs(self, expr, *args):
423 '''Return a list of revisions matching the given revset'''
424 '''Return a list of revisions matching the given revset'''
424 expr = revset.formatspec(expr, *args)
425 expr = revset.formatspec(expr, *args)
425 m = revset.match(None, expr)
426 m = revset.match(None, expr)
426 return [r for r in m(self, list(self))]
427 return [r for r in m(self, list(self))]
427
428
428 def set(self, expr, *args):
429 def set(self, expr, *args):
429 '''
430 '''
430 Yield a context for each matching revision, after doing arg
431 Yield a context for each matching revision, after doing arg
431 replacement via revset.formatspec
432 replacement via revset.formatspec
432 '''
433 '''
433 for r in self.revs(expr, *args):
434 for r in self.revs(expr, *args):
434 yield self[r]
435 yield self[r]
435
436
436 def url(self):
437 def url(self):
437 return 'file:' + self.root
438 return 'file:' + self.root
438
439
439 def hook(self, name, throw=False, **args):
440 def hook(self, name, throw=False, **args):
440 return hook.hook(self.ui, self, name, throw, **args)
441 return hook.hook(self.ui, self, name, throw, **args)
441
442
442 @unfilteredmethod
443 @unfilteredmethod
443 def _tag(self, names, node, message, local, user, date, extra={}):
444 def _tag(self, names, node, message, local, user, date, extra={}):
444 if isinstance(names, str):
445 if isinstance(names, str):
445 names = (names,)
446 names = (names,)
446
447
447 branches = self.branchmap()
448 branches = self.branchmap()
448 for name in names:
449 for name in names:
449 self.hook('pretag', throw=True, node=hex(node), tag=name,
450 self.hook('pretag', throw=True, node=hex(node), tag=name,
450 local=local)
451 local=local)
451 if name in branches:
452 if name in branches:
452 self.ui.warn(_("warning: tag %s conflicts with existing"
453 self.ui.warn(_("warning: tag %s conflicts with existing"
453 " branch name\n") % name)
454 " branch name\n") % name)
454
455
455 def writetags(fp, names, munge, prevtags):
456 def writetags(fp, names, munge, prevtags):
456 fp.seek(0, 2)
457 fp.seek(0, 2)
457 if prevtags and prevtags[-1] != '\n':
458 if prevtags and prevtags[-1] != '\n':
458 fp.write('\n')
459 fp.write('\n')
459 for name in names:
460 for name in names:
460 m = munge and munge(name) or name
461 m = munge and munge(name) or name
461 if (self._tagscache.tagtypes and
462 if (self._tagscache.tagtypes and
462 name in self._tagscache.tagtypes):
463 name in self._tagscache.tagtypes):
463 old = self.tags().get(name, nullid)
464 old = self.tags().get(name, nullid)
464 fp.write('%s %s\n' % (hex(old), m))
465 fp.write('%s %s\n' % (hex(old), m))
465 fp.write('%s %s\n' % (hex(node), m))
466 fp.write('%s %s\n' % (hex(node), m))
466 fp.close()
467 fp.close()
467
468
468 prevtags = ''
469 prevtags = ''
469 if local:
470 if local:
470 try:
471 try:
471 fp = self.opener('localtags', 'r+')
472 fp = self.opener('localtags', 'r+')
472 except IOError:
473 except IOError:
473 fp = self.opener('localtags', 'a')
474 fp = self.opener('localtags', 'a')
474 else:
475 else:
475 prevtags = fp.read()
476 prevtags = fp.read()
476
477
477 # local tags are stored in the current charset
478 # local tags are stored in the current charset
478 writetags(fp, names, None, prevtags)
479 writetags(fp, names, None, prevtags)
479 for name in names:
480 for name in names:
480 self.hook('tag', node=hex(node), tag=name, local=local)
481 self.hook('tag', node=hex(node), tag=name, local=local)
481 return
482 return
482
483
483 try:
484 try:
484 fp = self.wfile('.hgtags', 'rb+')
485 fp = self.wfile('.hgtags', 'rb+')
485 except IOError, e:
486 except IOError, e:
486 if e.errno != errno.ENOENT:
487 if e.errno != errno.ENOENT:
487 raise
488 raise
488 fp = self.wfile('.hgtags', 'ab')
489 fp = self.wfile('.hgtags', 'ab')
489 else:
490 else:
490 prevtags = fp.read()
491 prevtags = fp.read()
491
492
492 # committed tags are stored in UTF-8
493 # committed tags are stored in UTF-8
493 writetags(fp, names, encoding.fromlocal, prevtags)
494 writetags(fp, names, encoding.fromlocal, prevtags)
494
495
495 fp.close()
496 fp.close()
496
497
497 self.invalidatecaches()
498 self.invalidatecaches()
498
499
499 if '.hgtags' not in self.dirstate:
500 if '.hgtags' not in self.dirstate:
500 self[None].add(['.hgtags'])
501 self[None].add(['.hgtags'])
501
502
502 m = matchmod.exact(self.root, '', ['.hgtags'])
503 m = matchmod.exact(self.root, '', ['.hgtags'])
503 tagnode = self.commit(message, user, date, extra=extra, match=m)
504 tagnode = self.commit(message, user, date, extra=extra, match=m)
504
505
505 for name in names:
506 for name in names:
506 self.hook('tag', node=hex(node), tag=name, local=local)
507 self.hook('tag', node=hex(node), tag=name, local=local)
507
508
508 return tagnode
509 return tagnode
509
510
510 def tag(self, names, node, message, local, user, date):
511 def tag(self, names, node, message, local, user, date):
511 '''tag a revision with one or more symbolic names.
512 '''tag a revision with one or more symbolic names.
512
513
513 names is a list of strings or, when adding a single tag, names may be a
514 names is a list of strings or, when adding a single tag, names may be a
514 string.
515 string.
515
516
516 if local is True, the tags are stored in a per-repository file.
517 if local is True, the tags are stored in a per-repository file.
517 otherwise, they are stored in the .hgtags file, and a new
518 otherwise, they are stored in the .hgtags file, and a new
518 changeset is committed with the change.
519 changeset is committed with the change.
519
520
520 keyword arguments:
521 keyword arguments:
521
522
522 local: whether to store tags in non-version-controlled file
523 local: whether to store tags in non-version-controlled file
523 (default False)
524 (default False)
524
525
525 message: commit message to use if committing
526 message: commit message to use if committing
526
527
527 user: name of user to use if committing
528 user: name of user to use if committing
528
529
529 date: date tuple to use if committing'''
530 date: date tuple to use if committing'''
530
531
531 if not local:
532 if not local:
532 for x in self.status()[:5]:
533 for x in self.status()[:5]:
533 if '.hgtags' in x:
534 if '.hgtags' in x:
534 raise util.Abort(_('working copy of .hgtags is changed '
535 raise util.Abort(_('working copy of .hgtags is changed '
535 '(please commit .hgtags manually)'))
536 '(please commit .hgtags manually)'))
536
537
537 self.tags() # instantiate the cache
538 self.tags() # instantiate the cache
538 self._tag(names, node, message, local, user, date)
539 self._tag(names, node, message, local, user, date)
539
540
540 @filteredpropertycache
541 @filteredpropertycache
541 def _tagscache(self):
542 def _tagscache(self):
542 '''Returns a tagscache object that contains various tags related
543 '''Returns a tagscache object that contains various tags related
543 caches.'''
544 caches.'''
544
545
545 # This simplifies its cache management by having one decorated
546 # This simplifies its cache management by having one decorated
546 # function (this one) and the rest simply fetch things from it.
547 # function (this one) and the rest simply fetch things from it.
547 class tagscache(object):
548 class tagscache(object):
548 def __init__(self):
549 def __init__(self):
549 # These two define the set of tags for this repository. tags
550 # These two define the set of tags for this repository. tags
550 # maps tag name to node; tagtypes maps tag name to 'global' or
551 # maps tag name to node; tagtypes maps tag name to 'global' or
551 # 'local'. (Global tags are defined by .hgtags across all
552 # 'local'. (Global tags are defined by .hgtags across all
552 # heads, and local tags are defined in .hg/localtags.)
553 # heads, and local tags are defined in .hg/localtags.)
553 # They constitute the in-memory cache of tags.
554 # They constitute the in-memory cache of tags.
554 self.tags = self.tagtypes = None
555 self.tags = self.tagtypes = None
555
556
556 self.nodetagscache = self.tagslist = None
557 self.nodetagscache = self.tagslist = None
557
558
558 cache = tagscache()
559 cache = tagscache()
559 cache.tags, cache.tagtypes = self._findtags()
560 cache.tags, cache.tagtypes = self._findtags()
560
561
561 return cache
562 return cache
562
563
563 def tags(self):
564 def tags(self):
564 '''return a mapping of tag to node'''
565 '''return a mapping of tag to node'''
565 t = {}
566 t = {}
566 if self.changelog.filteredrevs:
567 if self.changelog.filteredrevs:
567 tags, tt = self._findtags()
568 tags, tt = self._findtags()
568 else:
569 else:
569 tags = self._tagscache.tags
570 tags = self._tagscache.tags
570 for k, v in tags.iteritems():
571 for k, v in tags.iteritems():
571 try:
572 try:
572 # ignore tags to unknown nodes
573 # ignore tags to unknown nodes
573 self.changelog.rev(v)
574 self.changelog.rev(v)
574 t[k] = v
575 t[k] = v
575 except (error.LookupError, ValueError):
576 except (error.LookupError, ValueError):
576 pass
577 pass
577 return t
578 return t
578
579
579 def _findtags(self):
580 def _findtags(self):
580 '''Do the hard work of finding tags. Return a pair of dicts
581 '''Do the hard work of finding tags. Return a pair of dicts
581 (tags, tagtypes) where tags maps tag name to node, and tagtypes
582 (tags, tagtypes) where tags maps tag name to node, and tagtypes
582 maps tag name to a string like \'global\' or \'local\'.
583 maps tag name to a string like \'global\' or \'local\'.
583 Subclasses or extensions are free to add their own tags, but
584 Subclasses or extensions are free to add their own tags, but
584 should be aware that the returned dicts will be retained for the
585 should be aware that the returned dicts will be retained for the
585 duration of the localrepo object.'''
586 duration of the localrepo object.'''
586
587
587 # XXX what tagtype should subclasses/extensions use? Currently
588 # XXX what tagtype should subclasses/extensions use? Currently
588 # mq and bookmarks add tags, but do not set the tagtype at all.
589 # mq and bookmarks add tags, but do not set the tagtype at all.
589 # Should each extension invent its own tag type? Should there
590 # Should each extension invent its own tag type? Should there
590 # be one tagtype for all such "virtual" tags? Or is the status
591 # be one tagtype for all such "virtual" tags? Or is the status
591 # quo fine?
592 # quo fine?
592
593
593 alltags = {} # map tag name to (node, hist)
594 alltags = {} # map tag name to (node, hist)
594 tagtypes = {}
595 tagtypes = {}
595
596
596 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
597 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
597 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
598 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
598
599
599 # Build the return dicts. Have to re-encode tag names because
600 # Build the return dicts. Have to re-encode tag names because
600 # the tags module always uses UTF-8 (in order not to lose info
601 # the tags module always uses UTF-8 (in order not to lose info
601 # writing to the cache), but the rest of Mercurial wants them in
602 # writing to the cache), but the rest of Mercurial wants them in
602 # local encoding.
603 # local encoding.
603 tags = {}
604 tags = {}
604 for (name, (node, hist)) in alltags.iteritems():
605 for (name, (node, hist)) in alltags.iteritems():
605 if node != nullid:
606 if node != nullid:
606 tags[encoding.tolocal(name)] = node
607 tags[encoding.tolocal(name)] = node
607 tags['tip'] = self.changelog.tip()
608 tags['tip'] = self.changelog.tip()
608 tagtypes = dict([(encoding.tolocal(name), value)
609 tagtypes = dict([(encoding.tolocal(name), value)
609 for (name, value) in tagtypes.iteritems()])
610 for (name, value) in tagtypes.iteritems()])
610 return (tags, tagtypes)
611 return (tags, tagtypes)
611
612
612 def tagtype(self, tagname):
613 def tagtype(self, tagname):
613 '''
614 '''
614 return the type of the given tag. result can be:
615 return the type of the given tag. result can be:
615
616
616 'local' : a local tag
617 'local' : a local tag
617 'global' : a global tag
618 'global' : a global tag
618 None : tag does not exist
619 None : tag does not exist
619 '''
620 '''
620
621
621 return self._tagscache.tagtypes.get(tagname)
622 return self._tagscache.tagtypes.get(tagname)
622
623
623 def tagslist(self):
624 def tagslist(self):
624 '''return a list of tags ordered by revision'''
625 '''return a list of tags ordered by revision'''
625 if not self._tagscache.tagslist:
626 if not self._tagscache.tagslist:
626 l = []
627 l = []
627 for t, n in self.tags().iteritems():
628 for t, n in self.tags().iteritems():
628 r = self.changelog.rev(n)
629 r = self.changelog.rev(n)
629 l.append((r, t, n))
630 l.append((r, t, n))
630 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
631 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
631
632
632 return self._tagscache.tagslist
633 return self._tagscache.tagslist
633
634
634 def nodetags(self, node):
635 def nodetags(self, node):
635 '''return the tags associated with a node'''
636 '''return the tags associated with a node'''
636 if not self._tagscache.nodetagscache:
637 if not self._tagscache.nodetagscache:
637 nodetagscache = {}
638 nodetagscache = {}
638 for t, n in self._tagscache.tags.iteritems():
639 for t, n in self._tagscache.tags.iteritems():
639 nodetagscache.setdefault(n, []).append(t)
640 nodetagscache.setdefault(n, []).append(t)
640 for tags in nodetagscache.itervalues():
641 for tags in nodetagscache.itervalues():
641 tags.sort()
642 tags.sort()
642 self._tagscache.nodetagscache = nodetagscache
643 self._tagscache.nodetagscache = nodetagscache
643 return self._tagscache.nodetagscache.get(node, [])
644 return self._tagscache.nodetagscache.get(node, [])
644
645
645 def nodebookmarks(self, node):
646 def nodebookmarks(self, node):
646 marks = []
647 marks = []
647 for bookmark, n in self._bookmarks.iteritems():
648 for bookmark, n in self._bookmarks.iteritems():
648 if n == node:
649 if n == node:
649 marks.append(bookmark)
650 marks.append(bookmark)
650 return sorted(marks)
651 return sorted(marks)
651
652
652 def _cacheabletip(self):
653 def _cacheabletip(self):
653 """tip-most revision stable enought to used in persistent cache
654 """tip-most revision stable enought to used in persistent cache
654
655
655 This function is overwritten by MQ to ensure we do not write cache for
656 This function is overwritten by MQ to ensure we do not write cache for
656 a part of the history that will likely change.
657 a part of the history that will likely change.
657
658
658 Efficient handling of filtered revision in branchcache should offer a
659 Efficient handling of filtered revision in branchcache should offer a
659 better alternative. But we are using this approach until it is ready.
660 better alternative. But we are using this approach until it is ready.
660 """
661 """
661 cl = self.changelog
662 cl = self.changelog
662 return cl.rev(cl.tip())
663 return cl.rev(cl.tip())
663
664
664 def branchmap(self):
665 def branchmap(self):
665 '''returns a dictionary {branch: [branchheads]}'''
666 '''returns a dictionary {branch: [branchheads]}'''
666 if self.changelog.filteredrevs:
667 if self.changelog.filteredrevs:
667 # some changeset are excluded we can't use the cache
668 # some changeset are excluded we can't use the cache
668 bmap = branchmap.branchcache()
669 bmap = branchmap.branchcache()
669 bmap.update(self, (self[r] for r in self))
670 bmap.update(self, (self[r] for r in self))
670 return bmap
671 return bmap
671 else:
672 else:
672 branchmap.updatecache(self)
673 branchmap.updatecache(self)
673 return self._branchcache
674 return self._branchcache
674
675
675
676
676 def _branchtip(self, heads):
677 def _branchtip(self, heads):
677 '''return the tipmost branch head in heads'''
678 '''return the tipmost branch head in heads'''
678 tip = heads[-1]
679 tip = heads[-1]
679 for h in reversed(heads):
680 for h in reversed(heads):
680 if not self[h].closesbranch():
681 if not self[h].closesbranch():
681 tip = h
682 tip = h
682 break
683 break
683 return tip
684 return tip
684
685
685 def branchtip(self, branch):
686 def branchtip(self, branch):
686 '''return the tip node for a given branch'''
687 '''return the tip node for a given branch'''
687 if branch not in self.branchmap():
688 if branch not in self.branchmap():
688 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
689 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
689 return self._branchtip(self.branchmap()[branch])
690 return self._branchtip(self.branchmap()[branch])
690
691
691 def branchtags(self):
692 def branchtags(self):
692 '''return a dict where branch names map to the tipmost head of
693 '''return a dict where branch names map to the tipmost head of
693 the branch, open heads come before closed'''
694 the branch, open heads come before closed'''
694 bt = {}
695 bt = {}
695 for bn, heads in self.branchmap().iteritems():
696 for bn, heads in self.branchmap().iteritems():
696 bt[bn] = self._branchtip(heads)
697 bt[bn] = self._branchtip(heads)
697 return bt
698 return bt
698
699
699 def lookup(self, key):
700 def lookup(self, key):
700 return self[key].node()
701 return self[key].node()
701
702
702 def lookupbranch(self, key, remote=None):
703 def lookupbranch(self, key, remote=None):
703 repo = remote or self
704 repo = remote or self
704 if key in repo.branchmap():
705 if key in repo.branchmap():
705 return key
706 return key
706
707
707 repo = (remote and remote.local()) and remote or self
708 repo = (remote and remote.local()) and remote or self
708 return repo[key].branch()
709 return repo[key].branch()
709
710
710 def known(self, nodes):
711 def known(self, nodes):
711 nm = self.changelog.nodemap
712 nm = self.changelog.nodemap
712 pc = self._phasecache
713 pc = self._phasecache
713 result = []
714 result = []
714 for n in nodes:
715 for n in nodes:
715 r = nm.get(n)
716 r = nm.get(n)
716 resp = not (r is None or pc.phase(self, r) >= phases.secret)
717 resp = not (r is None or pc.phase(self, r) >= phases.secret)
717 result.append(resp)
718 result.append(resp)
718 return result
719 return result
719
720
720 def local(self):
721 def local(self):
721 return self
722 return self
722
723
723 def cancopy(self):
724 def cancopy(self):
724 return self.local() # so statichttprepo's override of local() works
725 return self.local() # so statichttprepo's override of local() works
725
726
726 def join(self, f):
727 def join(self, f):
727 return os.path.join(self.path, f)
728 return os.path.join(self.path, f)
728
729
729 def wjoin(self, f):
730 def wjoin(self, f):
730 return os.path.join(self.root, f)
731 return os.path.join(self.root, f)
731
732
732 def file(self, f):
733 def file(self, f):
733 if f[0] == '/':
734 if f[0] == '/':
734 f = f[1:]
735 f = f[1:]
735 return filelog.filelog(self.sopener, f)
736 return filelog.filelog(self.sopener, f)
736
737
737 def changectx(self, changeid):
738 def changectx(self, changeid):
738 return self[changeid]
739 return self[changeid]
739
740
740 def parents(self, changeid=None):
741 def parents(self, changeid=None):
741 '''get list of changectxs for parents of changeid'''
742 '''get list of changectxs for parents of changeid'''
742 return self[changeid].parents()
743 return self[changeid].parents()
743
744
744 def setparents(self, p1, p2=nullid):
745 def setparents(self, p1, p2=nullid):
745 copies = self.dirstate.setparents(p1, p2)
746 copies = self.dirstate.setparents(p1, p2)
746 if copies:
747 if copies:
747 # Adjust copy records, the dirstate cannot do it, it
748 # Adjust copy records, the dirstate cannot do it, it
748 # requires access to parents manifests. Preserve them
749 # requires access to parents manifests. Preserve them
749 # only for entries added to first parent.
750 # only for entries added to first parent.
750 pctx = self[p1]
751 pctx = self[p1]
751 for f in copies:
752 for f in copies:
752 if f not in pctx and copies[f] in pctx:
753 if f not in pctx and copies[f] in pctx:
753 self.dirstate.copy(copies[f], f)
754 self.dirstate.copy(copies[f], f)
754
755
755 def filectx(self, path, changeid=None, fileid=None):
756 def filectx(self, path, changeid=None, fileid=None):
756 """changeid can be a changeset revision, node, or tag.
757 """changeid can be a changeset revision, node, or tag.
757 fileid can be a file revision or node."""
758 fileid can be a file revision or node."""
758 return context.filectx(self, path, changeid, fileid)
759 return context.filectx(self, path, changeid, fileid)
759
760
760 def getcwd(self):
761 def getcwd(self):
761 return self.dirstate.getcwd()
762 return self.dirstate.getcwd()
762
763
763 def pathto(self, f, cwd=None):
764 def pathto(self, f, cwd=None):
764 return self.dirstate.pathto(f, cwd)
765 return self.dirstate.pathto(f, cwd)
765
766
766 def wfile(self, f, mode='r'):
767 def wfile(self, f, mode='r'):
767 return self.wopener(f, mode)
768 return self.wopener(f, mode)
768
769
769 def _link(self, f):
770 def _link(self, f):
770 return os.path.islink(self.wjoin(f))
771 return os.path.islink(self.wjoin(f))
771
772
772 def _loadfilter(self, filter):
773 def _loadfilter(self, filter):
773 if filter not in self.filterpats:
774 if filter not in self.filterpats:
774 l = []
775 l = []
775 for pat, cmd in self.ui.configitems(filter):
776 for pat, cmd in self.ui.configitems(filter):
776 if cmd == '!':
777 if cmd == '!':
777 continue
778 continue
778 mf = matchmod.match(self.root, '', [pat])
779 mf = matchmod.match(self.root, '', [pat])
779 fn = None
780 fn = None
780 params = cmd
781 params = cmd
781 for name, filterfn in self._datafilters.iteritems():
782 for name, filterfn in self._datafilters.iteritems():
782 if cmd.startswith(name):
783 if cmd.startswith(name):
783 fn = filterfn
784 fn = filterfn
784 params = cmd[len(name):].lstrip()
785 params = cmd[len(name):].lstrip()
785 break
786 break
786 if not fn:
787 if not fn:
787 fn = lambda s, c, **kwargs: util.filter(s, c)
788 fn = lambda s, c, **kwargs: util.filter(s, c)
788 # Wrap old filters not supporting keyword arguments
789 # Wrap old filters not supporting keyword arguments
789 if not inspect.getargspec(fn)[2]:
790 if not inspect.getargspec(fn)[2]:
790 oldfn = fn
791 oldfn = fn
791 fn = lambda s, c, **kwargs: oldfn(s, c)
792 fn = lambda s, c, **kwargs: oldfn(s, c)
792 l.append((mf, fn, params))
793 l.append((mf, fn, params))
793 self.filterpats[filter] = l
794 self.filterpats[filter] = l
794 return self.filterpats[filter]
795 return self.filterpats[filter]
795
796
796 def _filter(self, filterpats, filename, data):
797 def _filter(self, filterpats, filename, data):
797 for mf, fn, cmd in filterpats:
798 for mf, fn, cmd in filterpats:
798 if mf(filename):
799 if mf(filename):
799 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
800 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
800 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
801 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
801 break
802 break
802
803
803 return data
804 return data
804
805
805 @unfilteredpropertycache
806 @unfilteredpropertycache
806 def _encodefilterpats(self):
807 def _encodefilterpats(self):
807 return self._loadfilter('encode')
808 return self._loadfilter('encode')
808
809
809 @unfilteredpropertycache
810 @unfilteredpropertycache
810 def _decodefilterpats(self):
811 def _decodefilterpats(self):
811 return self._loadfilter('decode')
812 return self._loadfilter('decode')
812
813
813 def adddatafilter(self, name, filter):
814 def adddatafilter(self, name, filter):
814 self._datafilters[name] = filter
815 self._datafilters[name] = filter
815
816
816 def wread(self, filename):
817 def wread(self, filename):
817 if self._link(filename):
818 if self._link(filename):
818 data = os.readlink(self.wjoin(filename))
819 data = os.readlink(self.wjoin(filename))
819 else:
820 else:
820 data = self.wopener.read(filename)
821 data = self.wopener.read(filename)
821 return self._filter(self._encodefilterpats, filename, data)
822 return self._filter(self._encodefilterpats, filename, data)
822
823
823 def wwrite(self, filename, data, flags):
824 def wwrite(self, filename, data, flags):
824 data = self._filter(self._decodefilterpats, filename, data)
825 data = self._filter(self._decodefilterpats, filename, data)
825 if 'l' in flags:
826 if 'l' in flags:
826 self.wopener.symlink(data, filename)
827 self.wopener.symlink(data, filename)
827 else:
828 else:
828 self.wopener.write(filename, data)
829 self.wopener.write(filename, data)
829 if 'x' in flags:
830 if 'x' in flags:
830 util.setflags(self.wjoin(filename), False, True)
831 util.setflags(self.wjoin(filename), False, True)
831
832
832 def wwritedata(self, filename, data):
833 def wwritedata(self, filename, data):
833 return self._filter(self._decodefilterpats, filename, data)
834 return self._filter(self._decodefilterpats, filename, data)
834
835
835 def transaction(self, desc):
836 def transaction(self, desc):
836 tr = self._transref and self._transref() or None
837 tr = self._transref and self._transref() or None
837 if tr and tr.running():
838 if tr and tr.running():
838 return tr.nest()
839 return tr.nest()
839
840
840 # abort here if the journal already exists
841 # abort here if the journal already exists
841 if os.path.exists(self.sjoin("journal")):
842 if os.path.exists(self.sjoin("journal")):
842 raise error.RepoError(
843 raise error.RepoError(
843 _("abandoned transaction found - run hg recover"))
844 _("abandoned transaction found - run hg recover"))
844
845
845 self._writejournal(desc)
846 self._writejournal(desc)
846 renames = [(x, undoname(x)) for x in self._journalfiles()]
847 renames = [(x, undoname(x)) for x in self._journalfiles()]
847
848
848 tr = transaction.transaction(self.ui.warn, self.sopener,
849 tr = transaction.transaction(self.ui.warn, self.sopener,
849 self.sjoin("journal"),
850 self.sjoin("journal"),
850 aftertrans(renames),
851 aftertrans(renames),
851 self.store.createmode)
852 self.store.createmode)
852 self._transref = weakref.ref(tr)
853 self._transref = weakref.ref(tr)
853 return tr
854 return tr
854
855
855 def _journalfiles(self):
856 def _journalfiles(self):
856 return (self.sjoin('journal'), self.join('journal.dirstate'),
857 return (self.sjoin('journal'), self.join('journal.dirstate'),
857 self.join('journal.branch'), self.join('journal.desc'),
858 self.join('journal.branch'), self.join('journal.desc'),
858 self.join('journal.bookmarks'),
859 self.join('journal.bookmarks'),
859 self.sjoin('journal.phaseroots'))
860 self.sjoin('journal.phaseroots'))
860
861
861 def undofiles(self):
862 def undofiles(self):
862 return [undoname(x) for x in self._journalfiles()]
863 return [undoname(x) for x in self._journalfiles()]
863
864
864 def _writejournal(self, desc):
865 def _writejournal(self, desc):
865 self.opener.write("journal.dirstate",
866 self.opener.write("journal.dirstate",
866 self.opener.tryread("dirstate"))
867 self.opener.tryread("dirstate"))
867 self.opener.write("journal.branch",
868 self.opener.write("journal.branch",
868 encoding.fromlocal(self.dirstate.branch()))
869 encoding.fromlocal(self.dirstate.branch()))
869 self.opener.write("journal.desc",
870 self.opener.write("journal.desc",
870 "%d\n%s\n" % (len(self), desc))
871 "%d\n%s\n" % (len(self), desc))
871 self.opener.write("journal.bookmarks",
872 self.opener.write("journal.bookmarks",
872 self.opener.tryread("bookmarks"))
873 self.opener.tryread("bookmarks"))
873 self.sopener.write("journal.phaseroots",
874 self.sopener.write("journal.phaseroots",
874 self.sopener.tryread("phaseroots"))
875 self.sopener.tryread("phaseroots"))
875
876
876 def recover(self):
877 def recover(self):
877 lock = self.lock()
878 lock = self.lock()
878 try:
879 try:
879 if os.path.exists(self.sjoin("journal")):
880 if os.path.exists(self.sjoin("journal")):
880 self.ui.status(_("rolling back interrupted transaction\n"))
881 self.ui.status(_("rolling back interrupted transaction\n"))
881 transaction.rollback(self.sopener, self.sjoin("journal"),
882 transaction.rollback(self.sopener, self.sjoin("journal"),
882 self.ui.warn)
883 self.ui.warn)
883 self.invalidate()
884 self.invalidate()
884 return True
885 return True
885 else:
886 else:
886 self.ui.warn(_("no interrupted transaction available\n"))
887 self.ui.warn(_("no interrupted transaction available\n"))
887 return False
888 return False
888 finally:
889 finally:
889 lock.release()
890 lock.release()
890
891
891 def rollback(self, dryrun=False, force=False):
892 def rollback(self, dryrun=False, force=False):
892 wlock = lock = None
893 wlock = lock = None
893 try:
894 try:
894 wlock = self.wlock()
895 wlock = self.wlock()
895 lock = self.lock()
896 lock = self.lock()
896 if os.path.exists(self.sjoin("undo")):
897 if os.path.exists(self.sjoin("undo")):
897 return self._rollback(dryrun, force)
898 return self._rollback(dryrun, force)
898 else:
899 else:
899 self.ui.warn(_("no rollback information available\n"))
900 self.ui.warn(_("no rollback information available\n"))
900 return 1
901 return 1
901 finally:
902 finally:
902 release(lock, wlock)
903 release(lock, wlock)
903
904
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcache = None # in UTF-8
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        if 'hiddenrevs' in vars(self):
            del self.hiddenrevs

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previously known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

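    # Hedged sketch, not from the original source: lock() and wlock() below
    # are the intended callers of _lock(). A direct call would look roughly
    # like this, with no-op release/acquire callbacks:
    #
    #     l = repo._lock(repo.sjoin("lock"), True, None, None, 'example')
    #     try:
    #         pass  # ... mutate the store while holding the lock ...
    #     finally:
    #         l.release()
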
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

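    # Minimal usage sketch, assuming the caller may or may not hold the
    # store lock (not part of the original file): the callback is deferred
    # until the lock is released, or run immediately if no lock is held.
    #
    #     def notify():
    #         repo.ui.status("lock released\n")
    #     repo._afterlock(notify)
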
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

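    # Ordering note with a hedged sketch (not in the original source): when
    # both locks are needed, wlock is conventionally acquired before lock,
    # as commit() does below:
    #
    #     wlock = repo.wlock()
    #     try:
    #         lock = repo.lock()
    #         try:
    #             pass  # ... update both dirstate and store ...
    #         finally:
    #             lock.release()
    #     finally:
    #         wlock.release()
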
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

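    # Worked example of the copy metadata above, as a sketch rather than
    # output from the original code: if 'bar' was renamed from 'foo', the
    # rename is recorded in the filelog metadata instead of the parents,
    # roughly:
    #
    #     meta = {'copy': 'foo', 'copyrev': hex(<filenode of foo>)}
    #     fparent1, fparent2 = nullid, newfparent
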
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

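    # Hedged usage sketch (not part of the original file): committing all
    # working-directory changes, in the spirit of 'hg commit -m':
    #
    #     node = repo.commit(text='fix the build', user='alice')
    #     if node is None:
    #         repo.ui.status("nothing changed\n")
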
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets; if a parent already has a higher phase,
                # the resulting phase will be compliant anyway.
                #
                # if the minimal phase is 0, we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self)
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

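    # Sketch only, not in the original source: commit() above feeds a
    # workingctx into commitctx(); an in-memory commit can use
    # context.memctx the same way, where 'filectxfn' is a callback you
    # supply to produce each file's context:
    #
    #     ctx = context.memctx(self, (p1, p2), "message",
    #                          ["a.txt"], filectxfn, user="alice")
    #     node = self.commitctx(ctx)
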
    @unfilteredmethod
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchhead cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerate it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            cache = self._branchcache
            cache.update(self, ctxgen)
            cache.write(self)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

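    # Hedged example, not from the original file: walking all Python files
    # in the working directory with an explicit matcher:
    #
    #     m = matchmod.match(repo.root, '', ['glob:**.py'])
    #     for f in repo.walk(m):
    #         repo.ui.write(f + '\n')
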
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

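    # Usage sketch (not part of the original source): the 7-tuple returned
    # above always unpacks in this fixed order:
    #
    #     (modified, added, removed, deleted,
    #      unknown, ignored, clean) = repo.status(unknown=True, ignored=True)
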
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

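    # Worked example, not in the original file: for each (top, bottom) pair,
    # between() samples the first-parent chain at exponentially growing
    # distances (1, 2, 4, 8, ...) from top. For a linear chain r10..r0 with
    # top=r10 and bottom=r0 it returns [r9, r8, r6, r2].
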
    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
                    self.invalidatevolatilesets()
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

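    # Hedged sketch (not part of the original file): pulling from a peer.
    # 'otherpeer' stands in for a peer object obtained elsewhere; this
    # module does not construct peers itself:
    #
    #     result = repo.pull(otherpeer, heads=None, force=False)
    #     # result is 0 when no changes were found, otherwise it is the
    #     # value returned by addchangegroup()
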
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to a remote repo:
        #
        # addchangegroup assumes the local user can lock the remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes the local user cannot lock the remote repo
        # (new ssh servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        unfi = self.unfiltered()
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(unfi, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(unfi, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore is false there are no obsolete
                        # markers, so we can skip the iteration
                        if unfi.obsstore:
                            # these messages are defined here to respect the
                            # 80-char line limit
                            mso = _("push includes obsolete changeset: %s!")
                            mst = "push includes %s changeset: %s!"
                            # plain versions for the i18n tool to detect them
                            _("push includes unstable changeset: %s!")
                            _("push includes bumped changeset: %s!")
                            _("push includes divergent changeset: %s!")
                            # If we are going to push and there is at least
                            # one obsolete or unstable changeset in missing,
                            # then at least one of the missing heads will be
                            # obsolete or unstable, so checking the heads
                            # only is enough.
                            for node in outgoing.missingheads:
                                ctx = unfi[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.troubled():
                                    raise util.Abort(_(mst)
                                                     % (ctx.troubles()[0],
                                                        ctx))
                        discovery.checkheads(unfi, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed; synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # We want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public-only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads,
                                                     remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on remote but
                    # public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly.
                    outdated = unfi.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in unfi._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in unfi:
                    cr = unfi[nr]
                    cl = unfi[nl]
                    if bookmarks.validdest(unfi, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

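    # Example (illustrative only): interpreting push() return values on the
    # caller side, following the docstring above:
    #
    #     ret = repo.push(remote)
    #     if ret is None:
    #         repo.ui.status('nothing to push\n')
    #     elif ret == 0:
    #         repo.ui.warn('push failed with an HTTP error\n')
    #     else:
    #         repo.ui.status('pushed (head-count code %d)\n' % ret)
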
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset
        in the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = cl.ancestors([cl.rev(n) for n in bases])
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between
        the ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use
        [nullid].

        The nodes in common might not all be known locally due to the way
        the current discovery protocol works.
        """
        cl = self.changelog
        if common:
            hasnode = cl.hasnode
            common = [n for n in common if hasnode(n)]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

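    # Example (illustrative only): getbundle() computes what 'heads' have
    # that 'common' does not; with both left at their defaults it yields
    # (roughly) a whole-repo bundle:
    #
    #     cg = repo.getbundle('bundle', heads=None, common=None)
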
    @unfilteredmethod
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our
            # functions back to lookup the owning changenode and collect
            # information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

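    # Example (illustrative only): the unbundle10 object returned above
    # exposes read(); its bytes can be written out as a bundle file, e.g.
    # (sketch, assuming the 'HG10BZ' bundle type is supported):
    #
    #     cg = repo.changegroupsubset([nullid], repo.heads(), 'bundle')
    #     changegroup.writebundle(cg, 'out.hg', 'HG10BZ')
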
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    @unfilteredmethod
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    @unfilteredmethod
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            self.invalidatevolatilesets()

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret.
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                if srctype != 'strip':
                    # During strip, branchcache is invalid but the coming
                    # call to `destroyed` will repair it.
                    # In other cases we can safely update the cache on disk.
                    branchmap.updatecache(self)
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

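    # Example (illustrative only): decoding the return value per the
    # docstring above:
    #
    #     ret = repo.addchangegroup(source, 'pull', url)
    #     if ret == 0:
    #         pass                      # nothing changed (or no source)
    #     elif ret > 0:
    #         added_heads = ret - 1     # 1 means head count unchanged
    #     else:
    #         removed_heads = -ret - 1  # -2..-n means heads were removed
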
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    self._branchcache = cache
                    cache.write(self)
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

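    # Wire-format summary of the loop above (descriptive, not normative):
    # the stream body is one status line, then a '<files> <bytes>' line,
    # then for each file a '<name>\0<size>' header followed by exactly
    # <size> bytes of file data, e.g.:
    #
    #     l = fp.readline()                         # '5 123456\n'
    #     total_files, total_bytes = map(int, l.split(' ', 1))
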
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

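    # Capability summary for the negotiation above (descriptive):
    #
    #     remote.capable('stream')       # plain revlogv1 streaming server
    #     remote.capable('streamreqs')   # e.g. 'revlogv1,generaldelta';
    #                                    # stream only if we support them all
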
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

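    # Example (illustrative only): pushkey/listkeys form the generic
    # key-value channel used above for phases, bookmarks and obsolete
    # markers. 'oldhex'/'newhex' stand in for 40-char hex nodes:
    #
    #     marks = repo.listkeys('bookmarks')           # {name: hex node}
    #     ok = repo.pushkey('bookmarks', 'stable', oldhex, newhex)
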
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

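    # Example (illustrative only): savecommitmessage() persists the text
    # and returns a repo-relative path, typically '.hg/last-message.txt':
    #
    #     msgfile = repo.savecommitmessage('WIP: draft message')
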
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

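# Example (illustrative only): aftertrans() returns a closure performing
# the queued renames, and undoname() below maps a journal file to its undo
# counterpart:
#
#     cb = aftertrans([('journal', 'undo')])
#     cb()                                  # renames journal -> undo
#     undoname('/repo/.hg/journal')         # -> '/repo/.hg/undo'
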
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True