localrepo: drop unused variable
Matt Mackall
r20226:76d93641 default
@@ -1,2448 +1,2447 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
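
# Note: a method decorated with @unfilteredmethod always operates on the
# full repository, even when invoked through a filtered repoview.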

MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None):
        return self._repo.getbundle(source, heads=heads, common=common,
                                    bundlecaps=bundlecaps)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None
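    # filtername is None on the unfiltered repository; repoview proxies
    # (built by filtered() below) override it with the name of the active
    # filter, e.g. 'served'.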

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are of the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
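        # Only requirements the revlog layer understands (those listed in
        # openerreqs: 'revlogv1', 'generaldelta') are exposed to the store
        # opener as options.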
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
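        # Walk candidate prefixes from longest to shortest: for 'sub/dir/f'
        # this tries 'sub/dir/f', then 'sub/dir', then 'sub', recursing into
        # the first prefix that is a subrepo in the working context.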
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
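        # HG_PENDING is set by the hook machinery while a transaction is
        # open; when it points at this repo, pending changelog data is also
        # read from 00changelog.i.a so hooks can see in-progress changesets.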
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
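        # _branchcaches maps filter names to branch head caches; updatecache
        # refreshes the entry for this repo's filtername before it is
        # returned.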
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
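        # a node is reported known only if it is present in the changelog
        # and its phase is below secret; secret changesets must not be
        # advertised to peers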
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records: the dirstate cannot do it, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
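        # parse the config section named by 'filter' ([encode] or [decode]):
        # each entry maps a file pattern to either a registered data filter
        # or a shell command ('!' disables the pattern)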
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
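        # 'l' in flags writes the data as a symlink target; 'x' sets the
        # executable bit on the written file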
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc, report=None):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
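        # aftertrans (defined later in this file) renames the journal.*
        # files to undo.* once the transaction closes successfully; those
        # undo.* files are what rollback() later restores from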
820 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
820 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
821 rp = report and report or self.ui.warn
821 rp = report and report or self.ui.warn
822 tr = transaction.transaction(rp, self.sopener,
822 tr = transaction.transaction(rp, self.sopener,
823 "journal",
823 "journal",
824 aftertrans(renames),
824 aftertrans(renames),
825 self.store.createmode)
825 self.store.createmode)
826 self._transref = weakref.ref(tr)
826 self._transref = weakref.ref(tr)
827 return tr
827 return tr
828
828
829 def _journalfiles(self):
829 def _journalfiles(self):
830 return ((self.svfs, 'journal'),
830 return ((self.svfs, 'journal'),
831 (self.vfs, 'journal.dirstate'),
831 (self.vfs, 'journal.dirstate'),
832 (self.vfs, 'journal.branch'),
832 (self.vfs, 'journal.branch'),
833 (self.vfs, 'journal.desc'),
833 (self.vfs, 'journal.desc'),
834 (self.vfs, 'journal.bookmarks'),
834 (self.vfs, 'journal.bookmarks'),
835 (self.svfs, 'journal.phaseroots'))
835 (self.svfs, 'journal.phaseroots'))
836
836
837 def undofiles(self):
837 def undofiles(self):
838 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
838 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
839
839
840 def _writejournal(self, desc):
840 def _writejournal(self, desc):
841 self.opener.write("journal.dirstate",
841 self.opener.write("journal.dirstate",
842 self.opener.tryread("dirstate"))
842 self.opener.tryread("dirstate"))
843 self.opener.write("journal.branch",
843 self.opener.write("journal.branch",
844 encoding.fromlocal(self.dirstate.branch()))
844 encoding.fromlocal(self.dirstate.branch()))
845 self.opener.write("journal.desc",
845 self.opener.write("journal.desc",
846 "%d\n%s\n" % (len(self), desc))
846 "%d\n%s\n" % (len(self), desc))
847 self.opener.write("journal.bookmarks",
847 self.opener.write("journal.bookmarks",
848 self.opener.tryread("bookmarks"))
848 self.opener.tryread("bookmarks"))
849 self.sopener.write("journal.phaseroots",
849 self.sopener.write("journal.phaseroots",
850 self.sopener.tryread("phaseroots"))
850 self.sopener.tryread("phaseroots"))
851
851
852 def recover(self):
852 def recover(self):
853 lock = self.lock()
853 lock = self.lock()
854 try:
854 try:
855 if self.svfs.exists("journal"):
855 if self.svfs.exists("journal"):
856 self.ui.status(_("rolling back interrupted transaction\n"))
856 self.ui.status(_("rolling back interrupted transaction\n"))
857 transaction.rollback(self.sopener, "journal",
857 transaction.rollback(self.sopener, "journal",
858 self.ui.warn)
858 self.ui.warn)
859 self.invalidate()
859 self.invalidate()
860 return True
860 return True
861 else:
861 else:
862 self.ui.warn(_("no interrupted transaction available\n"))
862 self.ui.warn(_("no interrupted transaction available\n"))
863 return False
863 return False
864 finally:
864 finally:
865 lock.release()
865 lock.release()
866
866
867 def rollback(self, dryrun=False, force=False):
867 def rollback(self, dryrun=False, force=False):
868 wlock = lock = None
868 wlock = lock = None
869 try:
869 try:
870 wlock = self.wlock()
870 wlock = self.wlock()
871 lock = self.lock()
871 lock = self.lock()
872 if self.svfs.exists("undo"):
872 if self.svfs.exists("undo"):
873 return self._rollback(dryrun, force)
873 return self._rollback(dryrun, force)
874 else:
874 else:
875 self.ui.warn(_("no rollback information available\n"))
875 self.ui.warn(_("no rollback information available\n"))
876 return 1
876 return 1
877 finally:
877 finally:
878 release(lock, wlock)
878 release(lock, wlock)
879
879
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

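    # Sketch of the 'undo.desc' layout parsed above (inferred from this
    # method, not a documented format): the first line is the old changelog
    # length, the second the transaction description, and an optional third
    # line carries extra detail, e.g.:
    #
    #     3120
    #     commit
    #     <optional detail>
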
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
        delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

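    # Illustrative sketch: scheduling work for when the current lock is
    # released. If no lock is held, _afterlock() invokes the callback
    # immediately, so callers need no special casing.
    #
    #     def announce():
    #         repo.ui.status('post-release hook ran\n')
    #     repo._afterlock(announce)
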
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

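    # Illustrative usage sketch: callers pair the returned lock with
    # try/finally, as done throughout this file. Acquisition blocks for up
    # to the configured ui.timeout (600 seconds by default, see _lock).
    #
    #     l = repo.lock()
    #     try:
    #         ...  # modify the store
    #     finally:
    #         l.release()
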
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

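    # Sketch of the rename metadata written above (taken from this method,
    # shown here for orientation): for a copied or renamed file, fparent1
    # is set to nullid and the copy source travels in the filelog meta
    # dict, e.g.
    #
    #     meta = {'copy': 'foo', 'copyrev': hex(crev)}
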
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

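    # Illustrative sketch: committing a subset of files through commit()
    # with a match object, roughly as a command would. The file names here
    # are hypothetical.
    #
    #     m = matchmod.match(repo.root, '', ['a.txt', 'lib/b.py'])
    #     node = repo.commit(text='fix parser', user='alice', match=m)
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
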
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

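    # Illustrative sketch of consuming the 7-tuple returned by status(),
    # in the order established above:
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(unknown=True, ignored=True, clean=True)
    #     for f in modified:
    #         repo.ui.write('M %s\n' % f)
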
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

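    # Illustrative sketch: printing the open heads of a named branch,
    # newest first, via branchheads() above. `short` is imported at the
    # top of this module.
    #
    #     for n in repo.branchheads('default'):
    #         repo.ui.write('%s\n' % short(n))
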
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

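    # Worked example for between() (an observation about the loop above,
    # not a new behavior): with i counting steps from top and f doubling
    # on every hit, the nodes at distances 1, 2, 4, 8, ... from top are
    # collected. For a 10-step top->bottom chain the returned list holds
    # the nodes 1, 2, 4 and 8 steps below top, giving the exponentially
    # spaced samples the old discovery protocol searches over.
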
    def pull(self, remote, heads=None, force=False):
        if remote.local():
            missing = set(remote.requirements) - self.supported
            if missing:
                msg = _("required features are not"
                        " supported in the destination:"
                        " %s") % (', '.join(sorted(missing)))
                raise util.Abort(msg)

        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self.unfiltered(), remote,
                                               heads=heads, force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    # TODO: get bundlecaps from remote
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                # we use the unfiltered changelog here because hidden
                # revisions must be taken into account for phase
                # synchronization. They may become public and become
                # visible again.
                result = self.addchangegroup(cg, 'pull', remote.url())

            # compute target subset
            if heads is None:
                # We pulled every thing possible
                # sync on everything common
                subset = common + rheads
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            def gettransaction():
                if tr is None:
                    return self.transaction(trname)
                return tr

            obstr = obsolete.syncpull(self, remote, gettransaction)
            if obstr is not None:
                tr = obstr

            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

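    # Illustrative sketch: pulling everything from a peer. `other` is
    # assumed to be a repository or peer object (e.g. from hg.peer());
    # the return value is addchangegroup()'s result, or 0 when no changes
    # were found (see above).
    #
    #     result = repo.pull(other, heads=None, force=False)
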
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

1749 def push(self, remote, force=False, revs=None, newbranch=False):
1748 def push(self, remote, force=False, revs=None, newbranch=False):
1750 '''Push outgoing changesets (limited by revs) from the current
1749 '''Push outgoing changesets (limited by revs) from the current
1751 repository to remote. Return an integer:
1750 repository to remote. Return an integer:
1752 - None means nothing to push
1751 - None means nothing to push
1753 - 0 means HTTP error
1752 - 0 means HTTP error
1754 - 1 means we pushed and remote head count is unchanged *or*
1753 - 1 means we pushed and remote head count is unchanged *or*
1755 we have outgoing changesets but refused to push
1754 we have outgoing changesets but refused to push
1756 - other values as described by addchangegroup()
1755 - other values as described by addchangegroup()
1757 '''
1756 '''
1758 if remote.local():
1757 if remote.local():
1759 missing = set(self.requirements) - remote.local().supported
1758 missing = set(self.requirements) - remote.local().supported
1760 if missing:
1759 if missing:
1761 msg = _("required features are not"
1760 msg = _("required features are not"
1762 " supported in the destination:"
1761 " supported in the destination:"
1763 " %s") % (', '.join(sorted(missing)))
1762 " %s") % (', '.join(sorted(missing)))
1764 raise util.Abort(msg)
1763 raise util.Abort(msg)
1765
1764
1766 # there are two ways to push to remote repo:
1765 # there are two ways to push to remote repo:
1767 #
1766 #
1768 # addchangegroup assumes local user can lock remote
1767 # addchangegroup assumes local user can lock remote
1769 # repo (local filesystem, old ssh servers).
1768 # repo (local filesystem, old ssh servers).
1770 #
1769 #
1771 # unbundle assumes local user cannot lock remote repo (new ssh
1770 # unbundle assumes local user cannot lock remote repo (new ssh
1772 # servers, http servers).
1771 # servers, http servers).
1773
1772
1774 if not remote.canpush():
1773 if not remote.canpush():
1775 raise util.Abort(_("destination does not support push"))
1774 raise util.Abort(_("destination does not support push"))
        unfi = self.unfiltered()
        def localphasemove(nodes, phase=phases.public):
            """move <nodes> to <phase> in the local source repo"""
            if locallock is not None:
                phases.advanceboundary(self, phase, nodes)
            else:
                # repo is not locked, do not change any phases!
                # Inform the user that phases should have been moved when
                # applicable.
                actualmoves = [n for n in nodes if phase < self[n].phase()]
                phasestr = phases.phasenames[phase]
                if actualmoves:
                    self.ui.status(_('cannot lock source repo, skipping local'
                                     ' %s phase update\n') % phasestr)
        # get local lock as we might write phase data
        locallock = None
        try:
            locallock = self.lock()
        except IOError, err:
            if err.errno != errno.EACCES:
                raise
            # source repo cannot be locked.
            # We do not abort the push, but just disable the local phase
            # synchronisation.
            msg = 'cannot lock source repository: %s\n' % err
            self.ui.debug(msg)
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(unfi, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(unfi, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore is empty --> no obsolete changesets;
                        # skip the iteration entirely
                        if unfi.obsstore:
                            # these messages are defined here to stay within
                            # the 80-char limit
                            mso = _("push includes obsolete changeset: %s!")
                            mst = "push includes %s changeset: %s!"
                            # plain versions for i18n tool to detect them
                            _("push includes unstable changeset: %s!")
                            _("push includes bumped changeset: %s!")
                            _("push includes divergent changeset: %s!")
                            # If we are about to push and there is at least
                            # one obsolete or unstable changeset in missing,
                            # then at least one of the missing heads will be
                            # obsolete or unstable. So checking heads only
                            # is ok.
                            for node in outgoing.missingheads:
                                ctx = unfi[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.troubled():
                                    raise util.Abort(_(mst)
                                                     % (ctx.troubles()[0],
                                                        ctx))
                    newbm = self.ui.configlist('bookmarks', 'pushing')
                    discovery.checkheads(unfi, remote, outgoing,
                                         remoteheads, newbranch,
                                         bool(inc), newbm)

                    # TODO: get bundlecaps from remote
                    bundlecaps = None
                    # create a changegroup from local
                    if revs is None and not (outgoing.excluded
                                             or self.changelog.filteredrevs):
                        # push everything,
                        # use the fast path, no race possible on push
                        bundler = changegroup.bundle10(self, bundlecaps)
                        cg = self._changegroupsubset(outgoing,
                                                     bundler,
                                                     'push',
                                                     fastpath=True)
                    else:
                        cg = self.getlocalbundle('push', outgoing, bundlecaps)

                    # apply changegroup to remote
                    if unbundle:
                        # The local repo finds heads on the server, then
                        # finds out which revs it must push. Once the revs
                        # are transferred, if the server finds it has
                        # different heads (someone else won a commit/push
                        # race), it aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if (self.ui.configbool('ui', '_usedassubrepo', False)
                    and remotephases    # server supports phases
                    and ret is None # nothing was pushed
                    and remotephases.get('publishing', False)):
                    # When:
                    # - this is a subrepo push
                    # - and the remote supports phases
                    # - and no changeset was pushed
                    # - and the remote is publishing
                    # We may be in the issue 3871 case!
                    # We drop the courtesy phase synchronisation so that
                    # changesets which may still be draft locally are
                    # published on the remote.
                    remotephases = {'publishing': 'True'}
                if not remotephases: # old server or public only repo
                    localphasemove(cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads,
                                                     remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        localphasemove(cheads)
                    else: # publish = False
                        localphasemove(pheads)
                        localphasemove(cheads, phases.draft)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public
                    # here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly a set of roots; we may want to ensure it
                    # XXX is, but that is costly.
                    outdated = unfi.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                obsolete.syncpush(self, remote)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            if locallock is not None:
                locallock.release()

        bookmarks.updateremote(self.ui, unfi, remote, revs)
        return ret

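    # A minimal sketch, not in the original source, of how a caller might
    # act on push()'s return value per the docstring above ('repo' and
    # 'remote' are assumed to be an existing localrepository and peer):
    #
    #     ret = repo.push(remote)
    #     if ret is None:
    #         pass        # nothing to push
    #     elif ret == 0:
    #         pass        # HTTP error while unbundling on the remote
    #     else:
    #         pass        # pushed; value encodes the remote head-count change
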
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        # TODO: remove call to nodesbetween.
        csets, bases, heads = cl.nodesbetween(bases, heads)
        discbases = []
        for n in bases:
            discbases.extend([p for p in cl.parents(n) if p != nullid])
        outgoing = discovery.outgoing(cl, discbases, heads)
        bundler = changegroup.bundle10(self)
        return self._changegroupsubset(outgoing, bundler, source)

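    # Hedged usage sketch, not in the original source: with empty bases the
    # method falls back to [nullid], so this produces a changegroup covering
    # the whole repository; 'other' is a hypothetical second local repo:
    #
    #     cg = repo.changegroupsubset([], repo.heads(), 'bundle')
    #     other.addchangegroup(cg, 'pull', repo.url())
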
    def getlocalbundle(self, source, outgoing, bundlecaps=None):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        bundler = changegroup.bundle10(self, bundlecaps)
        return self._changegroupsubset(outgoing, bundler, source)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            hasnode = cl.hasnode
            common = [n for n in common if hasnode(n)]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads),
                                   bundlecaps=bundlecaps)

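    # A hedged sketch, not in the original source: request everything the
    # local repo has beyond 'commonnodes', a hypothetical list of nodes
    # known to both sides:
    #
    #     cg = repo.getbundle('pull', heads=None, common=commonnodes)
    #     if cg is None:
    #         pass    # all local heads are ancestors of common: no bundle
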
    @unfilteredmethod
    def _changegroupsubset(self, outgoing, bundler, source,
                           fastpath=False):
        commonrevs = outgoing.common
        csets = outgoing.missing
        heads = outgoing.missingheads
        # We go through the fast path if we get told to, or if all (unfiltered)
        # heads have been requested (since we then know that all linkrevs will
        # be pulled by the client).
        heads.sort()
        fastpathlinkrev = fastpath or (
            self.filtername is None and heads == sorted(self.heads()))

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)
        gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
        return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    @unfilteredmethod
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            newrevs, newfiles = self.addchangegroupfiles(source, revmap, trp,
                                                         pr, needfiles)
            revisions += newrevs
            files += newfiles

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            self.invalidatevolatilesets()

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret.
                #
                # We should not use 'added' here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                if srctype != 'strip':
                    # During strip, the branchcache is invalid but the coming
                    # call to `destroyed` will repair it.
                    # In other cases we can safely update the cache on disk.
                    branchmap.updatecache(self.filtered('served'))
                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(self):
                        return

                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)

                    newheads = [h for h in self.heads() if h not in oldheads]
                    self.ui.log("incoming",
                                "%s incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

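    # Worked examples, not in the original source, of the return values
    # computed above from dh, the change in head count:
    #   - linear changesets landed on an existing head: dh == 0  -> returns  1
    #   - one new branch head appeared:                 dh == 1  -> returns  2
    #   - a merge removed one head:                     dh == -1 -> returns -2
    # The +1/-1 shift keeps 0 reserved for "nothing changed".
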
    def addchangegroupfiles(self, source, revmap, trp, pr, needfiles):
        revisions = 0
        files = 0
        while True:
            chunkdata = source.filelogheader()
            if not chunkdata:
                break
            f = chunkdata["filename"]
            self.ui.debug("adding %s revisions\n" % f)
            pr()
            fl = self.file(f)
            o = len(fl)
            if not fl.addgroup(source, revmap, trp):
                raise util.Abort(_("received file revlog group is empty"))
            revisions += len(fl) - o
            files += 1
            if f in needfiles:
                needs = needfiles[f]
                for new in xrange(o, len(fl)):
                    n = fl.node(new)
                    if n in needs:
                        needs.remove(n)
                    else:
                        raise util.Abort(
                            _("received spurious file revlog entry"))
                if not needs:
                    del needfiles[f]
        self.ui.progress(_('files'), None)

        for f, needs in needfiles.iteritems():
            fl = self.file(f)
            for n in needs:
                try:
                    fl.rev(n)
                except error.LookupError:
                    raise util.Abort(
                        _('missing file data for %s:%s - run hg verify') %
                        (f, hex(n)))

        return revisions, files

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible
                    # filters above served are unlikely to be fetched by a
                    # clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

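    # For reference, a sketch of the stream_out wire format as consumed
    # above (inferred from the parsing code, not part of the original
    # source):
    #
    #     0\n                           # status: 0 ok, 1 forbidden, 2 lock
    #     <total_files> <total_bytes>\n
    #     <name>\0<size>\n              # then <size> bytes of revlog data,
    #     ...                           # repeated for each file
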
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

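    # Hedged usage sketch, not in the original source; 'remote' (a peer)
    # and 'somenode' (a changeset id) are assumed to exist:
    #
    #     repo.clone(remote, stream=True)       # streaming when possible
    #     repo.clone(remote, heads=[somenode])  # heads force the pull path
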
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

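    # A minimal sketch, not in the original source: the phase exchange in
    # push() above publishes a head through this same interface, moving it
    # from draft to public ('ctx' is assumed to be a changectx):
    #
    #     ok = repo.pushkey('phases', ctx.hex(),
    #                       str(phases.draft), str(phases.public))
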
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

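# Note, not in the original source: aftertrans closes over plain tuples
# instead of keeping a reference back to the repository, so the
# transaction's rename hook cannot create a reference cycle. A caller would
# pass it when opening a transaction, roughly (argument names hypothetical):
#
#     tr = transaction.transaction(report, opener, journalname,
#                                  after=aftertrans(renames))
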
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

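# For example (illustrative, not in the original source):
#
#     undoname('.hg/store/journal')            -> '.hg/store/undo'
#     undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'
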
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True