localrepo: remove unused repo.branchtags()/_branchtip() methods
Brodie Rao
r20195:4274eda1 default
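Migration note: code that still calls the removed repo.branchtags() can rebuild the same {branch: tipmost head} mapping from the branchmap API this changeset keeps; repo.branchmap() returns a dict-like {branch: [branchheads]}, and its branchtip(branch) method picks the tipmost head with open heads preferred over closed ones, as the removed helpers did. A minimal sketch; the helper name branchtips and the variable repo are illustrative, not part of this change:

    def branchtips(repo):
        # branchmap() returns a dict-like {branch: [branchheads]},
        # per its docstring below
        bm = repo.branchmap()
        # iterating the map itself only yields known branches, so
        # branchtip() cannot raise KeyError here
        return dict((branch, bm.branchtip(branch)) for branch in bm)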
@@ -1,2467 +1,2449 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from node import hex, nullid, short
 from i18n import _
 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock as lockmod
 import transaction, store, encoding
 import scmutil, util, extensions, hook, error, revset
 import match as matchmod
 import merge as mergemod
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect
 import branchmap, pathutil
 propertycache = util.propertycache
 filecache = scmutil.filecache
 
 class repofilecache(filecache):
     """All filecache usage on repo are done for logic that should be unfiltered
     """
 
     def __get__(self, repo, type=None):
         return super(repofilecache, self).__get__(repo.unfiltered(), type)
     def __set__(self, repo, value):
         return super(repofilecache, self).__set__(repo.unfiltered(), value)
     def __delete__(self, repo):
         return super(repofilecache, self).__delete__(repo.unfiltered())
 
 class storecache(repofilecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)
 
 class unfilteredpropertycache(propertycache):
     """propertycache that apply to unfiltered repo only"""
 
     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)
 
 class filteredpropertycache(propertycache):
     """propertycache that must take filtering in account"""
 
     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)
 
 
 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())
 
 def unfilteredmethod(orig):
     """decorate method that always need to be run on unfiltered version"""
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
     return wrapper
 
 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
 
 class localpeer(peer.peerrepository):
     '''peer for a local repo; reflects only the most recent API'''
 
     def __init__(self, repo, caps=MODERNCAPS):
         peer.peerrepository.__init__(self)
         self._repo = repo.filtered('served')
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
         self.requirements = repo.requirements
         self.supportedformats = repo.supportedformats
 
     def close(self):
         self._repo.close()
 
     def _capabilities(self):
         return self._caps
 
     def local(self):
         return self._repo
 
     def canpush(self):
         return True
 
     def url(self):
         return self._repo.url()
 
     def lookup(self, key):
         return self._repo.lookup(key)
 
     def branchmap(self):
         return self._repo.branchmap()
 
     def heads(self):
         return self._repo.heads()
 
     def known(self, nodes):
         return self._repo.known(nodes)
 
     def getbundle(self, source, heads=None, common=None, bundlecaps=None):
         return self._repo.getbundle(source, heads=heads, common=common,
                                     bundlecaps=None)
 
     # TODO We might want to move the next two calls into legacypeer and add
     # unbundle instead.
 
     def lock(self):
         return self._repo.lock()
 
     def addchangegroup(self, cg, source, url):
         return self._repo.addchangegroup(cg, source, url)
 
     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)
 
     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)
 
     def debugwireargs(self, one, two, three=None, four=None, five=None):
         '''used to test argument passing over the wire'''
         return "%s %s %s %s %s" % (one, two, three, four, five)
 
 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''
 
     def __init__(self, repo):
         localpeer.__init__(self, repo, caps=LEGACYCAPS)
 
     def branches(self, nodes):
         return self._repo.branches(nodes)
 
     def between(self, pairs):
         return self._repo.between(pairs)
 
     def changegroup(self, basenodes, source):
         return self._repo.changegroup(basenodes, source)
 
     def changegroupsubset(self, bases, heads, source):
         return self._repo.changegroupsubset(bases, heads, source)
 
 class localrepository(object):
 
     supportedformats = set(('revlogv1', 'generaldelta'))
     _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                              'dotencode'))
     openerreqs = set(('revlogv1', 'generaldelta'))
     requirements = ['revlogv1']
     filtername = None
 
     # a list of (ui, featureset) functions.
     # only functions defined in module of enabled extensions are invoked
     featuresetupfuncs = set()
 
     def _baserequirements(self, create):
         return self.requirements[:]
 
     def __init__(self, baseui, path=None, create=False):
         self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
         self.wopener = self.wvfs
         self.root = self.wvfs.base
         self.path = self.wvfs.join(".hg")
         self.origroot = path
         self.auditor = pathutil.pathauditor(self.root, self._checknested)
         self.vfs = scmutil.vfs(self.path)
         self.opener = self.vfs
         self.baseui = baseui
         self.ui = baseui.copy()
         self.ui.copy = baseui.copy # prevent copying repo configuration
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass
 
         if self.featuresetupfuncs:
             self.supported = set(self._basesupported) # use private copy
             extmods = set(m.__name__ for n, m
                           in extensions.extensions(self.ui))
             for setupfunc in self.featuresetupfuncs:
                 if setupfunc.__module__ in extmods:
                     setupfunc(self.ui, self.supported)
         else:
             self.supported = self._basesupported
 
         if not self.vfs.isdir():
             if create:
                 if not self.wvfs.exists():
                     self.wvfs.makedirs()
                 self.vfs.makedir(notindexed=True)
                 requirements = self._baserequirements(create)
                 if self.ui.configbool('format', 'usestore', True):
                     self.vfs.mkdir("store")
                     requirements.append("store")
                     if self.ui.configbool('format', 'usefncache', True):
                         requirements.append("fncache")
                         if self.ui.configbool('format', 'dotencode', True):
                             requirements.append('dotencode')
                     # create an invalid changelog
                     self.vfs.append(
                         "00changelog.i",
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
                 if self.ui.configbool('format', 'generaldelta', False):
                     requirements.append("generaldelta")
                 requirements = set(requirements)
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             try:
                 requirements = scmutil.readrequires(self.vfs, self.supported)
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
                 requirements = set()
 
         self.sharedpath = self.path
         try:
             vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                               realpath=True)
             s = vfs.base
             if not vfs.exists():
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError, inst:
             if inst.errno != errno.ENOENT:
                 raise
 
         self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sopener = self.svfs
         self.sjoin = self.store.join
         self.vfs.createmode = self.store.createmode
         self._applyrequirements(requirements)
         if create:
             self._writerequirements()
 
 
         self._branchcaches = {}
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None
 
         # A cache for various files under .hg/ that tracks file changes,
         # (used by the filecache decorator)
         #
         # Maps a property name to its util.filecacheentry
         self._filecache = {}
 
         # hold sets of revision to be filtered
         # should be cleared when something might have changed the filter value:
         # - new changesets,
         # - phase change,
         # - new obsolescence marker,
         # - working directory parent change,
         # - bookmark changes
         self.filteredrevcache = {}
 
     def close(self):
         pass
 
     def _restrictcapabilities(self, caps):
         return caps
 
     def _applyrequirements(self, requirements):
         self.requirements = requirements
         self.sopener.options = dict((r, 1) for r in requirements
                                     if r in self.openerreqs)
         chunkcachesize = self.ui.configint('format', 'chunkcachesize')
         if chunkcachesize is not None:
             self.sopener.options['chunkcachesize'] = chunkcachesize
 
     def _writerequirements(self):
         reqfile = self.opener("requires", "w")
         for r in sorted(self.requirements):
             reqfile.write("%s\n" % r)
         reqfile.close()
 
     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
             return False
         subpath = path[len(self.root) + 1:]
         normsubpath = util.pconvert(subpath)
 
         # XXX: Checking against the current working copy is wrong in
         # the sense that it can reject things like
         #
         #   $ hg cat -r 10 sub/x.txt
         #
         # if sub/ is no longer a subrepository in the working copy
         # parent revision.
         #
         # However, it can of course also allow things that would have
         # been rejected before, such as the above cat command if sub/
         # is a subrepository now, but was a normal directory before.
         # The old path auditor would have rejected by mistake since it
         # panics when it sees sub/.hg/.
         #
         # All in all, checking against the working copy seems sensible
         # since we want to prevent access to nested repositories on
         # the filesystem *now*.
         ctx = self[None]
         parts = util.splitpath(subpath)
         while parts:
             prefix = '/'.join(parts)
             if prefix in ctx.substate:
                 if prefix == normsubpath:
                     return True
                 else:
                     sub = ctx.sub(prefix)
                     return sub.checknested(subpath[len(prefix) + 1:])
             else:
                 parts.pop()
         return False
 
     def peer(self):
         return localpeer(self) # not cached to avoid reference cycle
 
     def unfiltered(self):
         """Return unfiltered version of the repository
 
         Intended to be overwritten by filtered repo."""
         return self
 
     def filtered(self, name):
         """Return a filtered version of a repository"""
         # build a new class with the mixin and the current class
         # (possibly subclass of the repo)
         class proxycls(repoview.repoview, self.unfiltered().__class__):
             pass
         return proxycls(self, name)
 
     @repofilecache('bookmarks')
     def _bookmarks(self):
         return bookmarks.bmstore(self)
 
     @repofilecache('bookmarks.current')
     def _bookmarkcurrent(self):
         return bookmarks.readcurrent(self)
 
     def bookmarkheads(self, bookmark):
         name = bookmark.split('@', 1)[0]
         heads = []
         for mark, n in self._bookmarks.iteritems():
             if mark.split('@', 1)[0] == name:
                 heads.append(n)
         return heads
 
     @storecache('phaseroots')
     def _phasecache(self):
         return phases.phasecache(self, self._phasedefaults)
 
     @storecache('obsstore')
     def obsstore(self):
         store = obsolete.obsstore(self.sopener)
         if store and not obsolete._enabled:
             # message is rare enough to not be translated
             msg = 'obsolete feature not enabled but %i markers found!\n'
             self.ui.warn(msg % len(list(store)))
         return store
 
     @storecache('00changelog.i')
     def changelog(self):
         c = changelog.changelog(self.sopener)
         if 'HG_PENDING' in os.environ:
             p = os.environ['HG_PENDING']
             if p.startswith(self.root):
                 c.readpending('00changelog.i.a')
         return c
 
     @storecache('00manifest.i')
     def manifest(self):
         return manifest.manifest(self.sopener)
 
     @repofilecache('dirstate')
     def dirstate(self):
         warned = [0]
         def validate(node):
             try:
                 self.changelog.rev(node)
                 return node
             except error.LookupError:
                 if not warned[0]:
                     warned[0] = True
                     self.ui.warn(_("warning: ignoring unknown"
                                    " working parent %s!\n") % short(node))
                 return nullid
 
         return dirstate.dirstate(self.opener, self.ui, self.root, validate)
 
     def __getitem__(self, changeid):
         if changeid is None:
             return context.workingctx(self)
         return context.changectx(self, changeid)
 
     def __contains__(self, changeid):
         try:
             return bool(self.lookup(changeid))
         except error.RepoLookupError:
             return False
 
     def __nonzero__(self):
         return True
 
     def __len__(self):
         return len(self.changelog)
 
     def __iter__(self):
         return iter(self.changelog)
 
     def revs(self, expr, *args):
         '''Return a list of revisions matching the given revset'''
         expr = revset.formatspec(expr, *args)
         m = revset.match(None, expr)
         return [r for r in m(self, list(self))]
 
     def set(self, expr, *args):
         '''
         Yield a context for each matching revision, after doing arg
         replacement via revset.formatspec
         '''
         for r in self.revs(expr, *args):
             yield self[r]
 
     def url(self):
         return 'file:' + self.root
 
     def hook(self, name, throw=False, **args):
         return hook.hook(self.ui, self, name, throw, **args)
 
     @unfilteredmethod
     def _tag(self, names, node, message, local, user, date, extra={}):
         if isinstance(names, str):
             names = (names,)
 
         branches = self.branchmap()
         for name in names:
             self.hook('pretag', throw=True, node=hex(node), tag=name,
                       local=local)
             if name in branches:
                 self.ui.warn(_("warning: tag %s conflicts with existing"
                                " branch name\n") % name)
 
         def writetags(fp, names, munge, prevtags):
             fp.seek(0, 2)
             if prevtags and prevtags[-1] != '\n':
                 fp.write('\n')
             for name in names:
                 m = munge and munge(name) or name
                 if (self._tagscache.tagtypes and
                     name in self._tagscache.tagtypes):
                     old = self.tags().get(name, nullid)
                     fp.write('%s %s\n' % (hex(old), m))
                 fp.write('%s %s\n' % (hex(node), m))
             fp.close()
 
         prevtags = ''
         if local:
             try:
                 fp = self.opener('localtags', 'r+')
             except IOError:
                 fp = self.opener('localtags', 'a')
             else:
                 prevtags = fp.read()
 
             # local tags are stored in the current charset
             writetags(fp, names, None, prevtags)
             for name in names:
                 self.hook('tag', node=hex(node), tag=name, local=local)
             return
 
         try:
             fp = self.wfile('.hgtags', 'rb+')
         except IOError, e:
             if e.errno != errno.ENOENT:
                 raise
             fp = self.wfile('.hgtags', 'ab')
         else:
             prevtags = fp.read()
 
         # committed tags are stored in UTF-8
         writetags(fp, names, encoding.fromlocal, prevtags)
 
         fp.close()
 
         self.invalidatecaches()
 
         if '.hgtags' not in self.dirstate:
             self[None].add(['.hgtags'])
 
         m = matchmod.exact(self.root, '', ['.hgtags'])
         tagnode = self.commit(message, user, date, extra=extra, match=m)
 
         for name in names:
             self.hook('tag', node=hex(node), tag=name, local=local)
 
         return tagnode
 
     def tag(self, names, node, message, local, user, date):
         '''tag a revision with one or more symbolic names.
 
         names is a list of strings or, when adding a single tag, names may be a
         string.
 
         if local is True, the tags are stored in a per-repository file.
         otherwise, they are stored in the .hgtags file, and a new
         changeset is committed with the change.
 
         keyword arguments:
 
         local: whether to store tags in non-version-controlled file
         (default False)
 
         message: commit message to use if committing
 
         user: name of user to use if committing
 
         date: date tuple to use if committing'''
 
         if not local:
             for x in self.status()[:5]:
                 if '.hgtags' in x:
                     raise util.Abort(_('working copy of .hgtags is changed '
                                        '(please commit .hgtags manually)'))
 
         self.tags() # instantiate the cache
         self._tag(names, node, message, local, user, date)
 
     @filteredpropertycache
     def _tagscache(self):
         '''Returns a tagscache object that contains various tags related
         caches.'''
 
         # This simplifies its cache management by having one decorated
         # function (this one) and the rest simply fetch things from it.
         class tagscache(object):
             def __init__(self):
                 # These two define the set of tags for this repository. tags
                 # maps tag name to node; tagtypes maps tag name to 'global' or
                 # 'local'. (Global tags are defined by .hgtags across all
                 # heads, and local tags are defined in .hg/localtags.)
                 # They constitute the in-memory cache of tags.
                 self.tags = self.tagtypes = None
 
                 self.nodetagscache = self.tagslist = None
 
         cache = tagscache()
         cache.tags, cache.tagtypes = self._findtags()
 
         return cache
 
     def tags(self):
         '''return a mapping of tag to node'''
         t = {}
         if self.changelog.filteredrevs:
             tags, tt = self._findtags()
         else:
             tags = self._tagscache.tags
         for k, v in tags.iteritems():
             try:
                 # ignore tags to unknown nodes
                 self.changelog.rev(v)
                 t[k] = v
             except (error.LookupError, ValueError):
                 pass
         return t
 
     def _findtags(self):
         '''Do the hard work of finding tags. Return a pair of dicts
         (tags, tagtypes) where tags maps tag name to node, and tagtypes
         maps tag name to a string like \'global\' or \'local\'.
         Subclasses or extensions are free to add their own tags, but
         should be aware that the returned dicts will be retained for the
         duration of the localrepo object.'''
 
         # XXX what tagtype should subclasses/extensions use? Currently
         # mq and bookmarks add tags, but do not set the tagtype at all.
         # Should each extension invent its own tag type? Should there
         # be one tagtype for all such "virtual" tags? Or is the status
         # quo fine?
 
         alltags = {} # map tag name to (node, hist)
         tagtypes = {}
 
         tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
 
         # Build the return dicts. Have to re-encode tag names because
         # the tags module always uses UTF-8 (in order not to lose info
         # writing to the cache), but the rest of Mercurial wants them in
         # local encoding.
         tags = {}
         for (name, (node, hist)) in alltags.iteritems():
             if node != nullid:
                 tags[encoding.tolocal(name)] = node
         tags['tip'] = self.changelog.tip()
         tagtypes = dict([(encoding.tolocal(name), value)
                          for (name, value) in tagtypes.iteritems()])
         return (tags, tagtypes)
 
     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:
 
         'local' : a local tag
         'global' : a global tag
         None : tag does not exist
         '''
 
         return self._tagscache.tagtypes.get(tagname)
 
     def tagslist(self):
         '''return a list of tags ordered by revision'''
         if not self._tagscache.tagslist:
             l = []
             for t, n in self.tags().iteritems():
                 r = self.changelog.rev(n)
                 l.append((r, t, n))
             self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
 
         return self._tagscache.tagslist
 
     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self._tagscache.nodetagscache:
             nodetagscache = {}
             for t, n in self._tagscache.tags.iteritems():
                 nodetagscache.setdefault(n, []).append(t)
             for tags in nodetagscache.itervalues():
                 tags.sort()
             self._tagscache.nodetagscache = nodetagscache
         return self._tagscache.nodetagscache.get(node, [])
 
     def nodebookmarks(self, node):
         marks = []
         for bookmark, n in self._bookmarks.iteritems():
             if n == node:
                 marks.append(bookmark)
         return sorted(marks)
 
     def branchmap(self):
         '''returns a dictionary {branch: [branchheads]}'''
         branchmap.updatecache(self)
         return self._branchcaches[self.filtername]
 
-
-    def _branchtip(self, heads):
-        '''return the tipmost branch head in heads'''
-        tip = heads[-1]
-        for h in reversed(heads):
-            if not self[h].closesbranch():
-                tip = h
-                break
-        return tip
-
     def branchtip(self, branch):
         '''return the tip node for a given branch'''
         try:
             return self.branchmap().branchtip(branch)
         except KeyError:
             raise error.RepoLookupError(_("unknown branch '%s'") % branch)
 
-    def branchtags(self):
-        '''return a dict where branch names map to the tipmost head of
-        the branch, open heads come before closed'''
-        bt = {}
-        for bn, heads in self.branchmap().iteritems():
-            bt[bn] = self._branchtip(heads)
-        return bt
-
687 def lookup(self, key):
669 def lookup(self, key):
688 return self[key].node()
670 return self[key].node()
689
671
690 def lookupbranch(self, key, remote=None):
672 def lookupbranch(self, key, remote=None):
691 repo = remote or self
673 repo = remote or self
692 if key in repo.branchmap():
674 if key in repo.branchmap():
693 return key
675 return key
694
676
695 repo = (remote and remote.local()) and remote or self
677 repo = (remote and remote.local()) and remote or self
696 return repo[key].branch()
678 return repo[key].branch()
697
679
698 def known(self, nodes):
680 def known(self, nodes):
699 nm = self.changelog.nodemap
681 nm = self.changelog.nodemap
700 pc = self._phasecache
682 pc = self._phasecache
701 result = []
683 result = []
702 for n in nodes:
684 for n in nodes:
703 r = nm.get(n)
685 r = nm.get(n)
704 resp = not (r is None or pc.phase(self, r) >= phases.secret)
686 resp = not (r is None or pc.phase(self, r) >= phases.secret)
705 result.append(resp)
687 result.append(resp)
706 return result
688 return result
707
689
708 def local(self):
690 def local(self):
709 return self
691 return self
710
692
711 def cancopy(self):
693 def cancopy(self):
712 return self.local() # so statichttprepo's override of local() works
694 return self.local() # so statichttprepo's override of local() works
713
695
714 def join(self, f):
696 def join(self, f):
715 return os.path.join(self.path, f)
697 return os.path.join(self.path, f)
716
698
717 def wjoin(self, f):
699 def wjoin(self, f):
718 return os.path.join(self.root, f)
700 return os.path.join(self.root, f)
719
701
720 def file(self, f):
702 def file(self, f):
721 if f[0] == '/':
703 if f[0] == '/':
722 f = f[1:]
704 f = f[1:]
723 return filelog.filelog(self.sopener, f)
705 return filelog.filelog(self.sopener, f)
724
706
725 def changectx(self, changeid):
707 def changectx(self, changeid):
726 return self[changeid]
708 return self[changeid]
727
709
728 def parents(self, changeid=None):
710 def parents(self, changeid=None):
729 '''get list of changectxs for parents of changeid'''
711 '''get list of changectxs for parents of changeid'''
730 return self[changeid].parents()
712 return self[changeid].parents()
731
713
732 def setparents(self, p1, p2=nullid):
714 def setparents(self, p1, p2=nullid):
733 copies = self.dirstate.setparents(p1, p2)
715 copies = self.dirstate.setparents(p1, p2)
734 pctx = self[p1]
716 pctx = self[p1]
735 if copies:
717 if copies:
736 # Adjust copy records, the dirstate cannot do it, it
718 # Adjust copy records, the dirstate cannot do it, it
737 # requires access to parents manifests. Preserve them
719 # requires access to parents manifests. Preserve them
738 # only for entries added to first parent.
720 # only for entries added to first parent.
739 for f in copies:
721 for f in copies:
740 if f not in pctx and copies[f] in pctx:
722 if f not in pctx and copies[f] in pctx:
741 self.dirstate.copy(copies[f], f)
723 self.dirstate.copy(copies[f], f)
742 if p2 == nullid:
724 if p2 == nullid:
743 for f, s in sorted(self.dirstate.copies().items()):
725 for f, s in sorted(self.dirstate.copies().items()):
744 if f not in pctx and s not in pctx:
726 if f not in pctx and s not in pctx:
745 self.dirstate.copy(None, f)
727 self.dirstate.copy(None, f)
746
728
747 def filectx(self, path, changeid=None, fileid=None):
729 def filectx(self, path, changeid=None, fileid=None):
748 """changeid can be a changeset revision, node, or tag.
730 """changeid can be a changeset revision, node, or tag.
749 fileid can be a file revision or node."""
731 fileid can be a file revision or node."""
750 return context.filectx(self, path, changeid, fileid)
732 return context.filectx(self, path, changeid, fileid)
751
733
752 def getcwd(self):
734 def getcwd(self):
753 return self.dirstate.getcwd()
735 return self.dirstate.getcwd()
754
736
755 def pathto(self, f, cwd=None):
737 def pathto(self, f, cwd=None):
756 return self.dirstate.pathto(f, cwd)
738 return self.dirstate.pathto(f, cwd)
757
739
758 def wfile(self, f, mode='r'):
740 def wfile(self, f, mode='r'):
759 return self.wopener(f, mode)
741 return self.wopener(f, mode)
760
742
761 def _link(self, f):
743 def _link(self, f):
762 return self.wvfs.islink(f)
744 return self.wvfs.islink(f)
763
745
764 def _loadfilter(self, filter):
746 def _loadfilter(self, filter):
765 if filter not in self.filterpats:
747 if filter not in self.filterpats:
766 l = []
748 l = []
767 for pat, cmd in self.ui.configitems(filter):
749 for pat, cmd in self.ui.configitems(filter):
768 if cmd == '!':
750 if cmd == '!':
769 continue
751 continue
770 mf = matchmod.match(self.root, '', [pat])
752 mf = matchmod.match(self.root, '', [pat])
771 fn = None
753 fn = None
772 params = cmd
754 params = cmd
773 for name, filterfn in self._datafilters.iteritems():
755 for name, filterfn in self._datafilters.iteritems():
774 if cmd.startswith(name):
756 if cmd.startswith(name):
775 fn = filterfn
757 fn = filterfn
776 params = cmd[len(name):].lstrip()
758 params = cmd[len(name):].lstrip()
777 break
759 break
778 if not fn:
760 if not fn:
779 fn = lambda s, c, **kwargs: util.filter(s, c)
761 fn = lambda s, c, **kwargs: util.filter(s, c)
780 # Wrap old filters not supporting keyword arguments
762 # Wrap old filters not supporting keyword arguments
781 if not inspect.getargspec(fn)[2]:
763 if not inspect.getargspec(fn)[2]:
782 oldfn = fn
764 oldfn = fn
783 fn = lambda s, c, **kwargs: oldfn(s, c)
765 fn = lambda s, c, **kwargs: oldfn(s, c)
784 l.append((mf, fn, params))
766 l.append((mf, fn, params))
785 self.filterpats[filter] = l
767 self.filterpats[filter] = l
786 return self.filterpats[filter]
768 return self.filterpats[filter]
787
769
788 def _filter(self, filterpats, filename, data):
770 def _filter(self, filterpats, filename, data):
789 for mf, fn, cmd in filterpats:
771 for mf, fn, cmd in filterpats:
790 if mf(filename):
772 if mf(filename):
791 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
773 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
792 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
774 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
793 break
775 break
794
776
795 return data
777 return data
796
778
797 @unfilteredpropertycache
779 @unfilteredpropertycache
798 def _encodefilterpats(self):
780 def _encodefilterpats(self):
799 return self._loadfilter('encode')
781 return self._loadfilter('encode')
800
782
801 @unfilteredpropertycache
783 @unfilteredpropertycache
802 def _decodefilterpats(self):
784 def _decodefilterpats(self):
803 return self._loadfilter('decode')
785 return self._loadfilter('decode')
804
786
805 def adddatafilter(self, name, filter):
787 def adddatafilter(self, name, filter):
806 self._datafilters[name] = filter
788 self._datafilters[name] = filter
807
789
808 def wread(self, filename):
790 def wread(self, filename):
809 if self._link(filename):
791 if self._link(filename):
810 data = self.wvfs.readlink(filename)
792 data = self.wvfs.readlink(filename)
811 else:
793 else:
812 data = self.wopener.read(filename)
794 data = self.wopener.read(filename)
813 return self._filter(self._encodefilterpats, filename, data)
795 return self._filter(self._encodefilterpats, filename, data)
814
796
815 def wwrite(self, filename, data, flags):
797 def wwrite(self, filename, data, flags):
816 data = self._filter(self._decodefilterpats, filename, data)
798 data = self._filter(self._decodefilterpats, filename, data)
817 if 'l' in flags:
799 if 'l' in flags:
818 self.wopener.symlink(data, filename)
800 self.wopener.symlink(data, filename)
819 else:
801 else:
820 self.wopener.write(filename, data)
802 self.wopener.write(filename, data)
821 if 'x' in flags:
803 if 'x' in flags:
822 self.wvfs.setflags(filename, False, True)
804 self.wvfs.setflags(filename, False, True)
823
805
824 def wwritedata(self, filename, data):
806 def wwritedata(self, filename, data):
825 return self._filter(self._decodefilterpats, filename, data)
807 return self._filter(self._decodefilterpats, filename, data)
826
808
827 def transaction(self, desc, report=None):
809 def transaction(self, desc, report=None):
828 tr = self._transref and self._transref() or None
810 tr = self._transref and self._transref() or None
829 if tr and tr.running():
811 if tr and tr.running():
830 return tr.nest()
812 return tr.nest()
831
813
832 # abort here if the journal already exists
814 # abort here if the journal already exists
833 if self.svfs.exists("journal"):
815 if self.svfs.exists("journal"):
834 raise error.RepoError(
816 raise error.RepoError(
835 _("abandoned transaction found - run hg recover"))
817 _("abandoned transaction found - run hg recover"))
836
818
837 self._writejournal(desc)
819 self._writejournal(desc)
838 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
820 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
839 rp = report and report or self.ui.warn
821 rp = report and report or self.ui.warn
840 tr = transaction.transaction(rp, self.sopener,
822 tr = transaction.transaction(rp, self.sopener,
841 "journal",
823 "journal",
842 aftertrans(renames),
824 aftertrans(renames),
843 self.store.createmode)
825 self.store.createmode)
844 self._transref = weakref.ref(tr)
826 self._transref = weakref.ref(tr)
845 return tr
827 return tr
846
828
847 def _journalfiles(self):
829 def _journalfiles(self):
848 return ((self.svfs, 'journal'),
830 return ((self.svfs, 'journal'),
849 (self.vfs, 'journal.dirstate'),
831 (self.vfs, 'journal.dirstate'),
850 (self.vfs, 'journal.branch'),
832 (self.vfs, 'journal.branch'),
851 (self.vfs, 'journal.desc'),
833 (self.vfs, 'journal.desc'),
852 (self.vfs, 'journal.bookmarks'),
834 (self.vfs, 'journal.bookmarks'),
853 (self.svfs, 'journal.phaseroots'))
835 (self.svfs, 'journal.phaseroots'))
854
836
855 def undofiles(self):
837 def undofiles(self):
856 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
838 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
857
839
858 def _writejournal(self, desc):
840 def _writejournal(self, desc):
859 self.opener.write("journal.dirstate",
841 self.opener.write("journal.dirstate",
860 self.opener.tryread("dirstate"))
842 self.opener.tryread("dirstate"))
861 self.opener.write("journal.branch",
843 self.opener.write("journal.branch",
862 encoding.fromlocal(self.dirstate.branch()))
844 encoding.fromlocal(self.dirstate.branch()))
863 self.opener.write("journal.desc",
845 self.opener.write("journal.desc",
864 "%d\n%s\n" % (len(self), desc))
846 "%d\n%s\n" % (len(self), desc))
865 self.opener.write("journal.bookmarks",
847 self.opener.write("journal.bookmarks",
866 self.opener.tryread("bookmarks"))
848 self.opener.tryread("bookmarks"))
867 self.sopener.write("journal.phaseroots",
849 self.sopener.write("journal.phaseroots",
868 self.sopener.tryread("phaseroots"))
850 self.sopener.tryread("phaseroots"))
869
851
870 def recover(self):
852 def recover(self):
871 lock = self.lock()
853 lock = self.lock()
872 try:
854 try:
873 if self.svfs.exists("journal"):
855 if self.svfs.exists("journal"):
874 self.ui.status(_("rolling back interrupted transaction\n"))
856 self.ui.status(_("rolling back interrupted transaction\n"))
875 transaction.rollback(self.sopener, "journal",
857 transaction.rollback(self.sopener, "journal",
876 self.ui.warn)
858 self.ui.warn)
877 self.invalidate()
859 self.invalidate()
878 return True
860 return True
879 else:
861 else:
880 self.ui.warn(_("no interrupted transaction available\n"))
862 self.ui.warn(_("no interrupted transaction available\n"))
881 return False
863 return False
882 finally:
864 finally:
883 lock.release()
865 lock.release()
884
866
885 def rollback(self, dryrun=False, force=False):
867 def rollback(self, dryrun=False, force=False):
886 wlock = lock = None
868 wlock = lock = None
887 try:
869 try:
888 wlock = self.wlock()
870 wlock = self.wlock()
889 lock = self.lock()
871 lock = self.lock()
890 if self.svfs.exists("undo"):
872 if self.svfs.exists("undo"):
891 return self._rollback(dryrun, force)
873 return self._rollback(dryrun, force)
892 else:
874 else:
893 self.ui.warn(_("no rollback information available\n"))
875 self.ui.warn(_("no rollback information available\n"))
894 return 1
876 return 1
895 finally:
877 finally:
896 release(lock, wlock)
878 release(lock, wlock)
897
879
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it does not
        always reread the dirstate. Use dirstate.invalidate() if you
        want to explicitly read the dirstate again (i.e. restore it to
        a previously known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

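    # Illustrative note: commit() below registers its 'commit' hook through
    # _afterlock() so the hook only fires once the store lock is dropped. A
    # hypothetical caller would do the same (sketch only):
    #
    #     self._afterlock(lambda: self.hook("mycustomhook"))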
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

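    # Illustrative usage (a minimal sketch): take the store lock around any
    # store mutation and always release it in a finally block:
    #
    #     lock = repo.lock()
    #     try:
    #         # ... modify .hg/store ...
    #         pass
    #     finally:
    #         lock.release()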
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

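    # Illustrative note: when both locks are needed, the convention in this
    # file (see rollback() and commit()) is wlock first, then lock, released
    # in reverse order:
    #
    #     wlock = repo.wlock()
    #     lock = repo.lock()
    #     try:
    #         # ... work ...
    #         pass
    #     finally:
    #         release(lock, wlock)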
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

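    # Illustrative note: for a rename such as 'hg mv foo bar', _filecommit()
    # records the copy source in the filelog metadata rather than in the
    # parents, roughly ('foo' is a hypothetical filename):
    #
    #     meta = {"copy": "foo", "copyrev": hex(<filenode of foo's source>)}
    #
    # with fparent1 set to nullid, which tells readers to consult the copy
    # data instead.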
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

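    # Illustrative usage (a minimal sketch, names assumed): committing all
    # pending working-directory changes programmatically; commit() returns
    # None when there is nothing to commit:
    #
    #     node = repo.commit(text="fix the frobnicator",
    #                        user="Jane Doe <jane@example.com>")
    #     if node is None:
    #         repo.ui.status("nothing changed\n")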
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

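    # Illustrative usage: status() returns seven sorted lists that callers
    # unpack positionally; e.g. comparing the working directory against its
    # first parent, including the normally suppressed categories:
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(unknown=True, ignored=True, clean=True)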
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

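    # Illustrative usage: printing the open heads of the current branch,
    # newest first (short() is imported at the top of this module):
    #
    #     for node in repo.branchheads():
    #         repo.ui.write("%s\n" % short(node))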
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

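    # Illustrative note: between() walks the first-parent chain from each
    # 'top' toward 'bottom', sampling nodes at exponentially growing
    # distances (1, 2, 4, 8, ...), so a span of ~1000 changesets is
    # summarized by roughly ten nodes. This keeps the answer to the
    # pre-getbundle 'between' wire query small on long branches.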
    def pull(self, remote, heads=None, force=False):
        if remote.local():
            missing = set(remote.requirements) - self.supported
            if missing:
                msg = _("required features are not"
                        " supported in the destination:"
                        " %s") % (', '.join(sorted(missing)))
                raise util.Abort(msg)

        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    # TODO: get bundlecaps from remote
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                # we use the unfiltered changelog here because hidden
                # revisions must be taken into account for phase
                # synchronization: they may become public and visible again.
                cl = self.unfiltered().changelog
                clstart = len(cl)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(cl)
                added = [cl.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            def gettransaction():
                if tr is None:
                    return self.transaction(trname)
                return tr

            obstr = obsolete.syncpull(self, remote, gettransaction)
            if obstr is not None:
                tr = obstr

            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

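    # Illustrative usage (a minimal sketch, assuming 'other' is a peer
    # obtained via hg.peer()):
    #
    #     repo.pull(other)                         # pull all remote heads
    #     repo.pull(other, heads=[node], force=False)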
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

1771 def push(self, remote, force=False, revs=None, newbranch=False):
1753 def push(self, remote, force=False, revs=None, newbranch=False):
1772 '''Push outgoing changesets (limited by revs) from the current
1754 '''Push outgoing changesets (limited by revs) from the current
1773 repository to remote. Return an integer:
1755 repository to remote. Return an integer:
1774 - None means nothing to push
1756 - None means nothing to push
1775 - 0 means HTTP error
1757 - 0 means HTTP error
1776 - 1 means we pushed and remote head count is unchanged *or*
1758 - 1 means we pushed and remote head count is unchanged *or*
1777 we have outgoing changesets but refused to push
1759 we have outgoing changesets but refused to push
1778 - other values as described by addchangegroup()
1760 - other values as described by addchangegroup()
1779 '''
1761 '''
        if remote.local():
            missing = set(self.requirements) - remote.local().supported
            if missing:
                msg = _("required features are not"
                        " supported in the destination:"
                        " %s") % (', '.join(sorted(missing)))
                raise util.Abort(msg)

        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        unfi = self.unfiltered()
        def localphasemove(nodes, phase=phases.public):
            """move <nodes> to <phase> in the local source repo"""
            if locallock is not None:
                phases.advanceboundary(self, phase, nodes)
            else:
                # repo is not locked, do not change any phases!
                # Inform the user that phases should have been moved when
                # applicable.
                actualmoves = [n for n in nodes if phase < self[n].phase()]
                phasestr = phases.phasenames[phase]
                if actualmoves:
                    self.ui.status(_('cannot lock source repo, skipping local'
                                     ' %s phase update\n') % phasestr)
        # get local lock as we might write phase data
        locallock = None
        try:
            locallock = self.lock()
        except IOError, err:
            if err.errno != errno.EACCES:
                raise
            # source repo cannot be locked.
            # We do not abort the push, but just disable the local phase
            # synchronisation.
            msg = 'cannot lock source repository: %s\n' % err
            self.ui.debug(msg)
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(unfi, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(unfi, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore is false there are no obsolete
                        # markers, so we can skip the iteration
                        if unfi.obsstore:
                            # these messages are defined here to stay within
                            # the 80-character limit below
                            mso = _("push includes obsolete changeset: %s!")
                            mst = "push includes %s changeset: %s!"
                            # plain versions for the i18n tool to detect them
                            _("push includes unstable changeset: %s!")
                            _("push includes bumped changeset: %s!")
                            _("push includes divergent changeset: %s!")
                            # If we are about to push and there is at least
                            # one obsolete or unstable changeset in missing,
                            # at least one of the missing heads will be
                            # obsolete or unstable. So checking heads only
                            # is OK.
                            for node in outgoing.missingheads:
                                ctx = unfi[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.troubled():
                                    raise util.Abort(_(mst)
                                                     % (ctx.troubles()[0],
                                                        ctx))
                        newbm = self.ui.configlist('bookmarks', 'pushing')
                        discovery.checkheads(unfi, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc), newbm)

                    # TODO: get bundlecaps from remote
                    bundlecaps = None
                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        bundler = changegroup.bundle10(self, bundlecaps)
                        cg = self._changegroupsubset(outgoing,
                                                     bundler,
                                                     'push',
                                                     fastpath=True)
                    else:
                        cg = self.getlocalbundle('push', outgoing, bundlecaps)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
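                    # Worked example (hypothetical nodes): in the linear
                    # graph A -> B -> C with common = {A}, missing = [B, C]
                    # and revs = [C], the list comprehension picks nothing
                    # (C is not in common) and the revset picks A (the
                    # common head that is a parent of roots(missing)), so
                    # cheads == [A] == heads(::C and ::A).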
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if (self.ui.configbool('ui', '_usedassubrepo', False)
                    and remotephases    # server supports phases
                    and ret is None # nothing was pushed
                    and remotephases.get('publishing', False)):
                    # When:
                    # - this is a subrepo push
                    # - and the remote supports phases
                    # - and no changeset was pushed
                    # - and the remote is publishing
                    # we may be in the issue 3871 case!
                    # We drop the phase synchronisation that would otherwise
                    # be done as a courtesy, as it could publish changesets
                    # that are possibly still draft on the remote.
                    remotephases = {'publishing': 'True'}
                if not remotephases: # old server or public only repo
                    localphasemove(cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        localphasemove(cheads)
                    else: # publish = False
                        localphasemove(pheads)
                        localphasemove(cheads, phases.draft)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly made of roots; we may want to ensure that,
                    # XXX but it is costly.
                    outdated = unfi.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
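                    # In the pushkey calls below, phases.draft == 1 and
                    # phases.public == 0, so each call asks the remote to
                    # move one outdated head from draft ('1') to public
                    # ('0'); a falsy return value means the update failed.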
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                obsolete.syncpush(self, remote)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            if locallock is not None:
                locallock.release()

        bookmarks.updateremote(self.ui, unfi, remote, revs)
        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        # TODO: remove call to nodesbetween.
        csets, bases, heads = cl.nodesbetween(bases, heads)
        bases = [p for n in bases for p in cl.parents(n) if p != nullid]
        outgoing = discovery.outgoing(cl, bases, heads)
        bundler = changegroup.bundle10(self)
        return self._changegroupsubset(outgoing, bundler, source)
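    # Usage sketch (hypothetical caller code, file name assumed): drain the
    # returned chunkbuffer-like object to serialize the changegroup:
    #
    #     cg = repo.changegroupsubset(basenodes, headnodes, 'bundle')
    #     fp = open('out.cg', 'wb')
    #     try:
    #         while True:
    #             chunk = cg.read(4096)
    #             if not chunk:
    #                 break
    #             fp.write(chunk)
    #     finally:
    #         fp.close()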

    def getlocalbundle(self, source, outgoing, bundlecaps=None):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        bundler = changegroup.bundle10(self, bundlecaps)
        return self._changegroupsubset(outgoing, bundler, source)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            hasnode = cl.hasnode
            common = [n for n in common if hasnode(n)]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads),
                                   bundlecaps=bundlecaps)
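    # Wire-protocol sketch (variable names are assumptions): a server
    # answering a pull computes what the client lacks from the sets that
    # discovery produced:
    #
    #     cg = repo.getbundle('serve', heads=clientheads, common=knownnodes)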

    @unfilteredmethod
    def _changegroupsubset(self, outgoing, bundler, source,
                           fastpath=False):
        commonrevs = outgoing.common
        csets = outgoing.missing
        heads = outgoing.missingheads
        # We go through the fast path if we get told to, or if all (unfiltered)
        # heads have been requested (since we then know that all linkrevs will
        # be pulled by the client).
        heads.sort()
        fastpathlinkrev = fastpath or (
                self.filtername is None and heads == sorted(self.heads()))

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)
        gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
        return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    @unfilteredmethod
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
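        # Return-value arithmetic, spelled out: going from 2 heads to 4
        # returns 1 + 2 = 3; going from 3 heads to 2 returns -1 - 1 = -2;
        # an unchanged head count returns 1. 0 is reserved for "nothing
        # changed or no source".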
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            newrevs, newfiles = self.addchangegroupfiles(source, revmap, trp,
                                                         pr, needfiles)
            revisions += newrevs
            files += newfiles

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh
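            # Worked example (assumed numbers): 2 heads before and 4 after,
            # one of which closes its branch, gives dh = (4 - 2) - 1 = 1
            # and the message suffix " (+1 heads)".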

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            self.invalidatevolatilesets()

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if a changeset already
                # existed locally as secret.
                #
                # We should not use added here but the list of all changes in
                # the bundle.
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                if srctype != 'strip':
                    # During strip, branchcache is invalid but the coming
                    # call to `destroyed` will repair it.
                    # In other cases we can safely update the cache on disk.
                    branchmap.updatecache(self.filtered('served'))
                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(self):
                        return

                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)

                    newheads = [h for h in self.heads() if h not in oldheads]
                    self.ui.log("incoming",
                                "%s incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def addchangegroupfiles(self, source, revmap, trp, pr, needfiles):
        revisions = 0
        files = 0
        while True:
            chunkdata = source.filelogheader()
            if not chunkdata:
                break
            f = chunkdata["filename"]
            self.ui.debug("adding %s revisions\n" % f)
            pr()
            fl = self.file(f)
            o = len(fl)
            if not fl.addgroup(source, revmap, trp):
                raise util.Abort(_("received file revlog group is empty"))
            revisions += len(fl) - o
            files += 1
            if f in needfiles:
                needs = needfiles[f]
                for new in xrange(o, len(fl)):
                    n = fl.node(new)
                    if n in needs:
                        needs.remove(n)
                    else:
                        raise util.Abort(
                            _("received spurious file revlog entry"))
                if not needs:
                    del needfiles[f]
        self.ui.progress(_('files'), None)

        for f, needs in needfiles.iteritems():
            fl = self.file(f)
            for n in needs:
                try:
                    fl.rev(n)
                except error.LookupError:
                    raise util.Abort(
                        _('missing file data for %s:%s - run hg verify') %
                        (f, hex(n)))

        return revisions, files

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
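            # Stream format, as parsed below (a descriptive sketch, not a
            # spec): one status line ('0' = OK, '1' = operation forbidden,
            # '2' = remote lock failed), then a line
            # "<total_files> <total_bytes>", then for each file a line
            # "<name>\0<size>" followed by <size> raw bytes of store data.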
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                  for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible.
                    # Filters above 'served' are unlikely to be fetched
                    # from a clone.
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
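    # Negotiation sketch for the streamreqs path above (capability string
    # assumed): a remote advertising streamreqs='revlogv1,generaldelta' is
    # streamed from only when self.supportedformats covers both entries,
    # i.e. when streamreqs - self.supportedformats is empty; otherwise we
    # fall back to a regular pull.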

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
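    # Note on namespaces: pushkey() and listkeys() operate on pushkey
    # namespaces such as 'phases' and 'bookmarks'. For example,
    # repo.listkeys('bookmarks') returns a dict mapping bookmark names to
    # hex nodes; the phase exchange in push() above drives the same
    # mechanism on the remote peer via remote.pushkey('phases', ...).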

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
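# Example: undoname('.hg/store/journal.phaseroots') returns
# '.hg/store/undo.phaseroots'; only the first 'journal' in the basename
# is replaced.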

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True