discovery: prefer loop to double-for list comprehension in changegroupsubset...
Kevin Bullock
r20216:01bdccfe default
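
The change this commit describes replaces a double-for list comprehension with
an explicit nested loop. A minimal sketch of the idiom, with invented names and
data (the actual change lives in changegroupsubset, beyond the portion of the
file shown below):

    nullid = "\0" * 20  # stand-in for node.nullid, for illustration only
    parentlists = [[nullid, "a" * 20], ["b" * 20, "c" * 20]]

    # double-for list comprehension:
    nodes = [n for parents in parentlists for n in parents if n != nullid]

    # the same computation as an explicit loop, the form this commit prefers:
    nodes = []
    for parents in parentlists:
        for n in parents:
            if n != nullid:
                nodes.append(n)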
@@ -1,2449 +1,2451 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
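# Illustrative (hypothetical) use of the cache helpers above; this example is
# not part of the original file:
#
#     class examplerepo(localrepository):
#         @unfilteredpropertycache
#         def _revcount(self):
#             # computed once, always against the unfiltered repository
#             return len(self.unfiltered().changelog)
#
#         @unfilteredmethod
#         def rebuildcaches(self):
#             # 'self' is guaranteed to be the unfiltered repository here
#             self.invalidatecaches()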

MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None):
        return self._repo.getbundle(source, heads=heads, common=common,
                                    bundlecaps=None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly a subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
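    # Illustrative (hypothetical) calls; 'visible' and 'served' are standard
    # filter names from repoview.py ('served' additionally hides secret
    # changesets):
    #
    #     visible = repo.filtered('visible')
    #     served = repo.filtered('served')
    #
    # The proxy class built above keeps isinstance() checks working for repo
    # subclasses introduced by extensions.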

    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
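    # Example (hypothetical) queries; '%ln' is a revset.formatspec placeholder
    # for a list of binary nodes:
    #
    #     revs = repo.revs('heads(default)')
    #     for ctx in repo.set('%ln and not public()', nodes):
    #         ui.write('%s\n' % ctx.hex())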

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in a non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
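    # Illustrative (hypothetical) call creating a global tag, which commits a
    # changeset updating .hgtags:
    #
    #     repo.tag('v1.0', repo['tip'].node(), 'Added tag v1.0',
    #              False, 'test <test@example.com>', None)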

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records; the dirstate cannot do it, since it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
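    # The patterns loaded above come from the [encode] and [decode] hgrc
    # sections; for example (illustrative configuration, not part of this
    # file):
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #
    #     [decode]
    #     *.gz = pipe: gzip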

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc, report=None):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        tr = transaction.transaction(rp, self.sopener,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
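    # Typical (illustrative) caller pattern for the transaction returned above:
    #
    #     tr = repo.transaction('example')
    #     try:
    #         # ... append to revlogs and other journaled files ...
    #         tr.close()    # commit: journal files become undo files
    #     finally:
    #         tr.release()  # roll back if close() was never reached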
828
828
829 def _journalfiles(self):
829 def _journalfiles(self):
830 return ((self.svfs, 'journal'),
830 return ((self.svfs, 'journal'),
831 (self.vfs, 'journal.dirstate'),
831 (self.vfs, 'journal.dirstate'),
832 (self.vfs, 'journal.branch'),
832 (self.vfs, 'journal.branch'),
833 (self.vfs, 'journal.desc'),
833 (self.vfs, 'journal.desc'),
834 (self.vfs, 'journal.bookmarks'),
834 (self.vfs, 'journal.bookmarks'),
835 (self.svfs, 'journal.phaseroots'))
835 (self.svfs, 'journal.phaseroots'))
836
836
837 def undofiles(self):
837 def undofiles(self):
838 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
838 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
839
839
840 def _writejournal(self, desc):
840 def _writejournal(self, desc):
841 self.opener.write("journal.dirstate",
841 self.opener.write("journal.dirstate",
842 self.opener.tryread("dirstate"))
842 self.opener.tryread("dirstate"))
843 self.opener.write("journal.branch",
843 self.opener.write("journal.branch",
844 encoding.fromlocal(self.dirstate.branch()))
844 encoding.fromlocal(self.dirstate.branch()))
845 self.opener.write("journal.desc",
845 self.opener.write("journal.desc",
846 "%d\n%s\n" % (len(self), desc))
846 "%d\n%s\n" % (len(self), desc))
847 self.opener.write("journal.bookmarks",
847 self.opener.write("journal.bookmarks",
848 self.opener.tryread("bookmarks"))
848 self.opener.tryread("bookmarks"))
849 self.sopener.write("journal.phaseroots",
849 self.sopener.write("journal.phaseroots",
850 self.sopener.tryread("phaseroots"))
850 self.sopener.tryread("phaseroots"))
851
851
852 def recover(self):
852 def recover(self):
853 lock = self.lock()
853 lock = self.lock()
854 try:
854 try:
855 if self.svfs.exists("journal"):
855 if self.svfs.exists("journal"):
856 self.ui.status(_("rolling back interrupted transaction\n"))
856 self.ui.status(_("rolling back interrupted transaction\n"))
857 transaction.rollback(self.sopener, "journal",
857 transaction.rollback(self.sopener, "journal",
858 self.ui.warn)
858 self.ui.warn)
859 self.invalidate()
859 self.invalidate()
860 return True
860 return True
861 else:
861 else:
862 self.ui.warn(_("no interrupted transaction available\n"))
862 self.ui.warn(_("no interrupted transaction available\n"))
863 return False
863 return False
864 finally:
864 finally:
865 lock.release()
865 lock.release()
866
866
867 def rollback(self, dryrun=False, force=False):
867 def rollback(self, dryrun=False, force=False):
868 wlock = lock = None
868 wlock = lock = None
869 try:
869 try:
870 wlock = self.wlock()
870 wlock = self.wlock()
871 lock = self.lock()
871 lock = self.lock()
872 if self.svfs.exists("undo"):
872 if self.svfs.exists("undo"):
873 return self._rollback(dryrun, force)
873 return self._rollback(dryrun, force)
874 else:
874 else:
875 self.ui.warn(_("no rollback information available\n"))
875 self.ui.warn(_("no rollback information available\n"))
876 return 1
876 return 1
877 finally:
877 finally:
878 release(lock, wlock)
878 release(lock, wlock)
879
879
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has changed.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly reread the dirstate (i.e. to restore it to a
        previously known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

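    # Illustrative sketch of how _afterlock is used (commit() below
    # registers its 'commit' hook this way); `notify` is a hypothetical
    # callback:
    #
    #     def notify():
    #         repo.ui.debug('store lock released\n')
    #     repo._afterlock(notify)  # runs on release, or immediately if
    #                              # no lock is currently held
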
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

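    # The conventional calling pattern, as used by pull() and commitctx()
    # in this file (a sketch, not additional API):
    #
    #     lock = repo.lock()
    #     try:
    #         ...  # modify the store
    #     finally:
    #         lock.release()
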
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

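    # When both locks are needed, this file takes wlock before lock (see
    # rollback() above); a sketch of that ordering, stated as guidance
    # rather than enforced API:
    #
    #     wlock = repo.wlock()
    #     try:
    #         lock = repo.lock()
    #         try:
    #             ...
    #         finally:
    #             lock.release()
    #     finally:
    #         wlock.release()
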
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

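    # For illustration: the rename metadata recorded by _filecommit for a
    # file committed as a copy of 'foo' at filenode `crev` has this shape
    # (a sketch of the dict contents, not extra behavior):
    #
    #     meta = {'copy': 'foo', 'copyrev': hex(crev)}
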
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

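    # Minimal usage sketch (assumes `repo` has pending working-directory
    # changes; illustrative, not new API). commit() returns None when
    # there is nothing to commit:
    #
    #     node = repo.commit(text='fix frobnicator', user='alice')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
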
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit to its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changeset: if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes either to stay in memory (waiting for the next unlock) or to
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

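    # Illustrative sketch, reusing matchmod imported at the top of this
    # file (variable names are hypothetical):
    #
    #     m = matchmod.always(repo.root, '')
    #     for f in repo.walk(m):           # working directory
    #         repo.ui.write(f + '\n')
    #     for f in repo.walk(m, 'tip'):    # a specific changeset
    #         repo.ui.write(f + '\n')
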
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

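    # The return value is a 7-tuple of sorted file lists; a typical caller
    # unpacks it positionally (sketch only):
    #
    #     (modified, added, removed, deleted,
    #      unknown, ignored, clean) = repo.status(unknown=True, clean=True)
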
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

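    # Sketch: list the open heads of the 'default' branch, newest first,
    # using the `short` helper imported at the top of this file:
    #
    #     for h in repo.branchheads('default'):
    #         repo.ui.write('%s\n' % short(h))
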
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

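    # between() samples the first-parent chain from `top` toward `bottom`
    # at exponentially growing distances (1, 2, 4, ... steps from top); a
    # sketch, assuming `a` descends from `b` along first parents:
    #
    #     [samples] = repo.between([(a, b)])
    #     # samples holds the nodes 1, 2, 4, ... steps below `a`
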
    def pull(self, remote, heads=None, force=False):
        if remote.local():
            missing = set(remote.requirements) - self.supported
            if missing:
                msg = _("required features are not"
                        " supported in the destination:"
                        " %s") % (', '.join(sorted(missing)))
                raise util.Abort(msg)

        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    # TODO: get bundlecaps from remote
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                # we use the unfiltered changelog here because hidden
                # revisions must be taken into account for phase
                # synchronization; they may become public and visible again.
                cl = self.unfiltered().changelog
                clstart = len(cl)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(cl)
                added = [cl.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            def gettransaction():
                if tr is None:
                    return self.transaction(trname)
                return tr

            obstr = obsolete.syncpull(self, remote, gettransaction)
            if obstr is not None:
                tr = obstr

            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

1753 def push(self, remote, force=False, revs=None, newbranch=False):
1753 def push(self, remote, force=False, revs=None, newbranch=False):
1754 '''Push outgoing changesets (limited by revs) from the current
1754 '''Push outgoing changesets (limited by revs) from the current
1755 repository to remote. Return an integer:
1755 repository to remote. Return an integer:
1756 - None means nothing to push
1756 - None means nothing to push
1757 - 0 means HTTP error
1757 - 0 means HTTP error
1758 - 1 means we pushed and remote head count is unchanged *or*
1758 - 1 means we pushed and remote head count is unchanged *or*
1759 we have outgoing changesets but refused to push
1759 we have outgoing changesets but refused to push
1760 - other values as described by addchangegroup()
1760 - other values as described by addchangegroup()
1761 '''
1761 '''
1762 if remote.local():
1762 if remote.local():
1763 missing = set(self.requirements) - remote.local().supported
1763 missing = set(self.requirements) - remote.local().supported
1764 if missing:
1764 if missing:
1765 msg = _("required features are not"
1765 msg = _("required features are not"
1766 " supported in the destination:"
1766 " supported in the destination:"
1767 " %s") % (', '.join(sorted(missing)))
1767 " %s") % (', '.join(sorted(missing)))
1768 raise util.Abort(msg)
1768 raise util.Abort(msg)
1769
1769
1770 # there are two ways to push to remote repo:
1770 # there are two ways to push to remote repo:
1771 #
1771 #
1772 # addchangegroup assumes local user can lock remote
1772 # addchangegroup assumes local user can lock remote
1773 # repo (local filesystem, old ssh servers).
1773 # repo (local filesystem, old ssh servers).
1774 #
1774 #
1775 # unbundle assumes local user cannot lock remote repo (new ssh
1775 # unbundle assumes local user cannot lock remote repo (new ssh
1776 # servers, http servers).
1776 # servers, http servers).
1777
1777
1778 if not remote.canpush():
1778 if not remote.canpush():
1779 raise util.Abort(_("destination does not support push"))
1779 raise util.Abort(_("destination does not support push"))
1780 unfi = self.unfiltered()
1780 unfi = self.unfiltered()
1781 def localphasemove(nodes, phase=phases.public):
1781 def localphasemove(nodes, phase=phases.public):
1782 """move <nodes> to <phase> in the local source repo"""
1782 """move <nodes> to <phase> in the local source repo"""
1783 if locallock is not None:
1783 if locallock is not None:
1784 phases.advanceboundary(self, phase, nodes)
1784 phases.advanceboundary(self, phase, nodes)
1785 else:
1785 else:
1786 # repo is not locked, do not change any phases!
1786 # repo is not locked, do not change any phases!
1787 # Informs the user that phases should have been moved when
1787 # Informs the user that phases should have been moved when
1788 # applicable.
1788 # applicable.
1789 actualmoves = [n for n in nodes if phase < self[n].phase()]
1789 actualmoves = [n for n in nodes if phase < self[n].phase()]
1790 phasestr = phases.phasenames[phase]
1790 phasestr = phases.phasenames[phase]
1791 if actualmoves:
1791 if actualmoves:
1792 self.ui.status(_('cannot lock source repo, skipping local'
1792 self.ui.status(_('cannot lock source repo, skipping local'
1793 ' %s phase update\n') % phasestr)
1793 ' %s phase update\n') % phasestr)
1794 # get local lock as we might write phase data
1794 # get local lock as we might write phase data
1795 locallock = None
1795 locallock = None
1796 try:
1796 try:
1797 locallock = self.lock()
1797 locallock = self.lock()
1798 except IOError, err:
1798 except IOError, err:
1799 if err.errno != errno.EACCES:
1799 if err.errno != errno.EACCES:
1800 raise
1800 raise
1801 # source repo cannot be locked.
1801 # source repo cannot be locked.
1802 # We do not abort the push, but just disable the local phase
1802 # We do not abort the push, but just disable the local phase
1803 # synchronisation.
1803 # synchronisation.
1804 msg = 'cannot lock source repository: %s\n' % err
1804 msg = 'cannot lock source repository: %s\n' % err
1805 self.ui.debug(msg)
1805 self.ui.debug(msg)
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(unfi, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(unfi, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

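                # Added commentary: findcommonincoming returns a tuple
                # (common, anyincoming, remoteheads), while
                # findcommonoutgoing returns a discovery.outgoing object
                # whose 'missing' holds the changesets absent from the
                # remote and 'missingheads' their heads.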
                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore is falsy --> no obsolete markers,
                        # so we can skip the iteration
                        if unfi.obsstore:
                            # these messages are defined here to stay within
                            # the 80-char limit
                            mso = _("push includes obsolete changeset: %s!")
                            mst = "push includes %s changeset: %s!"
                            # plain versions for the i18n tool to detect them
                            _("push includes unstable changeset: %s!")
                            _("push includes bumped changeset: %s!")
                            _("push includes divergent changeset: %s!")
                            # If there is at least one obsolete or unstable
                            # changeset among the missing ones, then at least
                            # one of the missing heads is obsolete or
                            # unstable, so checking the heads only is enough.
                            for node in outgoing.missingheads:
                                ctx = unfi[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.troubled():
                                    raise util.Abort(_(mst)
                                                     % (ctx.troubles()[0],
                                                        ctx))
                        newbm = self.ui.configlist('bookmarks', 'pushing')
                        discovery.checkheads(unfi, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc), newbm)

                    # TODO: get bundlecaps from remote
                    bundlecaps = None
                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        bundler = changegroup.bundle10(self, bundlecaps)
                        cg = self._changegroupsubset(outgoing,
                                                     bundler,
                                                     'push',
                                                     fastpath=True)
                    else:
                        cg = self.getlocalbundle('push', outgoing, bundlecaps)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

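                # Added commentary: the rest of this method synchronizes
                # phase data; 'cheads' is the set of heads both sides are
                # known to share after the push attempt.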
                if ret:
                    # push succeeded; synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # The push of everything failed; synchronize all common.
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if (self.ui.configbool('ui', '_usedassubrepo', False)
                    and remotephases    # server supports phases
                    and ret is None     # nothing was pushed
                    and remotephases.get('publishing', False)):
                    # When:
                    # - this is a subrepo push
                    # - and the remote supports phases
                    # - and no changeset was pushed
                    # - and the remote is publishing
                    # we may be in the issue 3871 case!
                    # We drop the possible phase synchronisation done by
                    # courtesy to publish changesets possibly locally draft
                    # on the remote.
                    remotephases = {'publishing': 'True'}
                if not remotephases: # old server or public-only repo
                    localphasemove(cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        localphasemove(cheads)
                    else: # publish = False
                        localphasemove(pheads)
                        localphasemove(cheads, phases.draft)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on the remote
                    # but public locally.
                    # XXX Beware that the revset breaks if droots are not
                    # XXX strictly roots; we may want to ensure they are,
                    # XXX but that is costly.
                    outdated = unfi.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                obsolete.syncpush(self, remote)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            if locallock is not None:
                locallock.release()

        bookmarks.updateremote(self.ui, unfi, remote, revs)
        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        # TODO: remove call to nodesbetween.
        csets, bases, heads = cl.nodesbetween(bases, heads)
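        # Added commentary: gather the non-null parents of every base;
        # discovery.outgoing() treats them as the common set when computing
        # what is missing.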
        discbases = []
        for n in bases:
            discbases.extend([p for p in cl.parents(n) if p != nullid])
        outgoing = discovery.outgoing(cl, discbases, heads)
        bundler = changegroup.bundle10(self)
        return self._changegroupsubset(outgoing, bundler, source)

    def getlocalbundle(self, source, outgoing, bundlecaps=None):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        bundler = changegroup.bundle10(self, bundlecaps)
        return self._changegroupsubset(outgoing, bundler, source)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            hasnode = cl.hasnode
            common = [n for n in common if hasnode(n)]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads),
                                   bundlecaps=bundlecaps)

    @unfilteredmethod
    def _changegroupsubset(self, outgoing, bundler, source,
                           fastpath=False):
        commonrevs = outgoing.common
        csets = outgoing.missing
        heads = outgoing.missingheads
        # We go through the fast path if we get told to, or if all (unfiltered)
        # heads have been requested (since we then know that all linkrevs will
        # be pulled by the client).
        heads.sort()
        fastpathlinkrev = fastpath or (
            self.filtername is None and heads == sorted(self.heads()))

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)
        gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
        return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    @unfilteredmethod
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

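            # Added commentary: a changegroup stream carries three sections
            # in fixed order -- changelog, manifests, then one group per
            # changed file -- which the code below consumes in that order.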
            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            newrevs, newfiles = self.addchangegroupfiles(source, revmap, trp,
                                                         pr, needfiles)
            revisions += newrevs
            files += newfiles

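            # Added commentary: 'dh' is the delta in visible head count; new
            # heads that close a branch are not counted.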
            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            self.invalidatevolatilesets()

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if the changeset already
                # existed locally as secret.
                #
                # We should not use 'added' here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                if srctype != 'strip':
                    # During strip, the branchcache is invalid, but the
                    # upcoming call to `destroyed` will repair it.
                    # Otherwise we can safely update the cache on disk.
                    branchmap.updatecache(self.filtered('served'))
                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(self):
                        return

                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)

                    newheads = [h for h in self.heads() if h not in oldheads]
                    self.ui.log("incoming",
                                "%s incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
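
    # Added commentary: addchangegroupfiles() below drains per-file
    # deltagroups from 'source' until an empty filelog header marks the end
    # of the stream, cross-checking against 'needfiles' when server
    # validation is enabled.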
    def addchangegroupfiles(self, source, revmap, trp, pr, needfiles):
        revisions = 0
        files = 0
        while True:
            chunkdata = source.filelogheader()
            if not chunkdata:
                break
            f = chunkdata["filename"]
            self.ui.debug("adding %s revisions\n" % f)
            pr()
            fl = self.file(f)
            o = len(fl)
            if not fl.addgroup(source, revmap, trp):
                raise util.Abort(_("received file revlog group is empty"))
            revisions += len(fl) - o
            files += 1
            if f in needfiles:
                needs = needfiles[f]
                for new in xrange(o, len(fl)):
                    n = fl.node(new)
                    if n in needs:
                        needs.remove(n)
                    else:
                        raise util.Abort(
                            _("received spurious file revlog entry"))
                if not needs:
                    del needfiles[f]
        self.ui.progress(_('files'), None)

        for f, needs in needfiles.iteritems():
            fl = self.file(f)
            for n in needs:
                try:
                    fl.rev(n)
                except error.LookupError:
                    raise util.Abort(
                        _('missing file data for %s:%s - run hg verify') %
                        (f, hex(n)))

        return revisions, files
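
    # Added commentary: stream_in() implements the client side of a
    # streaming clone, copying raw store files from the server instead of
    # applying a changegroup.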
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
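            # Added commentary: each file arrives as a header line of the
            # form "<name>\0<size>\n" followed by exactly <size> bytes of
            # raw store data.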
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible:
                    # filters above 'served' are unlikely to be fetched from
                    # a clone.
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
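
    # Added commentary: pushkey namespaces used in this file include
    # 'phases' (see push() above); 'bookmarks' is another common one.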
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True