repoview: have unfilteredpropertycache use the underlying cache...
Pierre-Yves David
r19846:97896709 stable
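The change below makes `unfilteredpropertycache.__get__` delegate to the unfiltered repository whenever the descriptor fires on a filtered repoview: the value is then computed and cached once, on the underlying unfiltered repo, instead of being recomputed for (or cached on) each view. As a rough, self-contained sketch of that descriptor behavior (the `propertycache`, `Repo`, and `View` stand-ins here are illustrative only, not Mercurial's real `util.propertycache` or `repoview` classes):

# Minimal sketch of the new descriptor behavior, under simplified
# stand-in classes (not Mercurial's actual implementation).

class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result
    def cachevalue(self, obj, value):
        # cache as a plain instance attribute; later lookups bypass
        # this (non-data) descriptor entirely
        obj.__dict__[self.name] = value

class unfilteredpropertycache(propertycache):
    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # already unfiltered: compute and cache here as usual
            return super(unfilteredpropertycache, self).__get__(unfi)
        # filtered view: delegate, so the value is computed and cached
        # once on the unfiltered repo, never on the view itself
        return getattr(unfi, self.name)

class Repo(object):  # hypothetical stand-in for the unfiltered repo
    def unfiltered(self):
        return self
    @unfilteredpropertycache
    def expensive(self):
        print('computing')
        return 42

class View(object):  # hypothetical stand-in for a filtered repoview
    expensive = Repo.__dict__['expensive']  # share the descriptor
    def __init__(self, repo):
        self._unfi = repo
    def unfiltered(self):
        return self._unfi

repo = Repo()
view = View(repo)
print(view.expensive)  # computing, then 42; cached on repo, not view
print(repo.expensive)  # 42, straight from repo.__dict__

With this scheme every filtered view observes the same cached object, and helpers like `hasunfilteredcache()` (which inspect `vars(repo.unfiltered())`) stay accurate, since the cache can only ever live on the unfiltered repo.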
@@ -1,2448 +1,2451 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
-        return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
+        unfi = repo.unfiltered()
+        if unfi is repo:
+            return super(unfilteredpropertycache, self).__get__(unfi)
+        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None):
        return self._repo.getbundle(source, heads=heads, common=common,
                                    bundlecaps=None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
595 def tagtype(self, tagname):
598 def tagtype(self, tagname):
596 '''
599 '''
597 return the type of the given tag. result can be:
600 return the type of the given tag. result can be:
598
601
599 'local' : a local tag
602 'local' : a local tag
600 'global' : a global tag
603 'global' : a global tag
601 None : tag does not exist
604 None : tag does not exist
602 '''
605 '''
603
606
604 return self._tagscache.tagtypes.get(tagname)
607 return self._tagscache.tagtypes.get(tagname)
605
608
606 def tagslist(self):
609 def tagslist(self):
607 '''return a list of tags ordered by revision'''
610 '''return a list of tags ordered by revision'''
608 if not self._tagscache.tagslist:
611 if not self._tagscache.tagslist:
609 l = []
612 l = []
610 for t, n in self.tags().iteritems():
613 for t, n in self.tags().iteritems():
611 r = self.changelog.rev(n)
614 r = self.changelog.rev(n)
612 l.append((r, t, n))
615 l.append((r, t, n))
613 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
616 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
614
617
615 return self._tagscache.tagslist
618 return self._tagscache.tagslist
616
619
617 def nodetags(self, node):
620 def nodetags(self, node):
618 '''return the tags associated with a node'''
621 '''return the tags associated with a node'''
619 if not self._tagscache.nodetagscache:
622 if not self._tagscache.nodetagscache:
620 nodetagscache = {}
623 nodetagscache = {}
621 for t, n in self._tagscache.tags.iteritems():
624 for t, n in self._tagscache.tags.iteritems():
622 nodetagscache.setdefault(n, []).append(t)
625 nodetagscache.setdefault(n, []).append(t)
623 for tags in nodetagscache.itervalues():
626 for tags in nodetagscache.itervalues():
624 tags.sort()
627 tags.sort()
625 self._tagscache.nodetagscache = nodetagscache
628 self._tagscache.nodetagscache = nodetagscache
626 return self._tagscache.nodetagscache.get(node, [])
629 return self._tagscache.nodetagscache.get(node, [])
627
630
628 def nodebookmarks(self, node):
631 def nodebookmarks(self, node):
629 marks = []
632 marks = []
630 for bookmark, n in self._bookmarks.iteritems():
633 for bookmark, n in self._bookmarks.iteritems():
631 if n == node:
634 if n == node:
632 marks.append(bookmark)
635 marks.append(bookmark)
633 return sorted(marks)
636 return sorted(marks)
634
637
635 def branchmap(self):
638 def branchmap(self):
636 '''returns a dictionary {branch: [branchheads]}'''
639 '''returns a dictionary {branch: [branchheads]}'''
637 branchmap.updatecache(self)
640 branchmap.updatecache(self)
638 return self._branchcaches[self.filtername]
641 return self._branchcaches[self.filtername]
639
642
640
643
641 def _branchtip(self, heads):
644 def _branchtip(self, heads):
642 '''return the tipmost branch head in heads'''
645 '''return the tipmost branch head in heads'''
643 tip = heads[-1]
646 tip = heads[-1]
644 for h in reversed(heads):
647 for h in reversed(heads):
645 if not self[h].closesbranch():
648 if not self[h].closesbranch():
646 tip = h
649 tip = h
647 break
650 break
648 return tip
651 return tip
649
652
650 def branchtip(self, branch):
653 def branchtip(self, branch):
651 '''return the tip node for a given branch'''
654 '''return the tip node for a given branch'''
652 if branch not in self.branchmap():
655 if branch not in self.branchmap():
653 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
656 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
654 return self._branchtip(self.branchmap()[branch])
657 return self._branchtip(self.branchmap()[branch])
655
658
656 def branchtags(self):
659 def branchtags(self):
657 '''return a dict where branch names map to the tipmost head of
660 '''return a dict where branch names map to the tipmost head of
658 the branch, open heads come before closed'''
661 the branch, open heads come before closed'''
659 bt = {}
662 bt = {}
660 for bn, heads in self.branchmap().iteritems():
663 for bn, heads in self.branchmap().iteritems():
661 bt[bn] = self._branchtip(heads)
664 bt[bn] = self._branchtip(heads)
662 return bt
665 return bt
663
666
664 def lookup(self, key):
667 def lookup(self, key):
665 return self[key].node()
668 return self[key].node()
666
669
667 def lookupbranch(self, key, remote=None):
670 def lookupbranch(self, key, remote=None):
668 repo = remote or self
671 repo = remote or self
669 if key in repo.branchmap():
672 if key in repo.branchmap():
670 return key
673 return key
671
674
672 repo = (remote and remote.local()) and remote or self
675 repo = (remote and remote.local()) and remote or self
673 return repo[key].branch()
676 return repo[key].branch()
674
677
675 def known(self, nodes):
678 def known(self, nodes):
676 nm = self.changelog.nodemap
679 nm = self.changelog.nodemap
677 pc = self._phasecache
680 pc = self._phasecache
678 result = []
681 result = []
679 for n in nodes:
682 for n in nodes:
680 r = nm.get(n)
683 r = nm.get(n)
681 resp = not (r is None or pc.phase(self, r) >= phases.secret)
684 resp = not (r is None or pc.phase(self, r) >= phases.secret)
682 result.append(resp)
685 result.append(resp)
683 return result
686 return result
684
687
685 def local(self):
688 def local(self):
686 return self
689 return self
687
690
688 def cancopy(self):
691 def cancopy(self):
689 return self.local() # so statichttprepo's override of local() works
692 return self.local() # so statichttprepo's override of local() works
690
693
691 def join(self, f):
694 def join(self, f):
692 return os.path.join(self.path, f)
695 return os.path.join(self.path, f)
693
696
694 def wjoin(self, f):
697 def wjoin(self, f):
695 return os.path.join(self.root, f)
698 return os.path.join(self.root, f)
696
699
697 def file(self, f):
700 def file(self, f):
698 if f[0] == '/':
701 if f[0] == '/':
699 f = f[1:]
702 f = f[1:]
700 return filelog.filelog(self.sopener, f)
703 return filelog.filelog(self.sopener, f)
701
704
702 def changectx(self, changeid):
705 def changectx(self, changeid):
703 return self[changeid]
706 return self[changeid]
704
707
705 def parents(self, changeid=None):
708 def parents(self, changeid=None):
706 '''get list of changectxs for parents of changeid'''
709 '''get list of changectxs for parents of changeid'''
707 return self[changeid].parents()
710 return self[changeid].parents()
708
711
709 def setparents(self, p1, p2=nullid):
712 def setparents(self, p1, p2=nullid):
710 copies = self.dirstate.setparents(p1, p2)
713 copies = self.dirstate.setparents(p1, p2)
711 pctx = self[p1]
714 pctx = self[p1]
712 if copies:
715 if copies:
713 # Adjust copy records, the dirstate cannot do it, it
716 # Adjust copy records, the dirstate cannot do it, it
714 # requires access to parents manifests. Preserve them
717 # requires access to parents manifests. Preserve them
715 # only for entries added to first parent.
718 # only for entries added to first parent.
716 for f in copies:
719 for f in copies:
717 if f not in pctx and copies[f] in pctx:
720 if f not in pctx and copies[f] in pctx:
718 self.dirstate.copy(copies[f], f)
721 self.dirstate.copy(copies[f], f)
719 if p2 == nullid:
722 if p2 == nullid:
720 for f, s in sorted(self.dirstate.copies().items()):
723 for f, s in sorted(self.dirstate.copies().items()):
721 if f not in pctx and s not in pctx:
724 if f not in pctx and s not in pctx:
722 self.dirstate.copy(None, f)
725 self.dirstate.copy(None, f)
723
726
724 def filectx(self, path, changeid=None, fileid=None):
727 def filectx(self, path, changeid=None, fileid=None):
725 """changeid can be a changeset revision, node, or tag.
728 """changeid can be a changeset revision, node, or tag.
726 fileid can be a file revision or node."""
729 fileid can be a file revision or node."""
727 return context.filectx(self, path, changeid, fileid)
730 return context.filectx(self, path, changeid, fileid)
728
731
729 def getcwd(self):
732 def getcwd(self):
730 return self.dirstate.getcwd()
733 return self.dirstate.getcwd()
731
734
732 def pathto(self, f, cwd=None):
735 def pathto(self, f, cwd=None):
733 return self.dirstate.pathto(f, cwd)
736 return self.dirstate.pathto(f, cwd)
734
737
735 def wfile(self, f, mode='r'):
738 def wfile(self, f, mode='r'):
736 return self.wopener(f, mode)
739 return self.wopener(f, mode)
737
740
738 def _link(self, f):
741 def _link(self, f):
739 return self.wvfs.islink(f)
742 return self.wvfs.islink(f)
740
743
741 def _loadfilter(self, filter):
744 def _loadfilter(self, filter):
742 if filter not in self.filterpats:
745 if filter not in self.filterpats:
743 l = []
746 l = []
744 for pat, cmd in self.ui.configitems(filter):
747 for pat, cmd in self.ui.configitems(filter):
745 if cmd == '!':
748 if cmd == '!':
746 continue
749 continue
747 mf = matchmod.match(self.root, '', [pat])
750 mf = matchmod.match(self.root, '', [pat])
748 fn = None
751 fn = None
749 params = cmd
752 params = cmd
750 for name, filterfn in self._datafilters.iteritems():
753 for name, filterfn in self._datafilters.iteritems():
751 if cmd.startswith(name):
754 if cmd.startswith(name):
752 fn = filterfn
755 fn = filterfn
753 params = cmd[len(name):].lstrip()
756 params = cmd[len(name):].lstrip()
754 break
757 break
755 if not fn:
758 if not fn:
756 fn = lambda s, c, **kwargs: util.filter(s, c)
759 fn = lambda s, c, **kwargs: util.filter(s, c)
757 # Wrap old filters not supporting keyword arguments
760 # Wrap old filters not supporting keyword arguments
758 if not inspect.getargspec(fn)[2]:
761 if not inspect.getargspec(fn)[2]:
759 oldfn = fn
762 oldfn = fn
760 fn = lambda s, c, **kwargs: oldfn(s, c)
763 fn = lambda s, c, **kwargs: oldfn(s, c)
761 l.append((mf, fn, params))
764 l.append((mf, fn, params))
762 self.filterpats[filter] = l
765 self.filterpats[filter] = l
763 return self.filterpats[filter]
766 return self.filterpats[filter]
764
767
765 def _filter(self, filterpats, filename, data):
768 def _filter(self, filterpats, filename, data):
766 for mf, fn, cmd in filterpats:
769 for mf, fn, cmd in filterpats:
767 if mf(filename):
770 if mf(filename):
768 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
771 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
769 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
772 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
770 break
773 break
771
774
772 return data
775 return data
773
776
774 @unfilteredpropertycache
777 @unfilteredpropertycache
775 def _encodefilterpats(self):
778 def _encodefilterpats(self):
776 return self._loadfilter('encode')
779 return self._loadfilter('encode')
777
780
778 @unfilteredpropertycache
781 @unfilteredpropertycache
779 def _decodefilterpats(self):
782 def _decodefilterpats(self):
780 return self._loadfilter('decode')
783 return self._loadfilter('decode')
781
784
782 def adddatafilter(self, name, filter):
785 def adddatafilter(self, name, filter):
783 self._datafilters[name] = filter
786 self._datafilters[name] = filter
784
787
785 def wread(self, filename):
788 def wread(self, filename):
786 if self._link(filename):
789 if self._link(filename):
787 data = self.wvfs.readlink(filename)
790 data = self.wvfs.readlink(filename)
788 else:
791 else:
789 data = self.wopener.read(filename)
792 data = self.wopener.read(filename)
790 return self._filter(self._encodefilterpats, filename, data)
793 return self._filter(self._encodefilterpats, filename, data)
791
794
792 def wwrite(self, filename, data, flags):
795 def wwrite(self, filename, data, flags):
793 data = self._filter(self._decodefilterpats, filename, data)
796 data = self._filter(self._decodefilterpats, filename, data)
794 if 'l' in flags:
797 if 'l' in flags:
795 self.wopener.symlink(data, filename)
798 self.wopener.symlink(data, filename)
796 else:
799 else:
797 self.wopener.write(filename, data)
800 self.wopener.write(filename, data)
798 if 'x' in flags:
801 if 'x' in flags:
799 self.wvfs.setflags(filename, False, True)
802 self.wvfs.setflags(filename, False, True)
800
803
801 def wwritedata(self, filename, data):
804 def wwritedata(self, filename, data):
802 return self._filter(self._decodefilterpats, filename, data)
805 return self._filter(self._decodefilterpats, filename, data)
803
806
804 def transaction(self, desc):
807 def transaction(self, desc):
805 tr = self._transref and self._transref() or None
808 tr = self._transref and self._transref() or None
806 if tr and tr.running():
809 if tr and tr.running():
807 return tr.nest()
810 return tr.nest()
808
811
809 # abort here if the journal already exists
812 # abort here if the journal already exists
810 if self.svfs.exists("journal"):
813 if self.svfs.exists("journal"):
811 raise error.RepoError(
814 raise error.RepoError(
812 _("abandoned transaction found - run hg recover"))
815 _("abandoned transaction found - run hg recover"))
813
816
814 self._writejournal(desc)
817 self._writejournal(desc)
815 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
818 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
816
819
817 tr = transaction.transaction(self.ui.warn, self.sopener,
820 tr = transaction.transaction(self.ui.warn, self.sopener,
818 self.sjoin("journal"),
821 self.sjoin("journal"),
819 aftertrans(renames),
822 aftertrans(renames),
820 self.store.createmode)
823 self.store.createmode)
821 self._transref = weakref.ref(tr)
824 self._transref = weakref.ref(tr)
822 return tr
825 return tr
823
826
824 def _journalfiles(self):
827 def _journalfiles(self):
825 return ((self.svfs, 'journal'),
828 return ((self.svfs, 'journal'),
826 (self.vfs, 'journal.dirstate'),
829 (self.vfs, 'journal.dirstate'),
827 (self.vfs, 'journal.branch'),
830 (self.vfs, 'journal.branch'),
828 (self.vfs, 'journal.desc'),
831 (self.vfs, 'journal.desc'),
829 (self.vfs, 'journal.bookmarks'),
832 (self.vfs, 'journal.bookmarks'),
830 (self.svfs, 'journal.phaseroots'))
833 (self.svfs, 'journal.phaseroots'))
831
834
832 def undofiles(self):
835 def undofiles(self):
833 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
836 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
834
837
835 def _writejournal(self, desc):
838 def _writejournal(self, desc):
836 self.opener.write("journal.dirstate",
839 self.opener.write("journal.dirstate",
837 self.opener.tryread("dirstate"))
840 self.opener.tryread("dirstate"))
838 self.opener.write("journal.branch",
841 self.opener.write("journal.branch",
839 encoding.fromlocal(self.dirstate.branch()))
842 encoding.fromlocal(self.dirstate.branch()))
840 self.opener.write("journal.desc",
843 self.opener.write("journal.desc",
841 "%d\n%s\n" % (len(self), desc))
844 "%d\n%s\n" % (len(self), desc))
842 self.opener.write("journal.bookmarks",
845 self.opener.write("journal.bookmarks",
843 self.opener.tryread("bookmarks"))
846 self.opener.tryread("bookmarks"))
844 self.sopener.write("journal.phaseroots",
847 self.sopener.write("journal.phaseroots",
845 self.sopener.tryread("phaseroots"))
848 self.sopener.tryread("phaseroots"))
846
849
847 def recover(self):
850 def recover(self):
848 lock = self.lock()
851 lock = self.lock()
849 try:
852 try:
850 if self.svfs.exists("journal"):
853 if self.svfs.exists("journal"):
851 self.ui.status(_("rolling back interrupted transaction\n"))
854 self.ui.status(_("rolling back interrupted transaction\n"))
852 transaction.rollback(self.sopener, self.sjoin("journal"),
855 transaction.rollback(self.sopener, self.sjoin("journal"),
853 self.ui.warn)
856 self.ui.warn)
854 self.invalidate()
857 self.invalidate()
855 return True
858 return True
856 else:
859 else:
857 self.ui.warn(_("no interrupted transaction available\n"))
860 self.ui.warn(_("no interrupted transaction available\n"))
858 return False
861 return False
859 finally:
862 finally:
860 lock.release()
863 lock.release()
861
864
862 def rollback(self, dryrun=False, force=False):
865 def rollback(self, dryrun=False, force=False):
863 wlock = lock = None
866 wlock = lock = None
864 try:
867 try:
865 wlock = self.wlock()
868 wlock = self.wlock()
866 lock = self.lock()
869 lock = self.lock()
867 if self.svfs.exists("undo"):
870 if self.svfs.exists("undo"):
868 return self._rollback(dryrun, force)
871 return self._rollback(dryrun, force)
869 else:
872 else:
870 self.ui.warn(_("no rollback information available\n"))
873 self.ui.warn(_("no rollback information available\n"))
871 return 1
874 return 1
872 finally:
875 finally:
873 release(lock, wlock)
876 release(lock, wlock)
874
877
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

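# A minimal sketch of the 'undo.desc' convention _rollback relies on:
# line one is the old changelog length, line two the command that wrote
# the file, and an optional third line carries extra detail.
def parse_undo_desc(data):
    args = data.splitlines()
    oldlen, desc, detail = int(args[0]), args[1], None
    if len(args) >= 3:
        detail = args[2]
    return oldlen, desc, detail

# parse_undo_desc('42\ncommit\namended') == (42, 'commit', 'amended')
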
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly reread the dirstate (i.e. restore it to a previously
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

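# Illustrative sketch (not Mercurial's actual descriptor) of why a bare
# delattr() is enough to drop these caches: a propertycache-style
# descriptor stores its computed value in the instance __dict__, so
# deleting that attribute forces recomputation on the next access.
class cachedprop(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        value = self.func(obj)
        obj.__dict__[self.name] = value  # shadows the descriptor
        return value

class demo(object):
    @cachedprop
    def answer(self):
        return 42

d = demo()
d.answer           # computed, then cached in d.__dict__
del d.answer       # invalidated, like delattr(unfiltered, k) above
d.answer           # computed again
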
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

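# A minimal sketch, under assumed names, of the acquisition strategy in
# _lock: try a non-blocking acquire first, and only fall back to a
# bounded wait (warning the user) when someone else holds the lock.
class LockHeld(Exception):
    pass

def acquire_with_warning(make_lock, warn, timeout=600):
    try:
        return make_lock(0)          # timeout 0: fail immediately
    except LockHeld:
        warn('waiting for lock\n')
        return make_lock(timeout)    # then block, up to the timeout
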
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed when the lock is released; if no
        lock is currently held, it runs immediately."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

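# Sketch of the weakref caching shared by lock() and wlock(), with a toy
# stand-in for the real lock class: the repository holds only a weak
# reference, so the lock disappears when all callers drop it, yet nested
# calls reenter the still-held lock instead of creating a new one.
import weakref

class toylock(object):
    def __init__(self):
        self.held = 1
    def lock(self):
        self.held += 1
    def release(self):
        self.held -= 1

class lockcache(object):
    _lockref = None
    def lock(self):
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()                   # reenter the live lock
            return l
        l = toylock()
        self._lockref = weakref.ref(l) # weak: dropping l frees the lock
        return l
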
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

1052 """
1055 """
1053 commit an individual file as part of a larger transaction
1056 commit an individual file as part of a larger transaction
1054 """
1057 """
1055
1058
1056 fname = fctx.path()
1059 fname = fctx.path()
1057 text = fctx.data()
1060 text = fctx.data()
1058 flog = self.file(fname)
1061 flog = self.file(fname)
1059 fparent1 = manifest1.get(fname, nullid)
1062 fparent1 = manifest1.get(fname, nullid)
1060 fparent2 = fparent2o = manifest2.get(fname, nullid)
1063 fparent2 = fparent2o = manifest2.get(fname, nullid)
1061
1064
1062 meta = {}
1065 meta = {}
1063 copy = fctx.renamed()
1066 copy = fctx.renamed()
1064 if copy and copy[0] != fname:
1067 if copy and copy[0] != fname:
1065 # Mark the new revision of this file as a copy of another
1068 # Mark the new revision of this file as a copy of another
1066 # file. This copy data will effectively act as a parent
1069 # file. This copy data will effectively act as a parent
1067 # of this new revision. If this is a merge, the first
1070 # of this new revision. If this is a merge, the first
1068 # parent will be the nullid (meaning "look up the copy data")
1071 # parent will be the nullid (meaning "look up the copy data")
1069 # and the second one will be the other parent. For example:
1072 # and the second one will be the other parent. For example:
1070 #
1073 #
1071 # 0 --- 1 --- 3 rev1 changes file foo
1074 # 0 --- 1 --- 3 rev1 changes file foo
1072 # \ / rev2 renames foo to bar and changes it
1075 # \ / rev2 renames foo to bar and changes it
1073 # \- 2 -/ rev3 should have bar with all changes and
1076 # \- 2 -/ rev3 should have bar with all changes and
1074 # should record that bar descends from
1077 # should record that bar descends from
1075 # bar in rev2 and foo in rev1
1078 # bar in rev2 and foo in rev1
1076 #
1079 #
1077 # this allows this merge to succeed:
1080 # this allows this merge to succeed:
1078 #
1081 #
1079 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1082 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1080 # \ / merging rev3 and rev4 should use bar@rev2
1083 # \ / merging rev3 and rev4 should use bar@rev2
1081 # \- 2 --- 4 as the merge base
1084 # \- 2 --- 4 as the merge base
1082 #
1085 #
1083
1086
1084 cfname = copy[0]
1087 cfname = copy[0]
1085 crev = manifest1.get(cfname)
1088 crev = manifest1.get(cfname)
1086 newfparent = fparent2
1089 newfparent = fparent2
1087
1090
1088 if manifest2: # branch merge
1091 if manifest2: # branch merge
1089 if fparent2 == nullid or crev is None: # copied on remote side
1092 if fparent2 == nullid or crev is None: # copied on remote side
1090 if cfname in manifest2:
1093 if cfname in manifest2:
1091 crev = manifest2[cfname]
1094 crev = manifest2[cfname]
1092 newfparent = fparent1
1095 newfparent = fparent1
1093
1096
1094 # find source in nearest ancestor if we've lost track
1097 # find source in nearest ancestor if we've lost track
1095 if not crev:
1098 if not crev:
1096 self.ui.debug(" %s: searching for copy revision for %s\n" %
1099 self.ui.debug(" %s: searching for copy revision for %s\n" %
1097 (fname, cfname))
1100 (fname, cfname))
1098 for ancestor in self[None].ancestors():
1101 for ancestor in self[None].ancestors():
1099 if cfname in ancestor:
1102 if cfname in ancestor:
1100 crev = ancestor[cfname].filenode()
1103 crev = ancestor[cfname].filenode()
1101 break
1104 break
1102
1105
1103 if crev:
1106 if crev:
1104 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1107 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1105 meta["copy"] = cfname
1108 meta["copy"] = cfname
1106 meta["copyrev"] = hex(crev)
1109 meta["copyrev"] = hex(crev)
1107 fparent1, fparent2 = nullid, newfparent
1110 fparent1, fparent2 = nullid, newfparent
1108 else:
1111 else:
1109 self.ui.warn(_("warning: can't find ancestor for '%s' "
1112 self.ui.warn(_("warning: can't find ancestor for '%s' "
1110 "copied from '%s'!\n") % (fname, cfname))
1113 "copied from '%s'!\n") % (fname, cfname))
1111
1114
1112 elif fparent2 != nullid:
1115 elif fparent2 != nullid:
1113 # is one parent an ancestor of the other?
1116 # is one parent an ancestor of the other?
1114 fparentancestor = flog.ancestor(fparent1, fparent2)
1117 fparentancestor = flog.ancestor(fparent1, fparent2)
1115 if fparentancestor == fparent1:
1118 if fparentancestor == fparent1:
1116 fparent1, fparent2 = fparent2, nullid
1119 fparent1, fparent2 = fparent2, nullid
1117 elif fparentancestor == fparent2:
1120 elif fparentancestor == fparent2:
1118 fparent2 = nullid
1121 fparent2 = nullid
1119
1122
1120 # is the file changed?
1123 # is the file changed?
1121 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1124 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1122 changelist.append(fname)
1125 changelist.append(fname)
1123 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1126 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1124
1127
1125 # are just the flags changed during merge?
1128 # are just the flags changed during merge?
1126 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1129 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1127 changelist.append(fname)
1130 changelist.append(fname)
1128
1131
1129 return fparent1
1132 return fparent1
1130
1133
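# Sketch of the parent reduction in _filecommit: when one filelog
# parent is an ancestor of the other, the merge contributed nothing new
# for this file, so the redundant parent is dropped. `ancestor` is an
# assumed helper returning the greatest common ancestor of two nodes.
def reduce_file_parents(ancestor, p1, p2, nullid):
    a = ancestor(p1, p2)
    if a == p1:
        p1, p2 = p2, nullid   # p1 is an ancestor of p2: keep only p2
    elif a == p2:
        p2 = nullid           # p2 is an ancestor of p1: keep only p1
    return p1, p2
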
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

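# The "visited directory" check above leans on Python's for/else: the
# else arm runs only when the loop finishes without break, i.e. when no
# committed file lies under the directory. A standalone sketch:
def any_under(directory, files):
    d = directory + '/'
    for f in files:
        if f.startswith(d):
            break              # found something under the directory
    else:
        return False           # loop ended without break
    return True

# any_under('docs', ['docs/a.txt', 'src/b.py']) -> True
# any_under('tests', ['docs/a.txt', 'src/b.py']) -> False
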
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase is 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

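# Why linkrev is len(self) above: revision numbers are allocated
# densely, so the changeset being built will receive the current
# changelog length as its number; every filelog entry written in this
# transaction records that number as its linkrev, tying file revisions
# back to the changeset that introduced them. A toy illustration:
changelog_entries = ['rev0', 'rev1']
linkrev = len(changelog_entries)        # the next revision is number 2
changelog_entries.append('rev2')
assert changelog_entries[linkrev] == 'rev2'
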
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

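# A simplified sketch of the two-manifest comparison above (the real
# loop also consults flags and file contents): walk mf2 against a
# consumable copy of mf1 so that whatever is left over at the end is
# exactly the removed set.
def diff_manifests(mf1, mf2):
    mf1 = dict(mf1)
    modified, added, clean = [], [], []
    for fn, node in mf2.items():
        if fn in mf1:
            if mf1[fn] != node:
                modified.append(fn)
            else:
                clean.append(fn)
            del mf1[fn]
        else:
            added.append(fn)
    removed = sorted(mf1)
    return modified, added, removed, clean

# diff_manifests({'a': 1, 'b': 2}, {'a': 1, 'c': 3})
# -> ([], ['c'], ['b'], ['a'])
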
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

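# Sketch of the sampling done by between(): walking first parents from
# top toward bottom, it keeps the nodes at exponentially growing
# distances (1, 2, 4, 8, ...), which the old discovery protocol uses to
# bisect long linear ranges cheaply. `parents` is an assumed
# node -> first-parent mapping.
def spaced_sample(parents, top, bottom):
    n, out, i, f = top, [], 0, 1
    while n != bottom and n is not None:
        if i == f:
            out.append(n)
            f *= 2
        n = parents.get(n)
        i += 1
    return out

# With a linear chain 9 -> 8 -> ... -> 0:
# chain = dict((k, k - 1) for k in range(1, 10))
# spaced_sample(chain, 9, 0) -> [8, 7, 5, 1]
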
    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing, or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    # TODO: get bundlecaps from remote
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                # we use the unfiltered changelog here because hidden
                # revisions must be taken into account for phase
                # synchronization; they may become public and visible again.
                cl = self.unfiltered().changelog
                clstart = len(cl)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(cl)
                added = [cl.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            def gettransaction():
                if tr is None:
                    return self.transaction(trname)
                return tr

            obstr = obsolete.syncpull(self, remote, gettransaction)
            if obstr is not None:
                tr = obstr

            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

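# Distilled sketch of the phase decision at the end of pull(), with
# `advance` standing in for phases.advanceboundary: a publishing remote
# (or an old one that does not answer the 'phases' listkeys call) makes
# every common changeset public, while a non-publishing remote only
# publishes the heads it reports as public.
def sync_phases(advance, remotephases, subset, remote_public_heads):
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        advance('public', remote_public_heads)
        advance('draft', subset)
    else:
        advance('public', subset)
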
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

1747 def push(self, remote, force=False, revs=None, newbranch=False):
1750 def push(self, remote, force=False, revs=None, newbranch=False):
1748 '''Push outgoing changesets (limited by revs) from the current
1751 '''Push outgoing changesets (limited by revs) from the current
1749 repository to remote. Return an integer:
1752 repository to remote. Return an integer:
1750 - None means nothing to push
1753 - None means nothing to push
1751 - 0 means HTTP error
1754 - 0 means HTTP error
1752 - 1 means we pushed and remote head count is unchanged *or*
1755 - 1 means we pushed and remote head count is unchanged *or*
1753 we have outgoing changesets but refused to push
1756 we have outgoing changesets but refused to push
1754 - other values as described by addchangegroup()
1757 - other values as described by addchangegroup()
1755 '''
1758 '''
1756 # there are two ways to push to remote repo:
1759 # there are two ways to push to remote repo:
1757 #
1760 #
1758 # addchangegroup assumes local user can lock remote
1761 # addchangegroup assumes local user can lock remote
1759 # repo (local filesystem, old ssh servers).
1762 # repo (local filesystem, old ssh servers).
1760 #
1763 #
1761 # unbundle assumes local user cannot lock remote repo (new ssh
1764 # unbundle assumes local user cannot lock remote repo (new ssh
1762 # servers, http servers).
1765 # servers, http servers).
1763
1766
if not remote.canpush():
raise util.Abort(_("destination does not support push"))
unfi = self.unfiltered()
def localphasemove(nodes, phase=phases.public):
"""move <nodes> to <phase> in the local source repo"""
if locallock is not None:
phases.advanceboundary(self, phase, nodes)
else:
# repo is not locked, do not change any phases!
# Informs the user that phases should have been moved when
# applicable.
actualmoves = [n for n in nodes if phase < self[n].phase()]
phasestr = phases.phasenames[phase]
if actualmoves:
self.ui.status(_('cannot lock source repo, skipping local'
' %s phase update\n') % phasestr)
# get local lock as we might write phase data
locallock = None
try:
locallock = self.lock()
except IOError, err:
if err.errno != errno.EACCES:
raise
# source repo cannot be locked.
# We do not abort the push, but just disable the local phase
# synchronisation.
msg = 'cannot lock source repository: %s\n' % err
self.ui.debug(msg)
try:
self.checkpush(force, revs)
lock = None
unbundle = remote.capable('unbundle')
if not unbundle:
lock = remote.lock()
try:
# discovery
fci = discovery.findcommonincoming
commoninc = fci(unfi, remote, force=force)
common, inc, remoteheads = commoninc
fco = discovery.findcommonoutgoing
outgoing = fco(unfi, remote, onlyheads=revs,
commoninc=commoninc, force=force)


if not outgoing.missing:
# nothing to push
scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
ret = None
else:
# something to push
if not force:
# if self.obsstore == False --> no obsolete
# then, save the iteration
if unfi.obsstore:
# these messages are here for the 80-char limit reason
mso = _("push includes obsolete changeset: %s!")
mst = "push includes %s changeset: %s!"
# plain versions for the i18n tool to detect them
_("push includes unstable changeset: %s!")
_("push includes bumped changeset: %s!")
_("push includes divergent changeset: %s!")
# If we are pushing and there is at least one obsolete or
# unstable changeset in missing, at least one of the missing
# heads will be obsolete or unstable, so checking heads only
# is ok
for node in outgoing.missingheads:
ctx = unfi[node]
if ctx.obsolete():
raise util.Abort(mso % ctx)
elif ctx.troubled():
raise util.Abort(_(mst)
% (ctx.troubles()[0],
ctx))
discovery.checkheads(unfi, remote, outgoing,
remoteheads, newbranch,
bool(inc))

# TODO: get bundlecaps from remote
bundlecaps = None
# create a changegroup from local
if revs is None and not outgoing.excluded:
# push everything,
# use the fast path, no race possible on push
bundler = changegroup.bundle10(self, bundlecaps)
cg = self._changegroupsubset(outgoing,
bundler,
'push',
fastpath=True)
else:
cg = self.getlocalbundle('push', outgoing, bundlecaps)

# apply changegroup to remote
if unbundle:
# local repo finds heads on server, finds out what
# revs it must push. once revs transferred, if server
# finds it has different heads (someone else won
# commit/push race), server aborts.
if force:
remoteheads = ['force']
# ssh: return remote's addchangegroup()
# http: return remote's addchangegroup() or 0 for error
ret = remote.unbundle(cg, remoteheads, 'push')
else:
# we return an integer indicating remote head count
# change
ret = remote.addchangegroup(cg, 'push', self.url())

if ret:
# push succeeded, synchronize the target of the push
cheads = outgoing.missingheads
elif revs is None:
# all-out push failed; synchronize all common
cheads = outgoing.commonheads
else:
# I want cheads = heads(::missingheads and ::commonheads)
# (missingheads is revs with secret changeset filtered out)
#
# This can be expressed as:
# cheads = ( (missingheads and ::commonheads)
#          + (commonheads and ::missingheads))
#
# while trying to push we already computed the following:
# common = (::commonheads)
# missing = ((commonheads::missingheads) - commonheads)
#
# We can pick:
# * missingheads part of common (::commonheads)
common = set(outgoing.common)
cheads = [node for node in revs if node in common]
# and
# * commonheads parents on missing
revset = unfi.set('%ln and parents(roots(%ln))',
outgoing.commonheads,
outgoing.missing)
cheads.extend(c.node() for c in revset)
# even when we don't push, exchanging phase data is useful
remotephases = remote.listkeys('phases')
if (self.ui.configbool('ui', '_usedassubrepo', False)
and remotephases # server supports phases
and ret is None # nothing was pushed
and remotephases.get('publishing', False)):
# When:
# - this is a subrepo push
# - and remote supports phases
# - and no changeset was pushed
# - and remote is publishing
# We may be in the issue 3871 case!
# We drop the possible phase synchronisation done as a
# courtesy to publish changesets that may still be draft
# locally on the remote.
remotephases = {'publishing': 'True'}
if not remotephases: # old server or public only repo
localphasemove(cheads)
# don't push any phase data as there is nothing to push
else:
ana = phases.analyzeremotephases(self, cheads, remotephases)
pheads, droots = ana
### Apply remote phase on local
if remotephases.get('publishing', False):
localphasemove(cheads)
else: # publish = False
localphasemove(pheads)
localphasemove(cheads, phases.draft)
### Apply local phase on remote

# Get the list of all revs draft on remote but public here.
# XXX Beware that the revset breaks if droots is not strictly
# XXX roots; we may want to ensure it is, but that is costly
outdated = unfi.set('heads((%ln::%ln) and public())',
droots, cheads)
for newremotehead in outdated:
r = remote.pushkey('phases',
newremotehead.hex(),
str(phases.draft),
str(phases.public))
if not r:
self.ui.warn(_('updating %s to public failed!\n')
% newremotehead)
self.ui.debug('try to push obsolete markers to remote\n')
obsolete.syncpush(self, remote)
finally:
if lock is not None:
lock.release()
finally:
if locallock is not None:
locallock.release()

self.ui.debug("checking for updated bookmarks\n")
rb = remote.listkeys('bookmarks')
revnums = map(unfi.changelog.rev, revs or [])
ancestors = [
a for a in unfi.changelog.ancestors(revnums, inclusive=True)]
for k in rb.keys():
if k in unfi._bookmarks:
nr, nl = rb[k], hex(self._bookmarks[k])
if nr in unfi:
cr = unfi[nr]
cl = unfi[nl]
if bookmarks.validdest(unfi, cr, cl):
if ancestors and cl.rev() not in ancestors:
continue
r = remote.pushkey('bookmarks', k, nr, nl)
if r:
self.ui.status(_("updating bookmark %s\n") % k)
else:
self.ui.warn(_('updating bookmark %s'
' failed!\n') % k)

return ret

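A hedged caller-side sketch of the return contract documented in the push() docstring above (assumes `repo` and a `remote` peer are already set up; the messages are illustrative, not Mercurial's own):

ret = repo.push(remote, force=False)
if ret is None:
    repo.ui.status('nothing to push\n')      # no outgoing changesets
elif ret == 0:
    repo.ui.warn('push failed over HTTP\n')  # remote reported an error
else:
    repo.ui.status('push completed\n')       # head-count codes: see addchangegroup()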
def changegroupinfo(self, nodes, source):
if self.ui.verbose or source == 'bundle':
self.ui.status(_("%d changesets found\n") % len(nodes))
if self.ui.debugflag:
self.ui.debug("list of changesets:\n")
for node in nodes:
self.ui.debug("%s\n" % hex(node))

def changegroupsubset(self, bases, heads, source):
"""Compute a changegroup consisting of all the nodes that are
descendants of any of the bases and ancestors of any of the heads.
Return a chunkbuffer object whose read() method will return
successive changegroup chunks.

It is fairly complex as determining which filenodes and which
manifest nodes need to be included for the changeset to be complete
is non-trivial.

Another wrinkle is doing the reverse, figuring out which changeset in
the changegroup a particular filenode or manifestnode belongs to.
"""
cl = self.changelog
if not bases:
bases = [nullid]
# TODO: remove call to nodesbetween.
csets, bases, heads = cl.nodesbetween(bases, heads)
bases = [p for n in bases for p in cl.parents(n) if p != nullid]
outgoing = discovery.outgoing(cl, bases, heads)
bundler = changegroup.bundle10(self)
return self._changegroupsubset(outgoing, bundler, source)

def getlocalbundle(self, source, outgoing, bundlecaps=None):
"""Like getbundle, but taking a discovery.outgoing as an argument.

This is only implemented for local repos and reuses potentially
precomputed sets in outgoing."""
if not outgoing.missing:
return None
bundler = changegroup.bundle10(self, bundlecaps)
return self._changegroupsubset(outgoing, bundler, source)

def getbundle(self, source, heads=None, common=None, bundlecaps=None):
"""Like changegroupsubset, but returns the set difference between the
ancestors of heads and the ancestors of common.

If heads is None, use the local heads. If common is None, use [nullid].

The nodes in common might not all be known locally due to the way the
current discovery protocol works.
"""
cl = self.changelog
if common:
hasnode = cl.hasnode
common = [n for n in common if hasnode(n)]
else:
common = [nullid]
if not heads:
heads = cl.heads()
return self.getlocalbundle(source,
discovery.outgoing(cl, common, heads),
bundlecaps=bundlecaps)

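A hedged usage sketch of the heads/common contract just described; `headnode` and `commonnode` are placeholder nodes, and the call mirrors what a pull does internally:

# bundle covers ancestors(headnode) - ancestors(commonnode)
cg = repo.getbundle('pull', heads=[headnode], common=[commonnode])
if cg is not None:
    repo.addchangegroup(cg, 'pull', 'bundle:example')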
@unfilteredmethod
def _changegroupsubset(self, outgoing, bundler, source,
fastpath=False):
commonrevs = outgoing.common
csets = outgoing.missing
heads = outgoing.missingheads
# We go through the fast path if we get told to, or if all (unfiltered)
# heads have been requested (since we then know that all linkrevs will
# be pulled by the client).
heads.sort()
fastpathlinkrev = fastpath or (
self.filtername is None and heads == sorted(self.heads()))

self.hook('preoutgoing', throw=True, source=source)
self.changegroupinfo(csets, source)
gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')

def changegroup(self, basenodes, source):
# to avoid a race we use changegroupsubset() (issue1320)
return self.changegroupsubset(basenodes, self.heads(), source)

@unfilteredmethod
def addchangegroup(self, source, srctype, url, emptyok=False):
"""Add the changegroup returned by source.read() to this repo.
srctype is a string like 'push', 'pull', or 'unbundle'. url is
the URL of the repo where this changegroup is coming from.

Return an integer summarizing the change to this repo:
- nothing changed or no source: 0
- more heads than before: 1+added heads (2..n)
- fewer heads than before: -1-removed heads (-2..-n)
- number of heads stays the same: 1
"""
def csmap(x):
self.ui.debug("add changeset %s\n" % short(x))
return len(cl)

def revmap(x):
return cl.rev(x)

if not source:
return 0

self.hook('prechangegroup', throw=True, source=srctype, url=url)

changesets = files = revisions = 0
efiles = set()

# write changelog data to temp files so concurrent readers will not see
# inconsistent view
cl = self.changelog
cl.delayupdate()
oldheads = cl.heads()

tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
try:
trp = weakref.proxy(tr)
# pull off the changeset group
self.ui.status(_("adding changesets\n"))
clstart = len(cl)
class prog(object):
step = _('changesets')
count = 1
ui = self.ui
total = None
def __call__(self):
self.ui.progress(self.step, self.count, unit=_('chunks'),
total=self.total)
self.count += 1
pr = prog()
source.callback = pr

source.changelogheader()
srccontent = cl.addgroup(source, csmap, trp)
if not (srccontent or emptyok):
raise util.Abort(_("received changelog group is empty"))
clend = len(cl)
changesets = clend - clstart
for c in xrange(clstart, clend):
efiles.update(self[c].files())
efiles = len(efiles)
self.ui.progress(_('changesets'), None)

# pull off the manifest group
self.ui.status(_("adding manifests\n"))
pr.step = _('manifests')
pr.count = 1
pr.total = changesets # manifests <= changesets
# no need to check for empty manifest group here:
# if the result of the merge of 1 and 2 is the same in 3 and 4,
# no new manifest will be created and the manifest group will
# be empty during the pull
source.manifestheader()
self.manifest.addgroup(source, revmap, trp)
self.ui.progress(_('manifests'), None)

needfiles = {}
if self.ui.configbool('server', 'validate', default=False):
# validate incoming csets have their manifests
for cset in xrange(clstart, clend):
mfest = self.changelog.read(self.changelog.node(cset))[0]
mfest = self.manifest.readdelta(mfest)
# store file nodes we must see
for f, n in mfest.iteritems():
needfiles.setdefault(f, set()).add(n)

# process the files
self.ui.status(_("adding file changes\n"))
pr.step = _('files')
pr.count = 1
pr.total = efiles
source.callback = None

newrevs, newfiles = self.addchangegroupfiles(source, revmap, trp,
pr, needfiles)
revisions += newrevs
files += newfiles

dh = 0
if oldheads:
heads = cl.heads()
dh = len(heads) - len(oldheads)
for h in heads:
if h not in oldheads and self[h].closesbranch():
dh -= 1
htext = ""
if dh:
htext = _(" (%+d heads)") % dh

2167 self.ui.status(_("added %d changesets"
2170 self.ui.status(_("added %d changesets"
2168 " with %d changes to %d files%s\n")
2171 " with %d changes to %d files%s\n")
2169 % (changesets, revisions, files, htext))
2172 % (changesets, revisions, files, htext))
2170 self.invalidatevolatilesets()
2173 self.invalidatevolatilesets()
2171
2174
2172 if changesets > 0:
2175 if changesets > 0:
2173 p = lambda: cl.writepending() and self.root or ""
2176 p = lambda: cl.writepending() and self.root or ""
2174 self.hook('pretxnchangegroup', throw=True,
2177 self.hook('pretxnchangegroup', throw=True,
2175 node=hex(cl.node(clstart)), source=srctype,
2178 node=hex(cl.node(clstart)), source=srctype,
2176 url=url, pending=p)
2179 url=url, pending=p)
2177
2180
added = [cl.node(r) for r in xrange(clstart, clend)]
publishing = self.ui.configbool('phases', 'publish', True)
if srctype == 'push':
# Old servers cannot push the boundary themselves.
# New servers won't push the boundary if the changeset already
# existed locally as secret
#
# We should not use 'added' here but the list of all changes in
# the bundle
if publishing:
phases.advanceboundary(self, phases.public, srccontent)
else:
phases.advanceboundary(self, phases.draft, srccontent)
phases.retractboundary(self, phases.draft, added)
elif srctype != 'strip':
# publishing only alters behavior during push
#
# strip should not touch the boundary at all
phases.retractboundary(self, phases.draft, added)

# make changelog see real files again
cl.finalize(trp)

tr.close()

if changesets > 0:
if srctype != 'strip':
# During strip, branchcache is invalid but the coming call to
# `destroyed` will repair it.
# In other cases we can safely update the cache on disk.
branchmap.updatecache(self.filtered('served'))
def runhooks():
# forcefully update the on-disk branch cache
self.ui.debug("updating the branch cache\n")
self.hook("changegroup", node=hex(cl.node(clstart)),
source=srctype, url=url)

for n in added:
self.hook("incoming", node=hex(n), source=srctype,
url=url)

newheads = [h for h in self.heads() if h not in oldheads]
self.ui.log("incoming",
"%s incoming changes - new heads: %s\n",
len(added),
', '.join([hex(c[:6]) for c in newheads]))
self._afterlock(runhooks)

finally:
tr.release()
# never return 0 here:
if dh < 0:
return dh - 1
else:
return dh + 1

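A hedged sketch decoding the head-delta convention implemented just above (`dh - 1` on a net head loss, `dh + 1` otherwise, so 0 is never returned; `describechange` is a hypothetical helper, not part of localrepo.py):

def describechange(ret):
    # inverse of the dh encoding in addchangegroup()
    if ret == 0:
        return 'nothing changed'
    if ret == 1:
        return 'head count unchanged'
    if ret > 1:
        return '%d head(s) added' % (ret - 1)
    return '%d head(s) removed' % (-ret - 1)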
def addchangegroupfiles(self, source, revmap, trp, pr, needfiles):
revisions = 0
files = 0
while True:
chunkdata = source.filelogheader()
if not chunkdata:
break
f = chunkdata["filename"]
self.ui.debug("adding %s revisions\n" % f)
pr()
fl = self.file(f)
o = len(fl)
if not fl.addgroup(source, revmap, trp):
raise util.Abort(_("received file revlog group is empty"))
revisions += len(fl) - o
files += 1
if f in needfiles:
needs = needfiles[f]
for new in xrange(o, len(fl)):
n = fl.node(new)
if n in needs:
needs.remove(n)
else:
raise util.Abort(
_("received spurious file revlog entry"))
if not needs:
del needfiles[f]
self.ui.progress(_('files'), None)

for f, needs in needfiles.iteritems():
fl = self.file(f)
for n in needs:
try:
fl.rev(n)
except error.LookupError:
raise util.Abort(
_('missing file data for %s:%s - run hg verify') %
(f, hex(n)))

return revisions, files

def stream_in(self, remote, requirements):
lock = self.lock()
try:
# Save remote branchmap. We will use it later
# to speed up branchcache creation
rbranchmap = None
if remote.capable("branchmap"):
rbranchmap = remote.branchmap()

fp = remote.stream_out()
l = fp.readline()
try:
resp = int(l)
except ValueError:
raise error.ResponseError(
_('unexpected response from remote server:'), l)
if resp == 1:
raise util.Abort(_('operation forbidden by server'))
elif resp == 2:
raise util.Abort(_('locking the remote repository failed'))
elif resp != 0:
raise util.Abort(_('the server sent an unknown error code'))
self.ui.status(_('streaming all changes\n'))
l = fp.readline()
try:
total_files, total_bytes = map(int, l.split(' ', 1))
except (ValueError, TypeError):
raise error.ResponseError(
_('unexpected response from remote server:'), l)
self.ui.status(_('%d files to transfer, %s of data\n') %
(total_files, util.bytecount(total_bytes)))
handled_bytes = 0
self.ui.progress(_('clone'), 0, total=total_bytes)
start = time.time()
for i in xrange(total_files):
# XXX doesn't support '\n' or '\r' in filenames
l = fp.readline()
try:
name, size = l.split('\0', 1)
size = int(size)
except (ValueError, TypeError):
raise error.ResponseError(
_('unexpected response from remote server:'), l)
if self.ui.debugflag:
self.ui.debug('adding %s (%s)\n' %
(name, util.bytecount(size)))
# for backwards compat, name was partially encoded
ofp = self.sopener(store.decodedir(name), 'w')
for chunk in util.filechunkiter(fp, limit=size):
handled_bytes += len(chunk)
self.ui.progress(_('clone'), handled_bytes,
total=total_bytes)
ofp.write(chunk)
ofp.close()
elapsed = time.time() - start
if elapsed <= 0:
elapsed = 0.001
self.ui.progress(_('clone'), None)
self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
(util.bytecount(total_bytes), elapsed,
util.bytecount(total_bytes / elapsed)))

# new requirements = old non-format requirements +
# new format-related
# requirements from the streamed-in repository
requirements.update(set(self.requirements) - self.supportedformats)
self._applyrequirements(requirements)
self._writerequirements()

if rbranchmap:
rbheads = []
for bheads in rbranchmap.itervalues():
rbheads.extend(bheads)

if rbheads:
rtiprev = max((int(self.changelog.rev(node))
for node in rbheads))
cache = branchmap.branchcache(rbranchmap,
self[rtiprev].node(),
rtiprev)
# Try to stick it as low as possible
# filters above 'served' are unlikely to be fetched from a clone
for candidate in ('base', 'immutable', 'served'):
rview = self.filtered(candidate)
if cache.validfor(rview):
self._branchcaches[candidate] = cache
cache.write(rview)
break
self.invalidate()
return len(self.heads()) + 1
finally:
lock.release()

def clone(self, remote, heads=[], stream=False):
'''clone remote repository.

keyword arguments:
heads: list of revs to clone (forces use of pull)
stream: use streaming clone if possible'''

# now, all clients that can request uncompressed clones can
# read repo formats supported by all servers that can serve
# them.

# if revlog format changes, client will have to check version
# and format flags on "stream" capability, and use
# uncompressed only if compatible.

if not stream:
# if the server explicitly prefers to stream (for fast LANs)
stream = remote.capable('stream-preferred')

if stream and not heads:
# 'stream' means remote revlog format is revlogv1 only
if remote.capable('stream'):
return self.stream_in(remote, set(('revlogv1',)))
# otherwise, 'streamreqs' contains the remote revlog format
streamreqs = remote.capable('streamreqs')
if streamreqs:
streamreqs = set(streamreqs.split(','))
# if we support it, stream in and adjust our requirements
if not streamreqs - self.supportedformats:
return self.stream_in(remote, streamreqs)
return self.pull(remote, heads)

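A hedged illustration of the 'streamreqs' check above: the capability value is a comma-separated requirement list, and streaming is used only when the client supports every entry (the example values are assumptions, not fixed by this code):

streamreqs = set('revlogv1,generaldelta'.split(','))  # example capability value
supportedformats = set(['revlogv1', 'generaldelta'])  # example client support
use_stream = not (streamreqs - supportedformats)      # True: stream in, else pull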
def pushkey(self, namespace, key, old, new):
self.hook('prepushkey', throw=True, namespace=namespace, key=key,
old=old, new=new)
self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
ret = pushkey.push(self, namespace, key, old, new)
self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
ret=ret)
return ret

def listkeys(self, namespace):
self.hook('prelistkeys', throw=True, namespace=namespace)
self.ui.debug('listing keys for "%s"\n' % namespace)
values = pushkey.list(self, namespace)
self.hook('listkeys', namespace=namespace, values=values)
return values

def debugwireargs(self, one, two, three=None, four=None, five=None):
'''used to test argument passing over the wire'''
return "%s %s %s %s %s" % (one, two, three, four, five)

def savecommitmessage(self, text):
fp = self.opener('last-message.txt', 'wb')
try:
fp.write(text)
finally:
fp.close()
return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
renamefiles = [tuple(t) for t in files]
def a():
for vfs, src, dest in renamefiles:
try:
vfs.rename(src, dest)
except OSError: # journal file does not yet exist
pass
return a

def undoname(fn):
base, name = os.path.split(fn)
assert name.startswith('journal')
return os.path.join(base, name.replace('journal', 'undo', 1))
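For example, undoname maps a journal file to its undo counterpart (the paths are illustrative):

# undoname('/repo/.hg/store/journal') == '/repo/.hg/store/undo'
# undoname('/repo/.hg/store/journal.phaseroots') == '/repo/.hg/store/undo.phaseroots'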

def instance(ui, path, create):
return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
return True
@@ -1,94 +1,179 @@
1 """test behavior of propertycache and unfiltered propertycache
1 """test behavior of propertycache and unfiltered propertycache
2
2
3 The repoview overlay is quite complexe. We test the behavior of
3 The repoview overlay is quite complexe. We test the behavior of
4 property cache of both localrepo and repoview to prevent
4 property cache of both localrepo and repoview to prevent
5 regression."""
5 regression."""

import os, subprocess
import mercurial.localrepo
import mercurial.repoview
import mercurial.util
import mercurial.hg
import mercurial.ui as uimod


# create some special property caches that trace their calls

calllog = []
@mercurial.util.propertycache
def testcachedfoobar(repo):
name = repo.filtername
if name is None:
name = ''
val = len(name)
calllog.append(val)
return val

unficalllog = []
@mercurial.localrepo.unfilteredpropertycache
def testcachedunfifoobar(repo):
name = repo.filtername
if name is None:
name = ''
val = 100 + len(name)
unficalllog.append(val)
return val

# plug them on the repo
mercurial.localrepo.localrepository.testcachedfoobar = testcachedfoobar
mercurial.localrepo.localrepository.testcachedunfifoobar = testcachedunfifoobar


# create an empty repo and instantiate it. It is important to run
# those tests on the real object to detect regressions.
repopath = os.path.join(os.environ['TESTTMP'], 'repo')
subprocess.check_call(['hg', 'init', repopath])
ui = uimod.ui()
repo = mercurial.hg.repository(ui, path=repopath).unfiltered()


print ''
print '=== property cache ==='
print ''
print 'calllog:', calllog
print 'cached value (unfiltered):',
print vars(repo).get('testcachedfoobar', 'NOCACHE')

print ''
print '= first access on unfiltered, should do a call'
print 'access:', repo.testcachedfoobar
print 'calllog:', calllog
print 'cached value (unfiltered):',
print vars(repo).get('testcachedfoobar', 'NOCACHE')

print ''
print '= second access on unfiltered, should not do call'
print 'access', repo.testcachedfoobar
print 'calllog:', calllog
print 'cached value (unfiltered):',
print vars(repo).get('testcachedfoobar', 'NOCACHE')

print ''
print '= first access on "visible" view, should do a call'
visibleview = repo.filtered('visible')
print 'cached value ("visible" view):',
print vars(visibleview).get('testcachedfoobar', 'NOCACHE')
print 'access:', visibleview.testcachedfoobar
print 'calllog:', calllog
print 'cached value (unfiltered):',
print vars(repo).get('testcachedfoobar', 'NOCACHE')
print 'cached value ("visible" view):',
print vars(visibleview).get('testcachedfoobar', 'NOCACHE')

print ''
print '= second access on "visible view", should not do call'
print 'access:', visibleview.testcachedfoobar
print 'calllog:', calllog
print 'cached value (unfiltered):',
print vars(repo).get('testcachedfoobar', 'NOCACHE')
print 'cached value ("visible" view):',
print vars(visibleview).get('testcachedfoobar', 'NOCACHE')

print ''
print '= no effect on other view'
immutableview = repo.filtered('immutable')
print 'cached value ("immutable" view):',
print vars(immutableview).get('testcachedfoobar', 'NOCACHE')
print 'access:', immutableview.testcachedfoobar
print 'calllog:', calllog
print 'cached value (unfiltered):',
print vars(repo).get('testcachedfoobar', 'NOCACHE')
print 'cached value ("visible" view):',
print vars(visibleview).get('testcachedfoobar', 'NOCACHE')
print 'cached value ("immutable" view):',
print vars(immutableview).get('testcachedfoobar', 'NOCACHE')

# unfiltered property cache test
print ''
print ''
print '=== unfiltered property cache ==='
print ''
print 'unficalllog:', unficalllog
print 'cached value (unfiltered): ',
print vars(repo).get('testcachedunfifoobar', 'NOCACHE')
print 'cached value ("visible" view): ',
print vars(visibleview).get('testcachedunfifoobar', 'NOCACHE')
print 'cached value ("immutable" view):',
print vars(immutableview).get('testcachedunfifoobar', 'NOCACHE')

print ''
print '= first access on unfiltered, should do a call'
print 'access (unfiltered):', repo.testcachedunfifoobar
print 'unficalllog:', unficalllog
print 'cached value (unfiltered): ',
print vars(repo).get('testcachedunfifoobar', 'NOCACHE')

print ''
print '= second access on unfiltered, should not do call'
print 'access (unfiltered):', repo.testcachedunfifoobar
print 'unficalllog:', unficalllog
print 'cached value (unfiltered): ',
print vars(repo).get('testcachedunfifoobar', 'NOCACHE')

print ''
print '= access on view should use the unfiltered cache'
print 'access (unfiltered): ', repo.testcachedunfifoobar
print 'access ("visible" view): ', visibleview.testcachedunfifoobar
print 'access ("immutable" view):', immutableview.testcachedunfifoobar
print 'unficalllog:', unficalllog
print 'cached value (unfiltered): ',
print vars(repo).get('testcachedunfifoobar', 'NOCACHE')
print 'cached value ("visible" view): ',
print vars(visibleview).get('testcachedunfifoobar', 'NOCACHE')
print 'cached value ("immutable" view):',
print vars(immutableview).get('testcachedunfifoobar', 'NOCACHE')

print ''
print '= even if we clear the unfiltered cache'
del repo.__dict__['testcachedunfifoobar']
print 'cached value (unfiltered): ',
print vars(repo).get('testcachedunfifoobar', 'NOCACHE')
print 'cached value ("visible" view): ',
print vars(visibleview).get('testcachedunfifoobar', 'NOCACHE')
print 'cached value ("immutable" view):',
print vars(immutableview).get('testcachedunfifoobar', 'NOCACHE')
print 'unficalllog:', unficalllog
print 'access ("visible" view): ', visibleview.testcachedunfifoobar
print 'unficalllog:', unficalllog
print 'cached value (unfiltered): ',
print vars(repo).get('testcachedunfifoobar', 'NOCACHE')
print 'cached value ("visible" view): ',
print vars(visibleview).get('testcachedunfifoobar', 'NOCACHE')
print 'cached value ("immutable" view):',
print vars(immutableview).get('testcachedunfifoobar', 'NOCACHE')
print 'access ("immutable" view):', immutableview.testcachedunfifoobar
print 'unficalllog:', unficalllog
print 'cached value (unfiltered): ',
print vars(repo).get('testcachedunfifoobar', 'NOCACHE')
print 'cached value ("visible" view): ',
print vars(visibleview).get('testcachedunfifoobar', 'NOCACHE')
print 'cached value ("immutable" view):',
print vars(immutableview).get('testcachedunfifoobar', 'NOCACHE')
print 'access (unfiltered): ', repo.testcachedunfifoobar
print 'unficalllog:', unficalllog
print 'cached value (unfiltered): ',
print vars(repo).get('testcachedunfifoobar', 'NOCACHE')
print 'cached value ("visible" view): ',
print vars(visibleview).get('testcachedunfifoobar', 'NOCACHE')
print 'cached value ("immutable" view):',
print vars(immutableview).get('testcachedunfifoobar', 'NOCACHE')
@@ -1,36 +1,84 @@

=== property cache ===

calllog: []
cached value (unfiltered): NOCACHE

= first access on unfiltered, should do a call
access: 0
calllog: [0]
cached value (unfiltered): 0

= second access on unfiltered, should not do call
access 0
calllog: [0]
cached value (unfiltered): 0

= first access on "visible" view, should do a call
cached value ("visible" view): NOCACHE
access: 7
calllog: [0, 7]
cached value (unfiltered): 0
cached value ("visible" view): 7

= second access on "visible view", should not do call
access: 7
calllog: [0, 7]
cached value (unfiltered): 0
cached value ("visible" view): 7

= no effect on other view
cached value ("immutable" view): NOCACHE
access: 9
calllog: [0, 7, 9]
cached value (unfiltered): 0
cached value ("visible" view): 7
cached value ("immutable" view): 9


=== unfiltered property cache ===

unficalllog: []
cached value (unfiltered): NOCACHE
cached value ("visible" view): NOCACHE
cached value ("immutable" view): NOCACHE

= first access on unfiltered, should do a call
access (unfiltered): 100
unficalllog: [100]
cached value (unfiltered): 100

= second access on unfiltered, should not do call
access (unfiltered): 100
unficalllog: [100]
cached value (unfiltered): 100

= access on view should use the unfiltered cache
access (unfiltered): 100
access ("visible" view): 100
access ("immutable" view): 100
unficalllog: [100]
cached value (unfiltered): 100
cached value ("visible" view): NOCACHE
cached value ("immutable" view): NOCACHE

= even if we clear the unfiltered cache
cached value (unfiltered): NOCACHE
cached value ("visible" view): NOCACHE
cached value ("immutable" view): NOCACHE
unficalllog: [100]
access ("visible" view): 100
unficalllog: [100, 100]
cached value (unfiltered): 100
cached value ("visible" view): NOCACHE
cached value ("immutable" view): NOCACHE
access ("immutable" view): 100
unficalllog: [100, 100]
cached value (unfiltered): 100
cached value ("visible" view): NOCACHE
cached value ("immutable" view): NOCACHE
access (unfiltered): 100
unficalllog: [100, 100]
cached value (unfiltered): 100
cached value ("visible" view): NOCACHE
cached value ("immutable" view): NOCACHE
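Taken together, the test and its expected output pin down the unfilteredpropertycache contract. A hedged standalone sketch of the same invariant, assuming testcachedunfifoobar has been plugged onto localrepository exactly as in the test above:

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui(), path='.').unfiltered()
view = repo.filtered('visible')
value = view.testcachedunfifoobar              # computed via repo.unfiltered()
assert 'testcachedunfifoobar' in vars(repo)    # cached on the unfiltered repo
assert 'testcachedunfifoobar' not in vars(view)  # never cached on the view
assert repo.testcachedunfifoobar == value      # later accesses reuse that cache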