commit: catch changed exec bit on files from p1 (issue4382)
Matt Mackall
r22492:d5261db0 stable
@@ -1,1781 +1,1780
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on a repo is done for logic that should be
    unfiltered"""

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered
    version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

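# Illustrative sketch (not part of the original file): how these helpers
# compose. 'allrevcount' is a hypothetical method; only the decorator
# semantics (delegating to repo.unfiltered()) come from the code above.
#
#     class myrepo(localrepository):
#         @unfilteredmethod
#         def allrevcount(self):
#             # runs against the unfiltered repo, so hidden
#             # changesets are counted too
#             return len(self)
#
# repofilecache and storecache likewise redirect their cached value onto
# the unfiltered repo, so all filtered views share one copy.
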
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2X' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

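# Usage sketch (illustrative, not in the original file): a localpeer is
# normally obtained via localrepository.peer() rather than constructed
# directly:
#
#     peer = repo.peer()           # wraps repo.filtered('served')
#     node = peer.lookup('tip')    # delegates to self._repo.lookup
#     flags = peer.known([node])   # [True] if known and not secret
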
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    bundle2caps = {'HG2X': (),
                   'b2x:listkeys': (),
                   'b2x:pushkey': ()}

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(self.bundle2caps)
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

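    # Sketch of the repoview mechanism (illustrative; the 'visible' filter
    # itself is registered in repoview, not in this file):
    #
    #     visible = repo.filtered('visible')   # hides hidden changesets
    #     assert visible.unfiltered() is repo.unfiltered()
    #
    # Because the filecache variants above always operate on the unfiltered
    # repo, every filtered view shares the same cached _bookmarks, dirstate,
    # and so on.
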
    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

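    # Indexing sketch (illustrative): __getitem__ accepts the usual changeid
    # forms, plus None for the working directory:
    #
    #     wctx = repo[None]    # context.workingctx
    #     tip = repo['tip']    # context.changectx for the tip changeset
    #     ctx = repo[0]        # revision 0, by number
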
    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

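    # revset sketch (illustrative; the %d/%s conversions are handled by
    # revset.formatspec):
    #
    #     revs = repo.revs('ancestors(%d) and not public()', 42)
    #     for ctx in repo.set('heads(branch(%s))', 'default'):
    #         print ctx.hex()
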
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

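    # Tagging sketch (hypothetical call; message and user are made up):
    #
    #     repo.tag(['v1.0'], repo['tip'].node(), 'Added tag v1.0',
    #              False, 'user@example.com', None)
    #
    # With local=False this writes .hgtags and commits a new changeset;
    # with local=True the tag only lands in .hg/localtags.
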
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

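    # Filter configuration sketch: _loadfilter reads the [encode]/[decode]
    # hgrc sections. An illustrative hgrc entry (plain shell command form,
    # run through util.filter):
    #
    #     [encode]
    #     *.txt = tr -d '\r'
    #
    # A filter registered via adddatafilter() is chosen instead when the
    # configured command string starts with that filter's name.
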
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

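    # Flag-handling sketch (illustrative calls): 'flags' is the manifest
    # flag string, so
    #
    #     repo.wwrite('script.sh', data, 'x')   # write file, set exec bit
    #     repo.wwrite('link', 'target', 'l')    # create a symlink instead
    #
    # This is the working-directory side of the exec-bit handling that this
    # changeset (issue4382) adjusts on the commit side.
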
    def transaction(self, desc, report=None):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        def onclose():
            self.store.write(self._transref())

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        tr = transaction.transaction(rp, self.sopener,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     onclose)
        self._transref = weakref.ref(tr)
        return tr

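    # Transaction usage sketch (the conventional close/release pattern,
    # shown for illustration):
    #
    #     tr = repo.transaction('my-operation')
    #     try:
    #         ...               # write revlog data, etc.
    #         tr.close()        # commit
    #     finally:
    #         tr.release()      # abort if close() was never reached
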
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

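    # _rollback() above consumes the undo.* files that aftertrans() produced
    # from journal.*, so only the most recent transaction can be undone; the
    # dirstate and branch are restored only when a working-directory parent
    # was actually stripped (the parentgone case).
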
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

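    # _lock() first attempts a non-blocking acquire (timeout 0); only if the
    # lock is held elsewhere and wait=True does it warn and retry with the
    # configured ui.timeout (600 seconds by default).
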
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

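    # Lock-ordering note: callers needing both locks take wlock() before
    # lock(), as rollback() above does; taking them in the opposite order
    # risks deadlock against another process doing the same.
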
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
-       fparent2 = fparent2o = manifest2.get(fname, nullid)
+       fparent2 = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
-
        # are just the flags changed during merge?
-       if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
+       elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

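    # Note on the hunk above (issue4382): the removed test compared fparent1
    # with fparent2o and so missed a flag-only change (e.g. chmod +x) on a
    # file whose content already matches p1; keying on "fname in manifest1"
    # with differing flags records such files in the changelist without
    # creating a new filelog revision.
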
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in changes[:3]:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

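    # commit() in brief: status() collects the changes, a workingctx wraps
    # them, the 'precommit' hook may veto, commitctx() writes the revision,
    # and the 'commit' hook is deferred via _afterlock() so it only fires
    # once the wlock is released.
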
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

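    # commitctx() writes in dependency order: filelog revisions first (via
    # _filecommit), then the manifest, then the changelog entry, all sharing
    # one linkrev; 'pretxncommit' runs while the changelog write is still
    # pending, so a hook failure aborts the whole transaction.
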
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def pull(self, remote, heads=None, force=False):
        return exchange.pull(self, remote, heads, force)

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

    def push(self, remote, force=False, revs=None, newbranch=False):
        return exchange.push(self, remote, force, revs, newbranch)

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the inmemory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible
                    # filters above served are unlikely to be fetched from
                    # a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

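    # Stream format consumed above, as the parsing code expects it: a status
    # code line, a "<total_files> <total_bytes>" line, then per file a
    # "<name>\0<size>" header followed by exactly <size> bytes of raw store
    # data.
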
1701 def clone(self, remote, heads=[], stream=False):
1700 def clone(self, remote, heads=[], stream=False):
1702 '''clone remote repository.
1701 '''clone remote repository.
1703
1702
1704 keyword arguments:
1703 keyword arguments:
1705 heads: list of revs to clone (forces use of pull)
1704 heads: list of revs to clone (forces use of pull)
1706 stream: use streaming clone if possible'''
1705 stream: use streaming clone if possible'''
1707
1706
1708 # now, all clients that can request uncompressed clones can
1707 # now, all clients that can request uncompressed clones can
1709 # read repo formats supported by all servers that can serve
1708 # read repo formats supported by all servers that can serve
1710 # them.
1709 # them.
1711
1710
1712 # if revlog format changes, client will have to check version
1711 # if revlog format changes, client will have to check version
1713 # and format flags on "stream" capability, and use
1712 # and format flags on "stream" capability, and use
1714 # uncompressed only if compatible.
1713 # uncompressed only if compatible.
1715
1714
1716 if not stream:
1715 if not stream:
1717 # if the server explicitly prefers to stream (for fast LANs)
1716 # if the server explicitly prefers to stream (for fast LANs)
1718 stream = remote.capable('stream-preferred')
1717 stream = remote.capable('stream-preferred')
1719
1718
1720 if stream and not heads:
1719 if stream and not heads:
1721 # 'stream' means remote revlog format is revlogv1 only
1720 # 'stream' means remote revlog format is revlogv1 only
1722 if remote.capable('stream'):
1721 if remote.capable('stream'):
1723 return self.stream_in(remote, set(('revlogv1',)))
1722 return self.stream_in(remote, set(('revlogv1',)))
1724 # otherwise, 'streamreqs' contains the remote revlog format
1723 # otherwise, 'streamreqs' contains the remote revlog format
1725 streamreqs = remote.capable('streamreqs')
1724 streamreqs = remote.capable('streamreqs')
1726 if streamreqs:
1725 if streamreqs:
1727 streamreqs = set(streamreqs.split(','))
1726 streamreqs = set(streamreqs.split(','))
1728 # if we support it, stream in and adjust our requirements
1727 # if we support it, stream in and adjust our requirements
1729 if not streamreqs - self.supportedformats:
1728 if not streamreqs - self.supportedformats:
1730 return self.stream_in(remote, streamreqs)
1729 return self.stream_in(remote, streamreqs)
1731 return self.pull(remote, heads)
1730 return self.pull(remote, heads)
1732
1731
1733 def pushkey(self, namespace, key, old, new):
1732 def pushkey(self, namespace, key, old, new):
1734 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1733 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1735 old=old, new=new)
1734 old=old, new=new)
1736 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1735 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1737 ret = pushkey.push(self, namespace, key, old, new)
1736 ret = pushkey.push(self, namespace, key, old, new)
1738 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1737 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1739 ret=ret)
1738 ret=ret)
1740 return ret
1739 return ret
1741
1740
1742 def listkeys(self, namespace):
1741 def listkeys(self, namespace):
1743 self.hook('prelistkeys', throw=True, namespace=namespace)
1742 self.hook('prelistkeys', throw=True, namespace=namespace)
1744 self.ui.debug('listing keys for "%s"\n' % namespace)
1743 self.ui.debug('listing keys for "%s"\n' % namespace)
1745 values = pushkey.list(self, namespace)
1744 values = pushkey.list(self, namespace)
1746 self.hook('listkeys', namespace=namespace, values=values)
1745 self.hook('listkeys', namespace=namespace, values=values)
1747 return values
1746 return values
1748
1747
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

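    # This is the receiving half of a round-trip test; the 'hg debugwireargs'
    # command calls it through a peer. An illustrative session (output per
    # the format string above; unsupplied arguments print as None):
    #
    #   $ hg debugwireargs /path/to/repo uno due
    #   uno due None None None
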
    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

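    # An assumption drawn from the behaviour above rather than from this
    # hunk: callers save the message when a commit fails part-way, and the
    # returned path is shown to the user so the text can be recovered, e.g.
    #
    #   $ hg commit -l .hg/last-message.txt
    #
    # pathto() makes that hint relative to the current working directory
    # instead of the repository root.
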
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

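# Worked example (derived from the two helpers above): when a transaction
# closes successfully, its journal files are renamed so that 'hg rollback'
# can find them later, e.g.
#
#   undoname('journal')          -> 'undo'
#   undoname('journal.dirstate') -> 'undo.dirstate'
#
# aftertrans() returns a plain closure over (vfs, src, dest) tuples, so the
# transaction never keeps a strong reference back to the repository.
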
# module-level entry points used by mercurial.hg to open and identify
# local repositories
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,55 +1,57 @@
 b51a8138292a introduced a regression where we would mention in the
 changelog executable files added by the second parent of a merge. Test
 that that doesn't happen anymore

   $ "$TESTDIR/hghave" execbit || exit 80

   $ hg init repo
   $ cd repo
   $ echo foo > foo
   $ hg ci -qAm 'add foo'

   $ echo bar > bar
   $ chmod +x bar
   $ hg ci -qAm 'add bar'

 manifest of p2:

   $ hg manifest
   bar
   foo

   $ hg up -qC 0
   $ echo >> foo
   $ hg ci -m 'change foo'
   created new head

 manifest of p1:

   $ hg manifest
   foo

   $ hg merge
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
+  $ chmod +x foo
   $ hg ci -m 'merge'

-this should not mention bar:
+this should not mention bar but should mention foo:

   $ hg tip -v
-  changeset:   3:ef2fc9b4a51b
+  changeset:   3:c53d17ff3380
   tag:         tip
   parent:      2:ed1b79f46b9a
   parent:      1:d394a8db219b
   user:        test
   date:        Thu Jan 01 00:00:00 1970 +0000
+  files:       foo
   description:
   merge



   $ hg debugindex bar
   rev offset length ..... linkrev nodeid p1 p2 (re)
    0 0 5 ..... 1 b004912a8510 000000000000 000000000000 (re)

   $ cd ..
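
The test above pins down the fix for issue4382: a merge commit that only
flips the exec bit on a file inherited from p1 must still record that file
in the changelog entry (the new "files: foo" line). As a rough sketch of
the kind of check involved, assuming m is the manifest being committed and
m1 the first parent's manifest (the actual change lives in a part of
localrepo.py outside this excerpt):

    # f counts as changed relative to p1 when either its content hash or
    # its flags (exec/symlink bit) differ
    if f not in m1 or m[f] != m1[f] or m.flags(f) != m1.flags(f):
        changed.append(f)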