##// END OF EJS Templates
afterlock: add the callback to the top level lock (issue4608)...
Pierre-Yves David -
r24821:57f1dbc9 stable
parent child Browse files
Show More
@@ -1,1955 +1,1958 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 import branchmap, pathutil
20 import branchmap, pathutil
21 import namespaces
21 import namespaces
22 propertycache = util.propertycache
22 propertycache = util.propertycache
23 filecache = scmutil.filecache
23 filecache = scmutil.filecache
24
24
25 class repofilecache(filecache):
25 class repofilecache(filecache):
26 """All filecache usage on repo are done for logic that should be unfiltered
26 """All filecache usage on repo are done for logic that should be unfiltered
27 """
27 """
28
28
29 def __get__(self, repo, type=None):
29 def __get__(self, repo, type=None):
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 def __set__(self, repo, value):
31 def __set__(self, repo, value):
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 def __delete__(self, repo):
33 def __delete__(self, repo):
34 return super(repofilecache, self).__delete__(repo.unfiltered())
34 return super(repofilecache, self).__delete__(repo.unfiltered())
35
35
36 class storecache(repofilecache):
36 class storecache(repofilecache):
37 """filecache for files in the store"""
37 """filecache for files in the store"""
38 def join(self, obj, fname):
38 def join(self, obj, fname):
39 return obj.sjoin(fname)
39 return obj.sjoin(fname)
40
40
41 class unfilteredpropertycache(propertycache):
41 class unfilteredpropertycache(propertycache):
42 """propertycache that apply to unfiltered repo only"""
42 """propertycache that apply to unfiltered repo only"""
43
43
44 def __get__(self, repo, type=None):
44 def __get__(self, repo, type=None):
45 unfi = repo.unfiltered()
45 unfi = repo.unfiltered()
46 if unfi is repo:
46 if unfi is repo:
47 return super(unfilteredpropertycache, self).__get__(unfi)
47 return super(unfilteredpropertycache, self).__get__(unfi)
48 return getattr(unfi, self.name)
48 return getattr(unfi, self.name)
49
49
50 class filteredpropertycache(propertycache):
50 class filteredpropertycache(propertycache):
51 """propertycache that must take filtering in account"""
51 """propertycache that must take filtering in account"""
52
52
53 def cachevalue(self, obj, value):
53 def cachevalue(self, obj, value):
54 object.__setattr__(obj, self.name, value)
54 object.__setattr__(obj, self.name, value)
55
55
56
56
57 def hasunfilteredcache(repo, name):
57 def hasunfilteredcache(repo, name):
58 """check if a repo has an unfilteredpropertycache value for <name>"""
58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 return name in vars(repo.unfiltered())
59 return name in vars(repo.unfiltered())
60
60
61 def unfilteredmethod(orig):
61 def unfilteredmethod(orig):
62 """decorate method that always need to be run on unfiltered version"""
62 """decorate method that always need to be run on unfiltered version"""
63 def wrapper(repo, *args, **kwargs):
63 def wrapper(repo, *args, **kwargs):
64 return orig(repo.unfiltered(), *args, **kwargs)
64 return orig(repo.unfiltered(), *args, **kwargs)
65 return wrapper
65 return wrapper
66
66
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 'unbundle'))
68 'unbundle'))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70
70
71 class localpeer(peer.peerrepository):
71 class localpeer(peer.peerrepository):
72 '''peer for a local repo; reflects only the most recent API'''
72 '''peer for a local repo; reflects only the most recent API'''
73
73
74 def __init__(self, repo, caps=moderncaps):
74 def __init__(self, repo, caps=moderncaps):
75 peer.peerrepository.__init__(self)
75 peer.peerrepository.__init__(self)
76 self._repo = repo.filtered('served')
76 self._repo = repo.filtered('served')
77 self.ui = repo.ui
77 self.ui = repo.ui
78 self._caps = repo._restrictcapabilities(caps)
78 self._caps = repo._restrictcapabilities(caps)
79 self.requirements = repo.requirements
79 self.requirements = repo.requirements
80 self.supportedformats = repo.supportedformats
80 self.supportedformats = repo.supportedformats
81
81
82 def close(self):
82 def close(self):
83 self._repo.close()
83 self._repo.close()
84
84
85 def _capabilities(self):
85 def _capabilities(self):
86 return self._caps
86 return self._caps
87
87
88 def local(self):
88 def local(self):
89 return self._repo
89 return self._repo
90
90
91 def canpush(self):
91 def canpush(self):
92 return True
92 return True
93
93
94 def url(self):
94 def url(self):
95 return self._repo.url()
95 return self._repo.url()
96
96
97 def lookup(self, key):
97 def lookup(self, key):
98 return self._repo.lookup(key)
98 return self._repo.lookup(key)
99
99
100 def branchmap(self):
100 def branchmap(self):
101 return self._repo.branchmap()
101 return self._repo.branchmap()
102
102
103 def heads(self):
103 def heads(self):
104 return self._repo.heads()
104 return self._repo.heads()
105
105
106 def known(self, nodes):
106 def known(self, nodes):
107 return self._repo.known(nodes)
107 return self._repo.known(nodes)
108
108
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 **kwargs):
110 **kwargs):
111 cg = exchange.getbundle(self._repo, source, heads=heads,
111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 common=common, bundlecaps=bundlecaps, **kwargs)
112 common=common, bundlecaps=bundlecaps, **kwargs)
113 if bundlecaps is not None and 'HG20' in bundlecaps:
113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 # When requesting a bundle2, getbundle returns a stream to make the
114 # When requesting a bundle2, getbundle returns a stream to make the
115 # wire level function happier. We need to build a proper object
115 # wire level function happier. We need to build a proper object
116 # from it in local peer.
116 # from it in local peer.
117 cg = bundle2.getunbundler(self.ui, cg)
117 cg = bundle2.getunbundler(self.ui, cg)
118 return cg
118 return cg
119
119
120 # TODO We might want to move the next two calls into legacypeer and add
120 # TODO We might want to move the next two calls into legacypeer and add
121 # unbundle instead.
121 # unbundle instead.
122
122
123 def unbundle(self, cg, heads, url):
123 def unbundle(self, cg, heads, url):
124 """apply a bundle on a repo
124 """apply a bundle on a repo
125
125
126 This function handles the repo locking itself."""
126 This function handles the repo locking itself."""
127 try:
127 try:
128 try:
128 try:
129 cg = exchange.readbundle(self.ui, cg, None)
129 cg = exchange.readbundle(self.ui, cg, None)
130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 if util.safehasattr(ret, 'getchunks'):
131 if util.safehasattr(ret, 'getchunks'):
132 # This is a bundle20 object, turn it into an unbundler.
132 # This is a bundle20 object, turn it into an unbundler.
133 # This little dance should be dropped eventually when the
133 # This little dance should be dropped eventually when the
134 # API is finally improved.
134 # API is finally improved.
135 stream = util.chunkbuffer(ret.getchunks())
135 stream = util.chunkbuffer(ret.getchunks())
136 ret = bundle2.getunbundler(self.ui, stream)
136 ret = bundle2.getunbundler(self.ui, stream)
137 return ret
137 return ret
138 except Exception, exc:
138 except Exception, exc:
139 # If the exception contains output salvaged from a bundle2
139 # If the exception contains output salvaged from a bundle2
140 # reply, we need to make sure it is printed before continuing
140 # reply, we need to make sure it is printed before continuing
141 # to fail. So we build a bundle2 with such output and consume
141 # to fail. So we build a bundle2 with such output and consume
142 # it directly.
142 # it directly.
143 #
143 #
144 # This is not very elegant but allows a "simple" solution for
144 # This is not very elegant but allows a "simple" solution for
145 # issue4594
145 # issue4594
146 output = getattr(exc, '_bundle2salvagedoutput', ())
146 output = getattr(exc, '_bundle2salvagedoutput', ())
147 if output:
147 if output:
148 bundler = bundle2.bundle20(self._repo.ui)
148 bundler = bundle2.bundle20(self._repo.ui)
149 for out in output:
149 for out in output:
150 bundler.addpart(out)
150 bundler.addpart(out)
151 stream = util.chunkbuffer(bundler.getchunks())
151 stream = util.chunkbuffer(bundler.getchunks())
152 b = bundle2.getunbundler(self.ui, stream)
152 b = bundle2.getunbundler(self.ui, stream)
153 bundle2.processbundle(self._repo, b)
153 bundle2.processbundle(self._repo, b)
154 raise
154 raise
155 except error.PushRaced, exc:
155 except error.PushRaced, exc:
156 raise error.ResponseError(_('push failed:'), str(exc))
156 raise error.ResponseError(_('push failed:'), str(exc))
157
157
158 def lock(self):
158 def lock(self):
159 return self._repo.lock()
159 return self._repo.lock()
160
160
161 def addchangegroup(self, cg, source, url):
161 def addchangegroup(self, cg, source, url):
162 return changegroup.addchangegroup(self._repo, cg, source, url)
162 return changegroup.addchangegroup(self._repo, cg, source, url)
163
163
164 def pushkey(self, namespace, key, old, new):
164 def pushkey(self, namespace, key, old, new):
165 return self._repo.pushkey(namespace, key, old, new)
165 return self._repo.pushkey(namespace, key, old, new)
166
166
167 def listkeys(self, namespace):
167 def listkeys(self, namespace):
168 return self._repo.listkeys(namespace)
168 return self._repo.listkeys(namespace)
169
169
170 def debugwireargs(self, one, two, three=None, four=None, five=None):
170 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 '''used to test argument passing over the wire'''
171 '''used to test argument passing over the wire'''
172 return "%s %s %s %s %s" % (one, two, three, four, five)
172 return "%s %s %s %s %s" % (one, two, three, four, five)
173
173
174 class locallegacypeer(localpeer):
174 class locallegacypeer(localpeer):
175 '''peer extension which implements legacy methods too; used for tests with
175 '''peer extension which implements legacy methods too; used for tests with
176 restricted capabilities'''
176 restricted capabilities'''
177
177
178 def __init__(self, repo):
178 def __init__(self, repo):
179 localpeer.__init__(self, repo, caps=legacycaps)
179 localpeer.__init__(self, repo, caps=legacycaps)
180
180
181 def branches(self, nodes):
181 def branches(self, nodes):
182 return self._repo.branches(nodes)
182 return self._repo.branches(nodes)
183
183
184 def between(self, pairs):
184 def between(self, pairs):
185 return self._repo.between(pairs)
185 return self._repo.between(pairs)
186
186
187 def changegroup(self, basenodes, source):
187 def changegroup(self, basenodes, source):
188 return changegroup.changegroup(self._repo, basenodes, source)
188 return changegroup.changegroup(self._repo, basenodes, source)
189
189
190 def changegroupsubset(self, bases, heads, source):
190 def changegroupsubset(self, bases, heads, source):
191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192
192
193 class localrepository(object):
193 class localrepository(object):
194
194
195 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
195 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
196 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
196 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
197 'dotencode'))
197 'dotencode'))
198 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
198 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
199 requirements = ['revlogv1']
199 requirements = ['revlogv1']
200 filtername = None
200 filtername = None
201
201
202 # a list of (ui, featureset) functions.
202 # a list of (ui, featureset) functions.
203 # only functions defined in module of enabled extensions are invoked
203 # only functions defined in module of enabled extensions are invoked
204 featuresetupfuncs = set()
204 featuresetupfuncs = set()
205
205
206 def _baserequirements(self, create):
206 def _baserequirements(self, create):
207 return self.requirements[:]
207 return self.requirements[:]
208
208
209 def __init__(self, baseui, path=None, create=False):
209 def __init__(self, baseui, path=None, create=False):
210 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
210 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
211 self.wopener = self.wvfs
211 self.wopener = self.wvfs
212 self.root = self.wvfs.base
212 self.root = self.wvfs.base
213 self.path = self.wvfs.join(".hg")
213 self.path = self.wvfs.join(".hg")
214 self.origroot = path
214 self.origroot = path
215 self.auditor = pathutil.pathauditor(self.root, self._checknested)
215 self.auditor = pathutil.pathauditor(self.root, self._checknested)
216 self.vfs = scmutil.vfs(self.path)
216 self.vfs = scmutil.vfs(self.path)
217 self.opener = self.vfs
217 self.opener = self.vfs
218 self.baseui = baseui
218 self.baseui = baseui
219 self.ui = baseui.copy()
219 self.ui = baseui.copy()
220 self.ui.copy = baseui.copy # prevent copying repo configuration
220 self.ui.copy = baseui.copy # prevent copying repo configuration
221 # A list of callback to shape the phase if no data were found.
221 # A list of callback to shape the phase if no data were found.
222 # Callback are in the form: func(repo, roots) --> processed root.
222 # Callback are in the form: func(repo, roots) --> processed root.
223 # This list it to be filled by extension during repo setup
223 # This list it to be filled by extension during repo setup
224 self._phasedefaults = []
224 self._phasedefaults = []
225 try:
225 try:
226 self.ui.readconfig(self.join("hgrc"), self.root)
226 self.ui.readconfig(self.join("hgrc"), self.root)
227 extensions.loadall(self.ui)
227 extensions.loadall(self.ui)
228 except IOError:
228 except IOError:
229 pass
229 pass
230
230
231 if self.featuresetupfuncs:
231 if self.featuresetupfuncs:
232 self.supported = set(self._basesupported) # use private copy
232 self.supported = set(self._basesupported) # use private copy
233 extmods = set(m.__name__ for n, m
233 extmods = set(m.__name__ for n, m
234 in extensions.extensions(self.ui))
234 in extensions.extensions(self.ui))
235 for setupfunc in self.featuresetupfuncs:
235 for setupfunc in self.featuresetupfuncs:
236 if setupfunc.__module__ in extmods:
236 if setupfunc.__module__ in extmods:
237 setupfunc(self.ui, self.supported)
237 setupfunc(self.ui, self.supported)
238 else:
238 else:
239 self.supported = self._basesupported
239 self.supported = self._basesupported
240
240
241 if not self.vfs.isdir():
241 if not self.vfs.isdir():
242 if create:
242 if create:
243 if not self.wvfs.exists():
243 if not self.wvfs.exists():
244 self.wvfs.makedirs()
244 self.wvfs.makedirs()
245 self.vfs.makedir(notindexed=True)
245 self.vfs.makedir(notindexed=True)
246 requirements = self._baserequirements(create)
246 requirements = self._baserequirements(create)
247 if self.ui.configbool('format', 'usestore', True):
247 if self.ui.configbool('format', 'usestore', True):
248 self.vfs.mkdir("store")
248 self.vfs.mkdir("store")
249 requirements.append("store")
249 requirements.append("store")
250 if self.ui.configbool('format', 'usefncache', True):
250 if self.ui.configbool('format', 'usefncache', True):
251 requirements.append("fncache")
251 requirements.append("fncache")
252 if self.ui.configbool('format', 'dotencode', True):
252 if self.ui.configbool('format', 'dotencode', True):
253 requirements.append('dotencode')
253 requirements.append('dotencode')
254 # create an invalid changelog
254 # create an invalid changelog
255 self.vfs.append(
255 self.vfs.append(
256 "00changelog.i",
256 "00changelog.i",
257 '\0\0\0\2' # represents revlogv2
257 '\0\0\0\2' # represents revlogv2
258 ' dummy changelog to prevent using the old repo layout'
258 ' dummy changelog to prevent using the old repo layout'
259 )
259 )
260 if self.ui.configbool('format', 'generaldelta', False):
260 if self.ui.configbool('format', 'generaldelta', False):
261 requirements.append("generaldelta")
261 requirements.append("generaldelta")
262 if self.ui.configbool('experimental', 'manifestv2', False):
262 if self.ui.configbool('experimental', 'manifestv2', False):
263 requirements.append("manifestv2")
263 requirements.append("manifestv2")
264 requirements = set(requirements)
264 requirements = set(requirements)
265 else:
265 else:
266 raise error.RepoError(_("repository %s not found") % path)
266 raise error.RepoError(_("repository %s not found") % path)
267 elif create:
267 elif create:
268 raise error.RepoError(_("repository %s already exists") % path)
268 raise error.RepoError(_("repository %s already exists") % path)
269 else:
269 else:
270 try:
270 try:
271 requirements = scmutil.readrequires(self.vfs, self.supported)
271 requirements = scmutil.readrequires(self.vfs, self.supported)
272 except IOError, inst:
272 except IOError, inst:
273 if inst.errno != errno.ENOENT:
273 if inst.errno != errno.ENOENT:
274 raise
274 raise
275 requirements = set()
275 requirements = set()
276
276
277 self.sharedpath = self.path
277 self.sharedpath = self.path
278 try:
278 try:
279 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
279 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
280 realpath=True)
280 realpath=True)
281 s = vfs.base
281 s = vfs.base
282 if not vfs.exists():
282 if not vfs.exists():
283 raise error.RepoError(
283 raise error.RepoError(
284 _('.hg/sharedpath points to nonexistent directory %s') % s)
284 _('.hg/sharedpath points to nonexistent directory %s') % s)
285 self.sharedpath = s
285 self.sharedpath = s
286 except IOError, inst:
286 except IOError, inst:
287 if inst.errno != errno.ENOENT:
287 if inst.errno != errno.ENOENT:
288 raise
288 raise
289
289
290 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
290 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
291 self.spath = self.store.path
291 self.spath = self.store.path
292 self.svfs = self.store.vfs
292 self.svfs = self.store.vfs
293 self.sopener = self.svfs
293 self.sopener = self.svfs
294 self.sjoin = self.store.join
294 self.sjoin = self.store.join
295 self.vfs.createmode = self.store.createmode
295 self.vfs.createmode = self.store.createmode
296 self._applyrequirements(requirements)
296 self._applyrequirements(requirements)
297 if create:
297 if create:
298 self._writerequirements()
298 self._writerequirements()
299
299
300
300
301 self._branchcaches = {}
301 self._branchcaches = {}
302 self._revbranchcache = None
302 self._revbranchcache = None
303 self.filterpats = {}
303 self.filterpats = {}
304 self._datafilters = {}
304 self._datafilters = {}
305 self._transref = self._lockref = self._wlockref = None
305 self._transref = self._lockref = self._wlockref = None
306
306
307 # A cache for various files under .hg/ that tracks file changes,
307 # A cache for various files under .hg/ that tracks file changes,
308 # (used by the filecache decorator)
308 # (used by the filecache decorator)
309 #
309 #
310 # Maps a property name to its util.filecacheentry
310 # Maps a property name to its util.filecacheentry
311 self._filecache = {}
311 self._filecache = {}
312
312
313 # hold sets of revision to be filtered
313 # hold sets of revision to be filtered
314 # should be cleared when something might have changed the filter value:
314 # should be cleared when something might have changed the filter value:
315 # - new changesets,
315 # - new changesets,
316 # - phase change,
316 # - phase change,
317 # - new obsolescence marker,
317 # - new obsolescence marker,
318 # - working directory parent change,
318 # - working directory parent change,
319 # - bookmark changes
319 # - bookmark changes
320 self.filteredrevcache = {}
320 self.filteredrevcache = {}
321
321
322 # generic mapping between names and nodes
322 # generic mapping between names and nodes
323 self.names = namespaces.namespaces()
323 self.names = namespaces.namespaces()
324
324
325 def close(self):
325 def close(self):
326 self._writecaches()
326 self._writecaches()
327
327
328 def _writecaches(self):
328 def _writecaches(self):
329 if self._revbranchcache:
329 if self._revbranchcache:
330 self._revbranchcache.write()
330 self._revbranchcache.write()
331
331
332 def _restrictcapabilities(self, caps):
332 def _restrictcapabilities(self, caps):
333 if self.ui.configbool('experimental', 'bundle2-advertise', True):
333 if self.ui.configbool('experimental', 'bundle2-advertise', True):
334 caps = set(caps)
334 caps = set(caps)
335 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
335 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
336 caps.add('bundle2=' + urllib.quote(capsblob))
336 caps.add('bundle2=' + urllib.quote(capsblob))
337 return caps
337 return caps
338
338
339 def _applyrequirements(self, requirements):
339 def _applyrequirements(self, requirements):
340 self.requirements = requirements
340 self.requirements = requirements
341 self.svfs.options = dict((r, 1) for r in requirements
341 self.svfs.options = dict((r, 1) for r in requirements
342 if r in self.openerreqs)
342 if r in self.openerreqs)
343 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
343 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
344 if chunkcachesize is not None:
344 if chunkcachesize is not None:
345 self.svfs.options['chunkcachesize'] = chunkcachesize
345 self.svfs.options['chunkcachesize'] = chunkcachesize
346 maxchainlen = self.ui.configint('format', 'maxchainlen')
346 maxchainlen = self.ui.configint('format', 'maxchainlen')
347 if maxchainlen is not None:
347 if maxchainlen is not None:
348 self.svfs.options['maxchainlen'] = maxchainlen
348 self.svfs.options['maxchainlen'] = maxchainlen
349 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
349 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
350 if manifestcachesize is not None:
350 if manifestcachesize is not None:
351 self.svfs.options['manifestcachesize'] = manifestcachesize
351 self.svfs.options['manifestcachesize'] = manifestcachesize
352 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
352 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
353 if usetreemanifest is not None:
353 if usetreemanifest is not None:
354 self.svfs.options['usetreemanifest'] = usetreemanifest
354 self.svfs.options['usetreemanifest'] = usetreemanifest
355
355
356 def _writerequirements(self):
356 def _writerequirements(self):
357 reqfile = self.vfs("requires", "w")
357 reqfile = self.vfs("requires", "w")
358 for r in sorted(self.requirements):
358 for r in sorted(self.requirements):
359 reqfile.write("%s\n" % r)
359 reqfile.write("%s\n" % r)
360 reqfile.close()
360 reqfile.close()
361
361
362 def _checknested(self, path):
362 def _checknested(self, path):
363 """Determine if path is a legal nested repository."""
363 """Determine if path is a legal nested repository."""
364 if not path.startswith(self.root):
364 if not path.startswith(self.root):
365 return False
365 return False
366 subpath = path[len(self.root) + 1:]
366 subpath = path[len(self.root) + 1:]
367 normsubpath = util.pconvert(subpath)
367 normsubpath = util.pconvert(subpath)
368
368
369 # XXX: Checking against the current working copy is wrong in
369 # XXX: Checking against the current working copy is wrong in
370 # the sense that it can reject things like
370 # the sense that it can reject things like
371 #
371 #
372 # $ hg cat -r 10 sub/x.txt
372 # $ hg cat -r 10 sub/x.txt
373 #
373 #
374 # if sub/ is no longer a subrepository in the working copy
374 # if sub/ is no longer a subrepository in the working copy
375 # parent revision.
375 # parent revision.
376 #
376 #
377 # However, it can of course also allow things that would have
377 # However, it can of course also allow things that would have
378 # been rejected before, such as the above cat command if sub/
378 # been rejected before, such as the above cat command if sub/
379 # is a subrepository now, but was a normal directory before.
379 # is a subrepository now, but was a normal directory before.
380 # The old path auditor would have rejected by mistake since it
380 # The old path auditor would have rejected by mistake since it
381 # panics when it sees sub/.hg/.
381 # panics when it sees sub/.hg/.
382 #
382 #
383 # All in all, checking against the working copy seems sensible
383 # All in all, checking against the working copy seems sensible
384 # since we want to prevent access to nested repositories on
384 # since we want to prevent access to nested repositories on
385 # the filesystem *now*.
385 # the filesystem *now*.
386 ctx = self[None]
386 ctx = self[None]
387 parts = util.splitpath(subpath)
387 parts = util.splitpath(subpath)
388 while parts:
388 while parts:
389 prefix = '/'.join(parts)
389 prefix = '/'.join(parts)
390 if prefix in ctx.substate:
390 if prefix in ctx.substate:
391 if prefix == normsubpath:
391 if prefix == normsubpath:
392 return True
392 return True
393 else:
393 else:
394 sub = ctx.sub(prefix)
394 sub = ctx.sub(prefix)
395 return sub.checknested(subpath[len(prefix) + 1:])
395 return sub.checknested(subpath[len(prefix) + 1:])
396 else:
396 else:
397 parts.pop()
397 parts.pop()
398 return False
398 return False
399
399
400 def peer(self):
400 def peer(self):
401 return localpeer(self) # not cached to avoid reference cycle
401 return localpeer(self) # not cached to avoid reference cycle
402
402
403 def unfiltered(self):
403 def unfiltered(self):
404 """Return unfiltered version of the repository
404 """Return unfiltered version of the repository
405
405
406 Intended to be overwritten by filtered repo."""
406 Intended to be overwritten by filtered repo."""
407 return self
407 return self
408
408
409 def filtered(self, name):
409 def filtered(self, name):
410 """Return a filtered version of a repository"""
410 """Return a filtered version of a repository"""
411 # build a new class with the mixin and the current class
411 # build a new class with the mixin and the current class
412 # (possibly subclass of the repo)
412 # (possibly subclass of the repo)
413 class proxycls(repoview.repoview, self.unfiltered().__class__):
413 class proxycls(repoview.repoview, self.unfiltered().__class__):
414 pass
414 pass
415 return proxycls(self, name)
415 return proxycls(self, name)
416
416
417 @repofilecache('bookmarks')
417 @repofilecache('bookmarks')
418 def _bookmarks(self):
418 def _bookmarks(self):
419 return bookmarks.bmstore(self)
419 return bookmarks.bmstore(self)
420
420
421 @repofilecache('bookmarks.current')
421 @repofilecache('bookmarks.current')
422 def _bookmarkcurrent(self):
422 def _bookmarkcurrent(self):
423 return bookmarks.readcurrent(self)
423 return bookmarks.readcurrent(self)
424
424
425 def bookmarkheads(self, bookmark):
425 def bookmarkheads(self, bookmark):
426 name = bookmark.split('@', 1)[0]
426 name = bookmark.split('@', 1)[0]
427 heads = []
427 heads = []
428 for mark, n in self._bookmarks.iteritems():
428 for mark, n in self._bookmarks.iteritems():
429 if mark.split('@', 1)[0] == name:
429 if mark.split('@', 1)[0] == name:
430 heads.append(n)
430 heads.append(n)
431 return heads
431 return heads
432
432
433 @storecache('phaseroots')
433 @storecache('phaseroots')
434 def _phasecache(self):
434 def _phasecache(self):
435 return phases.phasecache(self, self._phasedefaults)
435 return phases.phasecache(self, self._phasedefaults)
436
436
437 @storecache('obsstore')
437 @storecache('obsstore')
438 def obsstore(self):
438 def obsstore(self):
439 # read default format for new obsstore.
439 # read default format for new obsstore.
440 defaultformat = self.ui.configint('format', 'obsstore-version', None)
440 defaultformat = self.ui.configint('format', 'obsstore-version', None)
441 # rely on obsstore class default when possible.
441 # rely on obsstore class default when possible.
442 kwargs = {}
442 kwargs = {}
443 if defaultformat is not None:
443 if defaultformat is not None:
444 kwargs['defaultformat'] = defaultformat
444 kwargs['defaultformat'] = defaultformat
445 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
445 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
446 store = obsolete.obsstore(self.svfs, readonly=readonly,
446 store = obsolete.obsstore(self.svfs, readonly=readonly,
447 **kwargs)
447 **kwargs)
448 if store and readonly:
448 if store and readonly:
449 self.ui.warn(
449 self.ui.warn(
450 _('obsolete feature not enabled but %i markers found!\n')
450 _('obsolete feature not enabled but %i markers found!\n')
451 % len(list(store)))
451 % len(list(store)))
452 return store
452 return store
453
453
454 @storecache('00changelog.i')
454 @storecache('00changelog.i')
455 def changelog(self):
455 def changelog(self):
456 c = changelog.changelog(self.svfs)
456 c = changelog.changelog(self.svfs)
457 if 'HG_PENDING' in os.environ:
457 if 'HG_PENDING' in os.environ:
458 p = os.environ['HG_PENDING']
458 p = os.environ['HG_PENDING']
459 if p.startswith(self.root):
459 if p.startswith(self.root):
460 c.readpending('00changelog.i.a')
460 c.readpending('00changelog.i.a')
461 return c
461 return c
462
462
463 @storecache('00manifest.i')
463 @storecache('00manifest.i')
464 def manifest(self):
464 def manifest(self):
465 return manifest.manifest(self.svfs)
465 return manifest.manifest(self.svfs)
466
466
467 @repofilecache('dirstate')
467 @repofilecache('dirstate')
468 def dirstate(self):
468 def dirstate(self):
469 warned = [0]
469 warned = [0]
470 def validate(node):
470 def validate(node):
471 try:
471 try:
472 self.changelog.rev(node)
472 self.changelog.rev(node)
473 return node
473 return node
474 except error.LookupError:
474 except error.LookupError:
475 if not warned[0]:
475 if not warned[0]:
476 warned[0] = True
476 warned[0] = True
477 self.ui.warn(_("warning: ignoring unknown"
477 self.ui.warn(_("warning: ignoring unknown"
478 " working parent %s!\n") % short(node))
478 " working parent %s!\n") % short(node))
479 return nullid
479 return nullid
480
480
481 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
481 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
482
482
483 def __getitem__(self, changeid):
483 def __getitem__(self, changeid):
484 if changeid is None:
484 if changeid is None:
485 return context.workingctx(self)
485 return context.workingctx(self)
486 if isinstance(changeid, slice):
486 if isinstance(changeid, slice):
487 return [context.changectx(self, i)
487 return [context.changectx(self, i)
488 for i in xrange(*changeid.indices(len(self)))
488 for i in xrange(*changeid.indices(len(self)))
489 if i not in self.changelog.filteredrevs]
489 if i not in self.changelog.filteredrevs]
490 return context.changectx(self, changeid)
490 return context.changectx(self, changeid)
491
491
492 def __contains__(self, changeid):
492 def __contains__(self, changeid):
493 try:
493 try:
494 self[changeid]
494 self[changeid]
495 return True
495 return True
496 except error.RepoLookupError:
496 except error.RepoLookupError:
497 return False
497 return False
498
498
    def __nonzero__(self):
        # a repository object is always truthy, even when it has no revisions
        return True
501
501
    def __len__(self):
        # number of revisions in the (possibly filtered) changelog
        return len(self.changelog)
504
504
    def __iter__(self):
        # iterate over revision numbers of the (possibly filtered) changelog
        return iter(self.changelog)
507
507
508 def revs(self, expr, *args):
508 def revs(self, expr, *args):
509 '''Return a list of revisions matching the given revset'''
509 '''Return a list of revisions matching the given revset'''
510 expr = revset.formatspec(expr, *args)
510 expr = revset.formatspec(expr, *args)
511 m = revset.match(None, expr)
511 m = revset.match(None, expr)
512 return m(self)
512 return m(self)
513
513
514 def set(self, expr, *args):
514 def set(self, expr, *args):
515 '''
515 '''
516 Yield a context for each matching revision, after doing arg
516 Yield a context for each matching revision, after doing arg
517 replacement via revset.formatspec
517 replacement via revset.formatspec
518 '''
518 '''
519 for r in self.revs(expr, *args):
519 for r in self.revs(expr, *args):
520 yield self[r]
520 yield self[r]
521
521
522 def url(self):
522 def url(self):
523 return 'file:' + self.root
523 return 'file:' + self.root
524
524
525 def hook(self, name, throw=False, **args):
525 def hook(self, name, throw=False, **args):
526 """Call a hook, passing this repo instance.
526 """Call a hook, passing this repo instance.
527
527
528 This a convenience method to aid invoking hooks. Extensions likely
528 This a convenience method to aid invoking hooks. Extensions likely
529 won't call this unless they have registered a custom hook or are
529 won't call this unless they have registered a custom hook or are
530 replacing code that is expected to call a hook.
530 replacing code that is expected to call a hook.
531 """
531 """
532 return hook.hook(self.ui, self, name, throw, **args)
532 return hook.hook(self.ui, self, name, throw, **args)
533
533
534 @unfilteredmethod
534 @unfilteredmethod
535 def _tag(self, names, node, message, local, user, date, extra={},
535 def _tag(self, names, node, message, local, user, date, extra={},
536 editor=False):
536 editor=False):
537 if isinstance(names, str):
537 if isinstance(names, str):
538 names = (names,)
538 names = (names,)
539
539
540 branches = self.branchmap()
540 branches = self.branchmap()
541 for name in names:
541 for name in names:
542 self.hook('pretag', throw=True, node=hex(node), tag=name,
542 self.hook('pretag', throw=True, node=hex(node), tag=name,
543 local=local)
543 local=local)
544 if name in branches:
544 if name in branches:
545 self.ui.warn(_("warning: tag %s conflicts with existing"
545 self.ui.warn(_("warning: tag %s conflicts with existing"
546 " branch name\n") % name)
546 " branch name\n") % name)
547
547
548 def writetags(fp, names, munge, prevtags):
548 def writetags(fp, names, munge, prevtags):
549 fp.seek(0, 2)
549 fp.seek(0, 2)
550 if prevtags and prevtags[-1] != '\n':
550 if prevtags and prevtags[-1] != '\n':
551 fp.write('\n')
551 fp.write('\n')
552 for name in names:
552 for name in names:
553 if munge:
553 if munge:
554 m = munge(name)
554 m = munge(name)
555 else:
555 else:
556 m = name
556 m = name
557
557
558 if (self._tagscache.tagtypes and
558 if (self._tagscache.tagtypes and
559 name in self._tagscache.tagtypes):
559 name in self._tagscache.tagtypes):
560 old = self.tags().get(name, nullid)
560 old = self.tags().get(name, nullid)
561 fp.write('%s %s\n' % (hex(old), m))
561 fp.write('%s %s\n' % (hex(old), m))
562 fp.write('%s %s\n' % (hex(node), m))
562 fp.write('%s %s\n' % (hex(node), m))
563 fp.close()
563 fp.close()
564
564
565 prevtags = ''
565 prevtags = ''
566 if local:
566 if local:
567 try:
567 try:
568 fp = self.vfs('localtags', 'r+')
568 fp = self.vfs('localtags', 'r+')
569 except IOError:
569 except IOError:
570 fp = self.vfs('localtags', 'a')
570 fp = self.vfs('localtags', 'a')
571 else:
571 else:
572 prevtags = fp.read()
572 prevtags = fp.read()
573
573
574 # local tags are stored in the current charset
574 # local tags are stored in the current charset
575 writetags(fp, names, None, prevtags)
575 writetags(fp, names, None, prevtags)
576 for name in names:
576 for name in names:
577 self.hook('tag', node=hex(node), tag=name, local=local)
577 self.hook('tag', node=hex(node), tag=name, local=local)
578 return
578 return
579
579
580 try:
580 try:
581 fp = self.wfile('.hgtags', 'rb+')
581 fp = self.wfile('.hgtags', 'rb+')
582 except IOError, e:
582 except IOError, e:
583 if e.errno != errno.ENOENT:
583 if e.errno != errno.ENOENT:
584 raise
584 raise
585 fp = self.wfile('.hgtags', 'ab')
585 fp = self.wfile('.hgtags', 'ab')
586 else:
586 else:
587 prevtags = fp.read()
587 prevtags = fp.read()
588
588
589 # committed tags are stored in UTF-8
589 # committed tags are stored in UTF-8
590 writetags(fp, names, encoding.fromlocal, prevtags)
590 writetags(fp, names, encoding.fromlocal, prevtags)
591
591
592 fp.close()
592 fp.close()
593
593
594 self.invalidatecaches()
594 self.invalidatecaches()
595
595
596 if '.hgtags' not in self.dirstate:
596 if '.hgtags' not in self.dirstate:
597 self[None].add(['.hgtags'])
597 self[None].add(['.hgtags'])
598
598
599 m = matchmod.exact(self.root, '', ['.hgtags'])
599 m = matchmod.exact(self.root, '', ['.hgtags'])
600 tagnode = self.commit(message, user, date, extra=extra, match=m,
600 tagnode = self.commit(message, user, date, extra=extra, match=m,
601 editor=editor)
601 editor=editor)
602
602
603 for name in names:
603 for name in names:
604 self.hook('tag', node=hex(node), tag=name, local=local)
604 self.hook('tag', node=hex(node), tag=name, local=local)
605
605
606 return tagnode
606 return tagnode
607
607
608 def tag(self, names, node, message, local, user, date, editor=False):
608 def tag(self, names, node, message, local, user, date, editor=False):
609 '''tag a revision with one or more symbolic names.
609 '''tag a revision with one or more symbolic names.
610
610
611 names is a list of strings or, when adding a single tag, names may be a
611 names is a list of strings or, when adding a single tag, names may be a
612 string.
612 string.
613
613
614 if local is True, the tags are stored in a per-repository file.
614 if local is True, the tags are stored in a per-repository file.
615 otherwise, they are stored in the .hgtags file, and a new
615 otherwise, they are stored in the .hgtags file, and a new
616 changeset is committed with the change.
616 changeset is committed with the change.
617
617
618 keyword arguments:
618 keyword arguments:
619
619
620 local: whether to store tags in non-version-controlled file
620 local: whether to store tags in non-version-controlled file
621 (default False)
621 (default False)
622
622
623 message: commit message to use if committing
623 message: commit message to use if committing
624
624
625 user: name of user to use if committing
625 user: name of user to use if committing
626
626
627 date: date tuple to use if committing'''
627 date: date tuple to use if committing'''
628
628
629 if not local:
629 if not local:
630 m = matchmod.exact(self.root, '', ['.hgtags'])
630 m = matchmod.exact(self.root, '', ['.hgtags'])
631 if util.any(self.status(match=m, unknown=True, ignored=True)):
631 if util.any(self.status(match=m, unknown=True, ignored=True)):
632 raise util.Abort(_('working copy of .hgtags is changed'),
632 raise util.Abort(_('working copy of .hgtags is changed'),
633 hint=_('please commit .hgtags manually'))
633 hint=_('please commit .hgtags manually'))
634
634
635 self.tags() # instantiate the cache
635 self.tags() # instantiate the cache
636 self._tag(names, node, message, local, user, date, editor=editor)
636 self._tag(names, node, message, local, user, date, editor=editor)
637
637
638 @filteredpropertycache
638 @filteredpropertycache
639 def _tagscache(self):
639 def _tagscache(self):
640 '''Returns a tagscache object that contains various tags related
640 '''Returns a tagscache object that contains various tags related
641 caches.'''
641 caches.'''
642
642
643 # This simplifies its cache management by having one decorated
643 # This simplifies its cache management by having one decorated
644 # function (this one) and the rest simply fetch things from it.
644 # function (this one) and the rest simply fetch things from it.
645 class tagscache(object):
645 class tagscache(object):
646 def __init__(self):
646 def __init__(self):
647 # These two define the set of tags for this repository. tags
647 # These two define the set of tags for this repository. tags
648 # maps tag name to node; tagtypes maps tag name to 'global' or
648 # maps tag name to node; tagtypes maps tag name to 'global' or
649 # 'local'. (Global tags are defined by .hgtags across all
649 # 'local'. (Global tags are defined by .hgtags across all
650 # heads, and local tags are defined in .hg/localtags.)
650 # heads, and local tags are defined in .hg/localtags.)
651 # They constitute the in-memory cache of tags.
651 # They constitute the in-memory cache of tags.
652 self.tags = self.tagtypes = None
652 self.tags = self.tagtypes = None
653
653
654 self.nodetagscache = self.tagslist = None
654 self.nodetagscache = self.tagslist = None
655
655
656 cache = tagscache()
656 cache = tagscache()
657 cache.tags, cache.tagtypes = self._findtags()
657 cache.tags, cache.tagtypes = self._findtags()
658
658
659 return cache
659 return cache
660
660
661 def tags(self):
661 def tags(self):
662 '''return a mapping of tag to node'''
662 '''return a mapping of tag to node'''
663 t = {}
663 t = {}
664 if self.changelog.filteredrevs:
664 if self.changelog.filteredrevs:
665 tags, tt = self._findtags()
665 tags, tt = self._findtags()
666 else:
666 else:
667 tags = self._tagscache.tags
667 tags = self._tagscache.tags
668 for k, v in tags.iteritems():
668 for k, v in tags.iteritems():
669 try:
669 try:
670 # ignore tags to unknown nodes
670 # ignore tags to unknown nodes
671 self.changelog.rev(v)
671 self.changelog.rev(v)
672 t[k] = v
672 t[k] = v
673 except (error.LookupError, ValueError):
673 except (error.LookupError, ValueError):
674 pass
674 pass
675 return t
675 return t
676
676
677 def _findtags(self):
677 def _findtags(self):
678 '''Do the hard work of finding tags. Return a pair of dicts
678 '''Do the hard work of finding tags. Return a pair of dicts
679 (tags, tagtypes) where tags maps tag name to node, and tagtypes
679 (tags, tagtypes) where tags maps tag name to node, and tagtypes
680 maps tag name to a string like \'global\' or \'local\'.
680 maps tag name to a string like \'global\' or \'local\'.
681 Subclasses or extensions are free to add their own tags, but
681 Subclasses or extensions are free to add their own tags, but
682 should be aware that the returned dicts will be retained for the
682 should be aware that the returned dicts will be retained for the
683 duration of the localrepo object.'''
683 duration of the localrepo object.'''
684
684
685 # XXX what tagtype should subclasses/extensions use? Currently
685 # XXX what tagtype should subclasses/extensions use? Currently
686 # mq and bookmarks add tags, but do not set the tagtype at all.
686 # mq and bookmarks add tags, but do not set the tagtype at all.
687 # Should each extension invent its own tag type? Should there
687 # Should each extension invent its own tag type? Should there
688 # be one tagtype for all such "virtual" tags? Or is the status
688 # be one tagtype for all such "virtual" tags? Or is the status
689 # quo fine?
689 # quo fine?
690
690
691 alltags = {} # map tag name to (node, hist)
691 alltags = {} # map tag name to (node, hist)
692 tagtypes = {}
692 tagtypes = {}
693
693
694 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
694 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
695 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
695 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
696
696
697 # Build the return dicts. Have to re-encode tag names because
697 # Build the return dicts. Have to re-encode tag names because
698 # the tags module always uses UTF-8 (in order not to lose info
698 # the tags module always uses UTF-8 (in order not to lose info
699 # writing to the cache), but the rest of Mercurial wants them in
699 # writing to the cache), but the rest of Mercurial wants them in
700 # local encoding.
700 # local encoding.
701 tags = {}
701 tags = {}
702 for (name, (node, hist)) in alltags.iteritems():
702 for (name, (node, hist)) in alltags.iteritems():
703 if node != nullid:
703 if node != nullid:
704 tags[encoding.tolocal(name)] = node
704 tags[encoding.tolocal(name)] = node
705 tags['tip'] = self.changelog.tip()
705 tags['tip'] = self.changelog.tip()
706 tagtypes = dict([(encoding.tolocal(name), value)
706 tagtypes = dict([(encoding.tolocal(name), value)
707 for (name, value) in tagtypes.iteritems()])
707 for (name, value) in tagtypes.iteritems()])
708 return (tags, tagtypes)
708 return (tags, tagtypes)
709
709
710 def tagtype(self, tagname):
710 def tagtype(self, tagname):
711 '''
711 '''
712 return the type of the given tag. result can be:
712 return the type of the given tag. result can be:
713
713
714 'local' : a local tag
714 'local' : a local tag
715 'global' : a global tag
715 'global' : a global tag
716 None : tag does not exist
716 None : tag does not exist
717 '''
717 '''
718
718
719 return self._tagscache.tagtypes.get(tagname)
719 return self._tagscache.tagtypes.get(tagname)
720
720
721 def tagslist(self):
721 def tagslist(self):
722 '''return a list of tags ordered by revision'''
722 '''return a list of tags ordered by revision'''
723 if not self._tagscache.tagslist:
723 if not self._tagscache.tagslist:
724 l = []
724 l = []
725 for t, n in self.tags().iteritems():
725 for t, n in self.tags().iteritems():
726 l.append((self.changelog.rev(n), t, n))
726 l.append((self.changelog.rev(n), t, n))
727 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
727 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
728
728
729 return self._tagscache.tagslist
729 return self._tagscache.tagslist
730
730
731 def nodetags(self, node):
731 def nodetags(self, node):
732 '''return the tags associated with a node'''
732 '''return the tags associated with a node'''
733 if not self._tagscache.nodetagscache:
733 if not self._tagscache.nodetagscache:
734 nodetagscache = {}
734 nodetagscache = {}
735 for t, n in self._tagscache.tags.iteritems():
735 for t, n in self._tagscache.tags.iteritems():
736 nodetagscache.setdefault(n, []).append(t)
736 nodetagscache.setdefault(n, []).append(t)
737 for tags in nodetagscache.itervalues():
737 for tags in nodetagscache.itervalues():
738 tags.sort()
738 tags.sort()
739 self._tagscache.nodetagscache = nodetagscache
739 self._tagscache.nodetagscache = nodetagscache
740 return self._tagscache.nodetagscache.get(node, [])
740 return self._tagscache.nodetagscache.get(node, [])
741
741
742 def nodebookmarks(self, node):
742 def nodebookmarks(self, node):
743 marks = []
743 marks = []
744 for bookmark, n in self._bookmarks.iteritems():
744 for bookmark, n in self._bookmarks.iteritems():
745 if n == node:
745 if n == node:
746 marks.append(bookmark)
746 marks.append(bookmark)
747 return sorted(marks)
747 return sorted(marks)
748
748
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # refresh the per-filter cache, then return the entry for this view
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
754
754
755 @unfilteredmethod
755 @unfilteredmethod
756 def revbranchcache(self):
756 def revbranchcache(self):
757 if not self._revbranchcache:
757 if not self._revbranchcache:
758 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
758 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
759 return self._revbranchcache
759 return self._revbranchcache
760
760
761 def branchtip(self, branch, ignoremissing=False):
761 def branchtip(self, branch, ignoremissing=False):
762 '''return the tip node for a given branch
762 '''return the tip node for a given branch
763
763
764 If ignoremissing is True, then this method will not raise an error.
764 If ignoremissing is True, then this method will not raise an error.
765 This is helpful for callers that only expect None for a missing branch
765 This is helpful for callers that only expect None for a missing branch
766 (e.g. namespace).
766 (e.g. namespace).
767
767
768 '''
768 '''
769 try:
769 try:
770 return self.branchmap().branchtip(branch)
770 return self.branchmap().branchtip(branch)
771 except KeyError:
771 except KeyError:
772 if not ignoremissing:
772 if not ignoremissing:
773 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
773 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
774 else:
774 else:
775 pass
775 pass
776
776
777 def lookup(self, key):
777 def lookup(self, key):
778 return self[key].node()
778 return self[key].node()
779
779
780 def lookupbranch(self, key, remote=None):
780 def lookupbranch(self, key, remote=None):
781 repo = remote or self
781 repo = remote or self
782 if key in repo.branchmap():
782 if key in repo.branchmap():
783 return key
783 return key
784
784
785 repo = (remote and remote.local()) and remote or self
785 repo = (remote and remote.local()) and remote or self
786 return repo[key].branch()
786 return repo[key].branch()
787
787
788 def known(self, nodes):
788 def known(self, nodes):
789 nm = self.changelog.nodemap
789 nm = self.changelog.nodemap
790 pc = self._phasecache
790 pc = self._phasecache
791 result = []
791 result = []
792 for n in nodes:
792 for n in nodes:
793 r = nm.get(n)
793 r = nm.get(n)
794 resp = not (r is None or pc.phase(self, r) >= phases.secret)
794 resp = not (r is None or pc.phase(self, r) >= phases.secret)
795 result.append(resp)
795 result.append(resp)
796 return result
796 return result
797
797
798 def local(self):
798 def local(self):
799 return self
799 return self
800
800
801 def cancopy(self):
801 def cancopy(self):
802 # so statichttprepo's override of local() works
802 # so statichttprepo's override of local() works
803 if not self.local():
803 if not self.local():
804 return False
804 return False
805 if not self.ui.configbool('phases', 'publish', True):
805 if not self.ui.configbool('phases', 'publish', True):
806 return True
806 return True
807 # if publishing we can't copy if there is filtered content
807 # if publishing we can't copy if there is filtered content
808 return not self.filtered('visible').changelog.filteredrevs
808 return not self.filtered('visible').changelog.filteredrevs
809
809
810 def shared(self):
810 def shared(self):
811 '''the type of shared repository (None if not shared)'''
811 '''the type of shared repository (None if not shared)'''
812 if self.sharedpath != self.path:
812 if self.sharedpath != self.path:
813 return 'store'
813 return 'store'
814 return None
814 return None
815
815
816 def join(self, f, *insidef):
816 def join(self, f, *insidef):
817 return self.vfs.join(os.path.join(f, *insidef))
817 return self.vfs.join(os.path.join(f, *insidef))
818
818
819 def wjoin(self, f, *insidef):
819 def wjoin(self, f, *insidef):
820 return self.vfs.reljoin(self.root, f, *insidef)
820 return self.vfs.reljoin(self.root, f, *insidef)
821
821
    def file(self, f):
        """Return the filelog for tracked file ``f``.

        A leading '/' (repo-absolute form) is stripped before lookup.
        """
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)
826
826
827 def changectx(self, changeid):
827 def changectx(self, changeid):
828 return self[changeid]
828 return self[changeid]
829
829
830 def parents(self, changeid=None):
830 def parents(self, changeid=None):
831 '''get list of changectxs for parents of changeid'''
831 '''get list of changectxs for parents of changeid'''
832 return self[changeid].parents()
832 return self[changeid].parents()
833
833
    def setparents(self, p1, p2=nullid):
        """Set the dirstate parents to (p1, p2) and fix up copy records.

        Copy records are preserved only when they still make sense
        relative to the new first parent; stale records are dropped.
        """
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # collapsing to a single parent: drop copy records whose
            # source and destination are both unknown to that parent
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
850
850
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
855
855
856 def getcwd(self):
856 def getcwd(self):
857 return self.dirstate.getcwd()
857 return self.dirstate.getcwd()
858
858
859 def pathto(self, f, cwd=None):
859 def pathto(self, f, cwd=None):
860 return self.dirstate.pathto(f, cwd)
860 return self.dirstate.pathto(f, cwd)
861
861
862 def wfile(self, f, mode='r'):
862 def wfile(self, f, mode='r'):
863 return self.wvfs(f, mode)
863 return self.wvfs(f, mode)
864
864
    def _link(self, f):
        # True if working-directory file ``f`` is a symlink
        return self.wvfs.islink(f)
867
867
    def _loadfilter(self, filter):
        """Parse and cache the config section named by ``filter``
        ('encode' or 'decode'); return a list of (matcher, fn, params)
        triples, one per configured pattern."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # a command starting with a registered data-filter name
                # dispatches to that in-process filter instead of a shell
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to running the command as a shell filter
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
891
891
892 def _filter(self, filterpats, filename, data):
892 def _filter(self, filterpats, filename, data):
893 for mf, fn, cmd in filterpats:
893 for mf, fn, cmd in filterpats:
894 if mf(filename):
894 if mf(filename):
895 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
895 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
896 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
896 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
897 break
897 break
898
898
899 return data
899 return data
900
900
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # cached, parsed [encode] filter patterns
        return self._loadfilter('encode')
904
904
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # cached, parsed [decode] filter patterns
        return self._loadfilter('decode')
908
908
909 def adddatafilter(self, name, filter):
909 def adddatafilter(self, name, filter):
910 self._datafilters[name] = filter
910 self._datafilters[name] = filter
911
911
912 def wread(self, filename):
912 def wread(self, filename):
913 if self._link(filename):
913 if self._link(filename):
914 data = self.wvfs.readlink(filename)
914 data = self.wvfs.readlink(filename)
915 else:
915 else:
916 data = self.wvfs.read(filename)
916 data = self.wvfs.read(filename)
917 return self._filter(self._encodefilterpats, filename, data)
917 return self._filter(self._encodefilterpats, filename, data)
918
918
919 def wwrite(self, filename, data, flags):
919 def wwrite(self, filename, data, flags):
920 data = self._filter(self._decodefilterpats, filename, data)
920 data = self._filter(self._decodefilterpats, filename, data)
921 if 'l' in flags:
921 if 'l' in flags:
922 self.wvfs.symlink(data, filename)
922 self.wvfs.symlink(data, filename)
923 else:
923 else:
924 self.wvfs.write(filename, data)
924 self.wvfs.write(filename, data)
925 if 'x' in flags:
925 if 'x' in flags:
926 self.wvfs.setflags(filename, False, True)
926 self.wvfs.setflags(filename, False, True)
927
927
928 def wwritedata(self, filename, data):
928 def wwritedata(self, filename, data):
929 return self._filter(self._decodefilterpats, filename, data)
929 return self._filter(self._decodefilterpats, filename, data)
930
930
931 def currenttransaction(self):
931 def currenttransaction(self):
932 """return the current transaction or None if non exists"""
932 """return the current transaction or None if non exists"""
933 if self._transref:
933 if self._transref:
934 tr = self._transref()
934 tr = self._transref()
935 else:
935 else:
936 tr = None
936 tr = None
937
937
938 if tr and tr.running():
938 if tr and tr.running():
939 return tr
939 return tr
940 return None
940 return None
941
941
942 def transaction(self, desc, report=None):
942 def transaction(self, desc, report=None):
943 if (self.ui.configbool('devel', 'all')
943 if (self.ui.configbool('devel', 'all')
944 or self.ui.configbool('devel', 'check-locks')):
944 or self.ui.configbool('devel', 'check-locks')):
945 l = self._lockref and self._lockref()
945 l = self._lockref and self._lockref()
946 if l is None or not l.held:
946 if l is None or not l.held:
947 scmutil.develwarn(self.ui, 'transaction with no lock')
947 scmutil.develwarn(self.ui, 'transaction with no lock')
948 tr = self.currenttransaction()
948 tr = self.currenttransaction()
949 if tr is not None:
949 if tr is not None:
950 return tr.nest()
950 return tr.nest()
951
951
952 # abort here if the journal already exists
952 # abort here if the journal already exists
953 if self.svfs.exists("journal"):
953 if self.svfs.exists("journal"):
954 raise error.RepoError(
954 raise error.RepoError(
955 _("abandoned transaction found"),
955 _("abandoned transaction found"),
956 hint=_("run 'hg recover' to clean up transaction"))
956 hint=_("run 'hg recover' to clean up transaction"))
957
957
958 self.hook('pretxnopen', throw=True, txnname=desc)
958 self.hook('pretxnopen', throw=True, txnname=desc)
959
959
960 self._writejournal(desc)
960 self._writejournal(desc)
961 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
961 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
962 if report:
962 if report:
963 rp = report
963 rp = report
964 else:
964 else:
965 rp = self.ui.warn
965 rp = self.ui.warn
966 vfsmap = {'plain': self.vfs} # root of .hg/
966 vfsmap = {'plain': self.vfs} # root of .hg/
967 # we must avoid cyclic reference between repo and transaction.
967 # we must avoid cyclic reference between repo and transaction.
968 reporef = weakref.ref(self)
968 reporef = weakref.ref(self)
969 def validate(tr):
969 def validate(tr):
970 """will run pre-closing hooks"""
970 """will run pre-closing hooks"""
971 pending = lambda: tr.writepending() and self.root or ""
971 pending = lambda: tr.writepending() and self.root or ""
972 reporef().hook('pretxnclose', throw=True, pending=pending,
972 reporef().hook('pretxnclose', throw=True, pending=pending,
973 xnname=desc, **tr.hookargs)
973 xnname=desc, **tr.hookargs)
974
974
975 tr = transaction.transaction(rp, self.sopener, vfsmap,
975 tr = transaction.transaction(rp, self.sopener, vfsmap,
976 "journal",
976 "journal",
977 "undo",
977 "undo",
978 aftertrans(renames),
978 aftertrans(renames),
979 self.store.createmode,
979 self.store.createmode,
980 validator=validate)
980 validator=validate)
981
981
982 trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
982 trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
983 tr.hookargs['TXNID'] = trid
983 tr.hookargs['TXNID'] = trid
984 # note: writing the fncache only during finalize mean that the file is
984 # note: writing the fncache only during finalize mean that the file is
985 # outdated when running hooks. As fncache is used for streaming clone,
985 # outdated when running hooks. As fncache is used for streaming clone,
986 # this is not expected to break anything that happen during the hooks.
986 # this is not expected to break anything that happen during the hooks.
987 tr.addfinalize('flush-fncache', self.store.write)
987 tr.addfinalize('flush-fncache', self.store.write)
988 def txnclosehook(tr2):
988 def txnclosehook(tr2):
989 """To be run if transaction is successful, will schedule a hook run
989 """To be run if transaction is successful, will schedule a hook run
990 """
990 """
991 def hook():
991 def hook():
992 reporef().hook('txnclose', throw=False, txnname=desc,
992 reporef().hook('txnclose', throw=False, txnname=desc,
993 **tr2.hookargs)
993 **tr2.hookargs)
994 reporef()._afterlock(hook)
994 reporef()._afterlock(hook)
995 tr.addfinalize('txnclose-hook', txnclosehook)
995 tr.addfinalize('txnclose-hook', txnclosehook)
996 def txnaborthook(tr2):
996 def txnaborthook(tr2):
997 """To be run if transaction is aborted
997 """To be run if transaction is aborted
998 """
998 """
999 reporef().hook('txnabort', throw=False, txnname=desc,
999 reporef().hook('txnabort', throw=False, txnname=desc,
1000 **tr2.hookargs)
1000 **tr2.hookargs)
1001 tr.addabort('txnabort-hook', txnaborthook)
1001 tr.addabort('txnabort-hook', txnaborthook)
1002 self._transref = weakref.ref(tr)
1002 self._transref = weakref.ref(tr)
1003 return tr
1003 return tr
1004
1004
1005 def _journalfiles(self):
1005 def _journalfiles(self):
1006 return ((self.svfs, 'journal'),
1006 return ((self.svfs, 'journal'),
1007 (self.vfs, 'journal.dirstate'),
1007 (self.vfs, 'journal.dirstate'),
1008 (self.vfs, 'journal.branch'),
1008 (self.vfs, 'journal.branch'),
1009 (self.vfs, 'journal.desc'),
1009 (self.vfs, 'journal.desc'),
1010 (self.vfs, 'journal.bookmarks'),
1010 (self.vfs, 'journal.bookmarks'),
1011 (self.svfs, 'journal.phaseroots'))
1011 (self.svfs, 'journal.phaseroots'))
1012
1012
1013 def undofiles(self):
1013 def undofiles(self):
1014 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1014 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1015
1015
1016 def _writejournal(self, desc):
1016 def _writejournal(self, desc):
1017 self.vfs.write("journal.dirstate",
1017 self.vfs.write("journal.dirstate",
1018 self.vfs.tryread("dirstate"))
1018 self.vfs.tryread("dirstate"))
1019 self.vfs.write("journal.branch",
1019 self.vfs.write("journal.branch",
1020 encoding.fromlocal(self.dirstate.branch()))
1020 encoding.fromlocal(self.dirstate.branch()))
1021 self.vfs.write("journal.desc",
1021 self.vfs.write("journal.desc",
1022 "%d\n%s\n" % (len(self), desc))
1022 "%d\n%s\n" % (len(self), desc))
1023 self.vfs.write("journal.bookmarks",
1023 self.vfs.write("journal.bookmarks",
1024 self.vfs.tryread("bookmarks"))
1024 self.vfs.tryread("bookmarks"))
1025 self.svfs.write("journal.phaseroots",
1025 self.svfs.write("journal.phaseroots",
1026 self.svfs.tryread("phaseroots"))
1026 self.svfs.tryread("phaseroots"))
1027
1027
1028 def recover(self):
1028 def recover(self):
1029 lock = self.lock()
1029 lock = self.lock()
1030 try:
1030 try:
1031 if self.svfs.exists("journal"):
1031 if self.svfs.exists("journal"):
1032 self.ui.status(_("rolling back interrupted transaction\n"))
1032 self.ui.status(_("rolling back interrupted transaction\n"))
1033 vfsmap = {'': self.svfs,
1033 vfsmap = {'': self.svfs,
1034 'plain': self.vfs,}
1034 'plain': self.vfs,}
1035 transaction.rollback(self.svfs, vfsmap, "journal",
1035 transaction.rollback(self.svfs, vfsmap, "journal",
1036 self.ui.warn)
1036 self.ui.warn)
1037 self.invalidate()
1037 self.invalidate()
1038 return True
1038 return True
1039 else:
1039 else:
1040 self.ui.warn(_("no interrupted transaction available\n"))
1040 self.ui.warn(_("no interrupted transaction available\n"))
1041 return False
1041 return False
1042 finally:
1042 finally:
1043 lock.release()
1043 lock.release()
1044
1044
1045 def rollback(self, dryrun=False, force=False):
1045 def rollback(self, dryrun=False, force=False):
1046 wlock = lock = None
1046 wlock = lock = None
1047 try:
1047 try:
1048 wlock = self.wlock()
1048 wlock = self.wlock()
1049 lock = self.lock()
1049 lock = self.lock()
1050 if self.svfs.exists("undo"):
1050 if self.svfs.exists("undo"):
1051 return self._rollback(dryrun, force)
1051 return self._rollback(dryrun, force)
1052 else:
1052 else:
1053 self.ui.warn(_("no rollback information available\n"))
1053 self.ui.warn(_("no rollback information available\n"))
1054 return 1
1054 return 1
1055 finally:
1055 finally:
1056 release(lock, wlock)
1056 release(lock, wlock)
1057
1057
1058 @unfilteredmethod # Until we get smarter cache management
1058 @unfilteredmethod # Until we get smarter cache management
1059 def _rollback(self, dryrun, force):
1059 def _rollback(self, dryrun, force):
1060 ui = self.ui
1060 ui = self.ui
1061 try:
1061 try:
1062 args = self.vfs.read('undo.desc').splitlines()
1062 args = self.vfs.read('undo.desc').splitlines()
1063 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1063 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1064 if len(args) >= 3:
1064 if len(args) >= 3:
1065 detail = args[2]
1065 detail = args[2]
1066 oldtip = oldlen - 1
1066 oldtip = oldlen - 1
1067
1067
1068 if detail and ui.verbose:
1068 if detail and ui.verbose:
1069 msg = (_('repository tip rolled back to revision %s'
1069 msg = (_('repository tip rolled back to revision %s'
1070 ' (undo %s: %s)\n')
1070 ' (undo %s: %s)\n')
1071 % (oldtip, desc, detail))
1071 % (oldtip, desc, detail))
1072 else:
1072 else:
1073 msg = (_('repository tip rolled back to revision %s'
1073 msg = (_('repository tip rolled back to revision %s'
1074 ' (undo %s)\n')
1074 ' (undo %s)\n')
1075 % (oldtip, desc))
1075 % (oldtip, desc))
1076 except IOError:
1076 except IOError:
1077 msg = _('rolling back unknown transaction\n')
1077 msg = _('rolling back unknown transaction\n')
1078 desc = None
1078 desc = None
1079
1079
1080 if not force and self['.'] != self['tip'] and desc == 'commit':
1080 if not force and self['.'] != self['tip'] and desc == 'commit':
1081 raise util.Abort(
1081 raise util.Abort(
1082 _('rollback of last commit while not checked out '
1082 _('rollback of last commit while not checked out '
1083 'may lose data'), hint=_('use -f to force'))
1083 'may lose data'), hint=_('use -f to force'))
1084
1084
1085 ui.status(msg)
1085 ui.status(msg)
1086 if dryrun:
1086 if dryrun:
1087 return 0
1087 return 0
1088
1088
1089 parents = self.dirstate.parents()
1089 parents = self.dirstate.parents()
1090 self.destroying()
1090 self.destroying()
1091 vfsmap = {'plain': self.vfs, '': self.svfs}
1091 vfsmap = {'plain': self.vfs, '': self.svfs}
1092 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1092 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1093 if self.vfs.exists('undo.bookmarks'):
1093 if self.vfs.exists('undo.bookmarks'):
1094 self.vfs.rename('undo.bookmarks', 'bookmarks')
1094 self.vfs.rename('undo.bookmarks', 'bookmarks')
1095 if self.svfs.exists('undo.phaseroots'):
1095 if self.svfs.exists('undo.phaseroots'):
1096 self.svfs.rename('undo.phaseroots', 'phaseroots')
1096 self.svfs.rename('undo.phaseroots', 'phaseroots')
1097 self.invalidate()
1097 self.invalidate()
1098
1098
1099 parentgone = (parents[0] not in self.changelog.nodemap or
1099 parentgone = (parents[0] not in self.changelog.nodemap or
1100 parents[1] not in self.changelog.nodemap)
1100 parents[1] not in self.changelog.nodemap)
1101 if parentgone:
1101 if parentgone:
1102 self.vfs.rename('undo.dirstate', 'dirstate')
1102 self.vfs.rename('undo.dirstate', 'dirstate')
1103 try:
1103 try:
1104 branch = self.vfs.read('undo.branch')
1104 branch = self.vfs.read('undo.branch')
1105 self.dirstate.setbranch(encoding.tolocal(branch))
1105 self.dirstate.setbranch(encoding.tolocal(branch))
1106 except IOError:
1106 except IOError:
1107 ui.warn(_('named branch could not be reset: '
1107 ui.warn(_('named branch could not be reset: '
1108 'current branch is still \'%s\'\n')
1108 'current branch is still \'%s\'\n')
1109 % self.dirstate.branch())
1109 % self.dirstate.branch())
1110
1110
1111 self.dirstate.invalidate()
1111 self.dirstate.invalidate()
1112 parents = tuple([p.rev() for p in self.parents()])
1112 parents = tuple([p.rev() for p in self.parents()])
1113 if len(parents) > 1:
1113 if len(parents) > 1:
1114 ui.status(_('working directory now based on '
1114 ui.status(_('working directory now based on '
1115 'revisions %d and %d\n') % parents)
1115 'revisions %d and %d\n') % parents)
1116 else:
1116 else:
1117 ui.status(_('working directory now based on '
1117 ui.status(_('working directory now based on '
1118 'revision %d\n') % parents)
1118 'revision %d\n') % parents)
1119 ms = mergemod.mergestate(self)
1119 ms = mergemod.mergestate(self)
1120 ms.reset(self['.'].node())
1120 ms.reset(self['.'].node())
1121
1121
1122 # TODO: if we know which new heads may result from this rollback, pass
1122 # TODO: if we know which new heads may result from this rollback, pass
1123 # them to destroy(), which will prevent the branchhead cache from being
1123 # them to destroy(), which will prevent the branchhead cache from being
1124 # invalidated.
1124 # invalidated.
1125 self.destroyed()
1125 self.destroyed()
1126 return 0
1126 return 0
1127
1127
1128 def invalidatecaches(self):
1128 def invalidatecaches(self):
1129
1129
1130 if '_tagscache' in vars(self):
1130 if '_tagscache' in vars(self):
1131 # can't use delattr on proxy
1131 # can't use delattr on proxy
1132 del self.__dict__['_tagscache']
1132 del self.__dict__['_tagscache']
1133
1133
1134 self.unfiltered()._branchcaches.clear()
1134 self.unfiltered()._branchcaches.clear()
1135 self.invalidatevolatilesets()
1135 self.invalidatevolatilesets()
1136
1136
1137 def invalidatevolatilesets(self):
1137 def invalidatevolatilesets(self):
1138 self.filteredrevcache.clear()
1138 self.filteredrevcache.clear()
1139 obsolete.clearobscaches(self)
1139 obsolete.clearobscaches(self)
1140
1140
1141 def invalidatedirstate(self):
1141 def invalidatedirstate(self):
1142 '''Invalidates the dirstate, causing the next call to dirstate
1142 '''Invalidates the dirstate, causing the next call to dirstate
1143 to check if it was modified since the last time it was read,
1143 to check if it was modified since the last time it was read,
1144 rereading it if it has.
1144 rereading it if it has.
1145
1145
1146 This is different to dirstate.invalidate() that it doesn't always
1146 This is different to dirstate.invalidate() that it doesn't always
1147 rereads the dirstate. Use dirstate.invalidate() if you want to
1147 rereads the dirstate. Use dirstate.invalidate() if you want to
1148 explicitly read the dirstate again (i.e. restoring it to a previous
1148 explicitly read the dirstate again (i.e. restoring it to a previous
1149 known good state).'''
1149 known good state).'''
1150 if hasunfilteredcache(self, 'dirstate'):
1150 if hasunfilteredcache(self, 'dirstate'):
1151 for k in self.dirstate._filecache:
1151 for k in self.dirstate._filecache:
1152 try:
1152 try:
1153 delattr(self.dirstate, k)
1153 delattr(self.dirstate, k)
1154 except AttributeError:
1154 except AttributeError:
1155 pass
1155 pass
1156 delattr(self.unfiltered(), 'dirstate')
1156 delattr(self.unfiltered(), 'dirstate')
1157
1157
1158 def invalidate(self):
1158 def invalidate(self):
1159 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1159 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1160 for k in self._filecache:
1160 for k in self._filecache:
1161 # dirstate is invalidated separately in invalidatedirstate()
1161 # dirstate is invalidated separately in invalidatedirstate()
1162 if k == 'dirstate':
1162 if k == 'dirstate':
1163 continue
1163 continue
1164
1164
1165 try:
1165 try:
1166 delattr(unfiltered, k)
1166 delattr(unfiltered, k)
1167 except AttributeError:
1167 except AttributeError:
1168 pass
1168 pass
1169 self.invalidatecaches()
1169 self.invalidatecaches()
1170 self.store.invalidatecaches()
1170 self.store.invalidatecaches()
1171
1171
1172 def invalidateall(self):
1172 def invalidateall(self):
1173 '''Fully invalidates both store and non-store parts, causing the
1173 '''Fully invalidates both store and non-store parts, causing the
1174 subsequent operation to reread any outside changes.'''
1174 subsequent operation to reread any outside changes.'''
1175 # extension should hook this to invalidate its caches
1175 # extension should hook this to invalidate its caches
1176 self.invalidate()
1176 self.invalidate()
1177 self.invalidatedirstate()
1177 self.invalidatedirstate()
1178
1178
1179 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1179 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1180 try:
1180 try:
1181 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1181 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1182 except error.LockHeld, inst:
1182 except error.LockHeld, inst:
1183 if not wait:
1183 if not wait:
1184 raise
1184 raise
1185 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1185 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1186 (desc, inst.locker))
1186 (desc, inst.locker))
1187 # default to 600 seconds timeout
1187 # default to 600 seconds timeout
1188 l = lockmod.lock(vfs, lockname,
1188 l = lockmod.lock(vfs, lockname,
1189 int(self.ui.config("ui", "timeout", "600")),
1189 int(self.ui.config("ui", "timeout", "600")),
1190 releasefn, desc=desc)
1190 releasefn, desc=desc)
1191 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1191 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1192 if acquirefn:
1192 if acquirefn:
1193 acquirefn()
1193 acquirefn()
1194 return l
1194 return l
1195
1195
1196 def _afterlock(self, callback):
1196 def _afterlock(self, callback):
1197 """add a callback to the current repository lock.
1197 """add a callback to be run when the repository is fully unlocked
1198
1198
1199 The callback will be executed on lock release."""
1199 The callback will be executed when the outermost lock is released
1200 l = self._lockref and self._lockref()
1200 (with wlock being higher level than 'lock')."""
1201 if l:
1201 for ref in (self._wlockref, self._lockref):
1202 l.postrelease.append(callback)
1202 l = ref and ref()
1203 else:
1203 if l and l.held:
1204 l.postrelease.append(callback)
1205 break
1206 else: # no lock have been found.
1204 callback()
1207 callback()
1205
1208
1206 def lock(self, wait=True):
1209 def lock(self, wait=True):
1207 '''Lock the repository store (.hg/store) and return a weak reference
1210 '''Lock the repository store (.hg/store) and return a weak reference
1208 to the lock. Use this before modifying the store (e.g. committing or
1211 to the lock. Use this before modifying the store (e.g. committing or
1209 stripping). If you are opening a transaction, get a lock as well.)
1212 stripping). If you are opening a transaction, get a lock as well.)
1210
1213
1211 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1214 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1212 'wlock' first to avoid a dead-lock hazard.'''
1215 'wlock' first to avoid a dead-lock hazard.'''
1213 l = self._lockref and self._lockref()
1216 l = self._lockref and self._lockref()
1214 if l is not None and l.held:
1217 if l is not None and l.held:
1215 l.lock()
1218 l.lock()
1216 return l
1219 return l
1217
1220
1218 def unlock():
1221 def unlock():
1219 for k, ce in self._filecache.items():
1222 for k, ce in self._filecache.items():
1220 if k == 'dirstate' or k not in self.__dict__:
1223 if k == 'dirstate' or k not in self.__dict__:
1221 continue
1224 continue
1222 ce.refresh()
1225 ce.refresh()
1223
1226
1224 l = self._lock(self.svfs, "lock", wait, unlock,
1227 l = self._lock(self.svfs, "lock", wait, unlock,
1225 self.invalidate, _('repository %s') % self.origroot)
1228 self.invalidate, _('repository %s') % self.origroot)
1226 self._lockref = weakref.ref(l)
1229 self._lockref = weakref.ref(l)
1227 return l
1230 return l
1228
1231
1229 def wlock(self, wait=True):
1232 def wlock(self, wait=True):
1230 '''Lock the non-store parts of the repository (everything under
1233 '''Lock the non-store parts of the repository (everything under
1231 .hg except .hg/store) and return a weak reference to the lock.
1234 .hg except .hg/store) and return a weak reference to the lock.
1232
1235
1233 Use this before modifying files in .hg.
1236 Use this before modifying files in .hg.
1234
1237
1235 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1238 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1236 'wlock' first to avoid a dead-lock hazard.'''
1239 'wlock' first to avoid a dead-lock hazard.'''
1237 l = self._wlockref and self._wlockref()
1240 l = self._wlockref and self._wlockref()
1238 if l is not None and l.held:
1241 if l is not None and l.held:
1239 l.lock()
1242 l.lock()
1240 return l
1243 return l
1241
1244
1242 # We do not need to check for non-waiting lock aquisition. Such
1245 # We do not need to check for non-waiting lock aquisition. Such
1243 # acquisition would not cause dead-lock as they would just fail.
1246 # acquisition would not cause dead-lock as they would just fail.
1244 if wait and (self.ui.configbool('devel', 'all')
1247 if wait and (self.ui.configbool('devel', 'all')
1245 or self.ui.configbool('devel', 'check-locks')):
1248 or self.ui.configbool('devel', 'check-locks')):
1246 l = self._lockref and self._lockref()
1249 l = self._lockref and self._lockref()
1247 if l is not None and l.held:
1250 if l is not None and l.held:
1248 scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')
1251 scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')
1249
1252
1250 def unlock():
1253 def unlock():
1251 if self.dirstate.pendingparentchange():
1254 if self.dirstate.pendingparentchange():
1252 self.dirstate.invalidate()
1255 self.dirstate.invalidate()
1253 else:
1256 else:
1254 self.dirstate.write()
1257 self.dirstate.write()
1255
1258
1256 self._filecache['dirstate'].refresh()
1259 self._filecache['dirstate'].refresh()
1257
1260
1258 l = self._lock(self.vfs, "wlock", wait, unlock,
1261 l = self._lock(self.vfs, "wlock", wait, unlock,
1259 self.invalidatedirstate, _('working directory of %s') %
1262 self.invalidatedirstate, _('working directory of %s') %
1260 self.origroot)
1263 self.origroot)
1261 self._wlockref = weakref.ref(l)
1264 self._wlockref = weakref.ref(l)
1262 return l
1265 return l
1263
1266
1264 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1267 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1265 """
1268 """
1266 commit an individual file as part of a larger transaction
1269 commit an individual file as part of a larger transaction
1267 """
1270 """
1268
1271
1269 fname = fctx.path()
1272 fname = fctx.path()
1270 fparent1 = manifest1.get(fname, nullid)
1273 fparent1 = manifest1.get(fname, nullid)
1271 fparent2 = manifest2.get(fname, nullid)
1274 fparent2 = manifest2.get(fname, nullid)
1272 if isinstance(fctx, context.filectx):
1275 if isinstance(fctx, context.filectx):
1273 node = fctx.filenode()
1276 node = fctx.filenode()
1274 if node in [fparent1, fparent2]:
1277 if node in [fparent1, fparent2]:
1275 self.ui.debug('reusing %s filelog entry\n' % fname)
1278 self.ui.debug('reusing %s filelog entry\n' % fname)
1276 return node
1279 return node
1277
1280
1278 flog = self.file(fname)
1281 flog = self.file(fname)
1279 meta = {}
1282 meta = {}
1280 copy = fctx.renamed()
1283 copy = fctx.renamed()
1281 if copy and copy[0] != fname:
1284 if copy and copy[0] != fname:
1282 # Mark the new revision of this file as a copy of another
1285 # Mark the new revision of this file as a copy of another
1283 # file. This copy data will effectively act as a parent
1286 # file. This copy data will effectively act as a parent
1284 # of this new revision. If this is a merge, the first
1287 # of this new revision. If this is a merge, the first
1285 # parent will be the nullid (meaning "look up the copy data")
1288 # parent will be the nullid (meaning "look up the copy data")
1286 # and the second one will be the other parent. For example:
1289 # and the second one will be the other parent. For example:
1287 #
1290 #
1288 # 0 --- 1 --- 3 rev1 changes file foo
1291 # 0 --- 1 --- 3 rev1 changes file foo
1289 # \ / rev2 renames foo to bar and changes it
1292 # \ / rev2 renames foo to bar and changes it
1290 # \- 2 -/ rev3 should have bar with all changes and
1293 # \- 2 -/ rev3 should have bar with all changes and
1291 # should record that bar descends from
1294 # should record that bar descends from
1292 # bar in rev2 and foo in rev1
1295 # bar in rev2 and foo in rev1
1293 #
1296 #
1294 # this allows this merge to succeed:
1297 # this allows this merge to succeed:
1295 #
1298 #
1296 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1299 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1297 # \ / merging rev3 and rev4 should use bar@rev2
1300 # \ / merging rev3 and rev4 should use bar@rev2
1298 # \- 2 --- 4 as the merge base
1301 # \- 2 --- 4 as the merge base
1299 #
1302 #
1300
1303
1301 cfname = copy[0]
1304 cfname = copy[0]
1302 crev = manifest1.get(cfname)
1305 crev = manifest1.get(cfname)
1303 newfparent = fparent2
1306 newfparent = fparent2
1304
1307
1305 if manifest2: # branch merge
1308 if manifest2: # branch merge
1306 if fparent2 == nullid or crev is None: # copied on remote side
1309 if fparent2 == nullid or crev is None: # copied on remote side
1307 if cfname in manifest2:
1310 if cfname in manifest2:
1308 crev = manifest2[cfname]
1311 crev = manifest2[cfname]
1309 newfparent = fparent1
1312 newfparent = fparent1
1310
1313
1311 # Here, we used to search backwards through history to try to find
1314 # Here, we used to search backwards through history to try to find
1312 # where the file copy came from if the source of a copy was not in
1315 # where the file copy came from if the source of a copy was not in
1313 # the parent directory. However, this doesn't actually make sense to
1316 # the parent directory. However, this doesn't actually make sense to
1314 # do (what does a copy from something not in your working copy even
1317 # do (what does a copy from something not in your working copy even
1315 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1318 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1316 # the user that copy information was dropped, so if they didn't
1319 # the user that copy information was dropped, so if they didn't
1317 # expect this outcome it can be fixed, but this is the correct
1320 # expect this outcome it can be fixed, but this is the correct
1318 # behavior in this circumstance.
1321 # behavior in this circumstance.
1319
1322
1320 if crev:
1323 if crev:
1321 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1324 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1322 meta["copy"] = cfname
1325 meta["copy"] = cfname
1323 meta["copyrev"] = hex(crev)
1326 meta["copyrev"] = hex(crev)
1324 fparent1, fparent2 = nullid, newfparent
1327 fparent1, fparent2 = nullid, newfparent
1325 else:
1328 else:
1326 self.ui.warn(_("warning: can't find ancestor for '%s' "
1329 self.ui.warn(_("warning: can't find ancestor for '%s' "
1327 "copied from '%s'!\n") % (fname, cfname))
1330 "copied from '%s'!\n") % (fname, cfname))
1328
1331
1329 elif fparent1 == nullid:
1332 elif fparent1 == nullid:
1330 fparent1, fparent2 = fparent2, nullid
1333 fparent1, fparent2 = fparent2, nullid
1331 elif fparent2 != nullid:
1334 elif fparent2 != nullid:
1332 # is one parent an ancestor of the other?
1335 # is one parent an ancestor of the other?
1333 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1336 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1334 if fparent1 in fparentancestors:
1337 if fparent1 in fparentancestors:
1335 fparent1, fparent2 = fparent2, nullid
1338 fparent1, fparent2 = fparent2, nullid
1336 elif fparent2 in fparentancestors:
1339 elif fparent2 in fparentancestors:
1337 fparent2 = nullid
1340 fparent2 = nullid
1338
1341
1339 # is the file changed?
1342 # is the file changed?
1340 text = fctx.data()
1343 text = fctx.data()
1341 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1344 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1342 changelist.append(fname)
1345 changelist.append(fname)
1343 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1346 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1344 # are just the flags changed during merge?
1347 # are just the flags changed during merge?
1345 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1348 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1346 changelist.append(fname)
1349 changelist.append(fname)
1347
1350
1348 return fparent1
1351 return fparent1
1349
1352
1350 @unfilteredmethod
1353 @unfilteredmethod
1351 def commit(self, text="", user=None, date=None, match=None, force=False,
1354 def commit(self, text="", user=None, date=None, match=None, force=False,
1352 editor=False, extra={}):
1355 editor=False, extra={}):
1353 """Add a new revision to current repository.
1356 """Add a new revision to current repository.
1354
1357
1355 Revision information is gathered from the working directory,
1358 Revision information is gathered from the working directory,
1356 match can be used to filter the committed files. If editor is
1359 match can be used to filter the committed files. If editor is
1357 supplied, it is called to get a commit message.
1360 supplied, it is called to get a commit message.
1358 """
1361 """
1359
1362
1360 def fail(f, msg):
1363 def fail(f, msg):
1361 raise util.Abort('%s: %s' % (f, msg))
1364 raise util.Abort('%s: %s' % (f, msg))
1362
1365
1363 if not match:
1366 if not match:
1364 match = matchmod.always(self.root, '')
1367 match = matchmod.always(self.root, '')
1365
1368
1366 if not force:
1369 if not force:
1367 vdirs = []
1370 vdirs = []
1368 match.explicitdir = vdirs.append
1371 match.explicitdir = vdirs.append
1369 match.bad = fail
1372 match.bad = fail
1370
1373
1371 wlock = self.wlock()
1374 wlock = self.wlock()
1372 try:
1375 try:
1373 wctx = self[None]
1376 wctx = self[None]
1374 merge = len(wctx.parents()) > 1
1377 merge = len(wctx.parents()) > 1
1375
1378
1376 if not force and merge and not match.always():
1379 if not force and merge and not match.always():
1377 raise util.Abort(_('cannot partially commit a merge '
1380 raise util.Abort(_('cannot partially commit a merge '
1378 '(do not specify files or patterns)'))
1381 '(do not specify files or patterns)'))
1379
1382
1380 status = self.status(match=match, clean=force)
1383 status = self.status(match=match, clean=force)
1381 if force:
1384 if force:
1382 status.modified.extend(status.clean) # mq may commit clean files
1385 status.modified.extend(status.clean) # mq may commit clean files
1383
1386
1384 # check subrepos
1387 # check subrepos
1385 subs = []
1388 subs = []
1386 commitsubs = set()
1389 commitsubs = set()
1387 newstate = wctx.substate.copy()
1390 newstate = wctx.substate.copy()
1388 # only manage subrepos and .hgsubstate if .hgsub is present
1391 # only manage subrepos and .hgsubstate if .hgsub is present
1389 if '.hgsub' in wctx:
1392 if '.hgsub' in wctx:
1390 # we'll decide whether to track this ourselves, thanks
1393 # we'll decide whether to track this ourselves, thanks
1391 for c in status.modified, status.added, status.removed:
1394 for c in status.modified, status.added, status.removed:
1392 if '.hgsubstate' in c:
1395 if '.hgsubstate' in c:
1393 c.remove('.hgsubstate')
1396 c.remove('.hgsubstate')
1394
1397
1395 # compare current state to last committed state
1398 # compare current state to last committed state
1396 # build new substate based on last committed state
1399 # build new substate based on last committed state
1397 oldstate = wctx.p1().substate
1400 oldstate = wctx.p1().substate
1398 for s in sorted(newstate.keys()):
1401 for s in sorted(newstate.keys()):
1399 if not match(s):
1402 if not match(s):
1400 # ignore working copy, use old state if present
1403 # ignore working copy, use old state if present
1401 if s in oldstate:
1404 if s in oldstate:
1402 newstate[s] = oldstate[s]
1405 newstate[s] = oldstate[s]
1403 continue
1406 continue
1404 if not force:
1407 if not force:
1405 raise util.Abort(
1408 raise util.Abort(
1406 _("commit with new subrepo %s excluded") % s)
1409 _("commit with new subrepo %s excluded") % s)
1407 dirtyreason = wctx.sub(s).dirtyreason(True)
1410 dirtyreason = wctx.sub(s).dirtyreason(True)
1408 if dirtyreason:
1411 if dirtyreason:
1409 if not self.ui.configbool('ui', 'commitsubrepos'):
1412 if not self.ui.configbool('ui', 'commitsubrepos'):
1410 raise util.Abort(dirtyreason,
1413 raise util.Abort(dirtyreason,
1411 hint=_("use --subrepos for recursive commit"))
1414 hint=_("use --subrepos for recursive commit"))
1412 subs.append(s)
1415 subs.append(s)
1413 commitsubs.add(s)
1416 commitsubs.add(s)
1414 else:
1417 else:
1415 bs = wctx.sub(s).basestate()
1418 bs = wctx.sub(s).basestate()
1416 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1419 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1417 if oldstate.get(s, (None, None, None))[1] != bs:
1420 if oldstate.get(s, (None, None, None))[1] != bs:
1418 subs.append(s)
1421 subs.append(s)
1419
1422
1420 # check for removed subrepos
1423 # check for removed subrepos
1421 for p in wctx.parents():
1424 for p in wctx.parents():
1422 r = [s for s in p.substate if s not in newstate]
1425 r = [s for s in p.substate if s not in newstate]
1423 subs += [s for s in r if match(s)]
1426 subs += [s for s in r if match(s)]
1424 if subs:
1427 if subs:
1425 if (not match('.hgsub') and
1428 if (not match('.hgsub') and
1426 '.hgsub' in (wctx.modified() + wctx.added())):
1429 '.hgsub' in (wctx.modified() + wctx.added())):
1427 raise util.Abort(
1430 raise util.Abort(
1428 _("can't commit subrepos without .hgsub"))
1431 _("can't commit subrepos without .hgsub"))
1429 status.modified.insert(0, '.hgsubstate')
1432 status.modified.insert(0, '.hgsubstate')
1430
1433
1431 elif '.hgsub' in status.removed:
1434 elif '.hgsub' in status.removed:
1432 # clean up .hgsubstate when .hgsub is removed
1435 # clean up .hgsubstate when .hgsub is removed
1433 if ('.hgsubstate' in wctx and
1436 if ('.hgsubstate' in wctx and
1434 '.hgsubstate' not in (status.modified + status.added +
1437 '.hgsubstate' not in (status.modified + status.added +
1435 status.removed)):
1438 status.removed)):
1436 status.removed.insert(0, '.hgsubstate')
1439 status.removed.insert(0, '.hgsubstate')
1437
1440
1438 # make sure all explicit patterns are matched
1441 # make sure all explicit patterns are matched
1439 if not force and match.files():
1442 if not force and match.files():
1440 matched = set(status.modified + status.added + status.removed)
1443 matched = set(status.modified + status.added + status.removed)
1441
1444
1442 for f in match.files():
1445 for f in match.files():
1443 f = self.dirstate.normalize(f)
1446 f = self.dirstate.normalize(f)
1444 if f == '.' or f in matched or f in wctx.substate:
1447 if f == '.' or f in matched or f in wctx.substate:
1445 continue
1448 continue
1446 if f in status.deleted:
1449 if f in status.deleted:
1447 fail(f, _('file not found!'))
1450 fail(f, _('file not found!'))
1448 if f in vdirs: # visited directory
1451 if f in vdirs: # visited directory
1449 d = f + '/'
1452 d = f + '/'
1450 for mf in matched:
1453 for mf in matched:
1451 if mf.startswith(d):
1454 if mf.startswith(d):
1452 break
1455 break
1453 else:
1456 else:
1454 fail(f, _("no match under directory!"))
1457 fail(f, _("no match under directory!"))
1455 elif f not in self.dirstate:
1458 elif f not in self.dirstate:
1456 fail(f, _("file not tracked!"))
1459 fail(f, _("file not tracked!"))
1457
1460
1458 cctx = context.workingcommitctx(self, status,
1461 cctx = context.workingcommitctx(self, status,
1459 text, user, date, extra)
1462 text, user, date, extra)
1460
1463
1461 if (not force and not extra.get("close") and not merge
1464 if (not force and not extra.get("close") and not merge
1462 and not cctx.files()
1465 and not cctx.files()
1463 and wctx.branch() == wctx.p1().branch()):
1466 and wctx.branch() == wctx.p1().branch()):
1464 return None
1467 return None
1465
1468
1466 if merge and cctx.deleted():
1469 if merge and cctx.deleted():
1467 raise util.Abort(_("cannot commit merge with missing files"))
1470 raise util.Abort(_("cannot commit merge with missing files"))
1468
1471
1469 ms = mergemod.mergestate(self)
1472 ms = mergemod.mergestate(self)
1470 for f in status.modified:
1473 for f in status.modified:
1471 if f in ms and ms[f] == 'u':
1474 if f in ms and ms[f] == 'u':
1472 raise util.Abort(_('unresolved merge conflicts '
1475 raise util.Abort(_('unresolved merge conflicts '
1473 '(see "hg help resolve")'))
1476 '(see "hg help resolve")'))
1474
1477
1475 if editor:
1478 if editor:
1476 cctx._text = editor(self, cctx, subs)
1479 cctx._text = editor(self, cctx, subs)
1477 edited = (text != cctx._text)
1480 edited = (text != cctx._text)
1478
1481
1479 # Save commit message in case this transaction gets rolled back
1482 # Save commit message in case this transaction gets rolled back
1480 # (e.g. by a pretxncommit hook). Leave the content alone on
1483 # (e.g. by a pretxncommit hook). Leave the content alone on
1481 # the assumption that the user will use the same editor again.
1484 # the assumption that the user will use the same editor again.
1482 msgfn = self.savecommitmessage(cctx._text)
1485 msgfn = self.savecommitmessage(cctx._text)
1483
1486
1484 # commit subs and write new state
1487 # commit subs and write new state
1485 if subs:
1488 if subs:
1486 for s in sorted(commitsubs):
1489 for s in sorted(commitsubs):
1487 sub = wctx.sub(s)
1490 sub = wctx.sub(s)
1488 self.ui.status(_('committing subrepository %s\n') %
1491 self.ui.status(_('committing subrepository %s\n') %
1489 subrepo.subrelpath(sub))
1492 subrepo.subrelpath(sub))
1490 sr = sub.commit(cctx._text, user, date)
1493 sr = sub.commit(cctx._text, user, date)
1491 newstate[s] = (newstate[s][0], sr)
1494 newstate[s] = (newstate[s][0], sr)
1492 subrepo.writestate(self, newstate)
1495 subrepo.writestate(self, newstate)
1493
1496
1494 p1, p2 = self.dirstate.parents()
1497 p1, p2 = self.dirstate.parents()
1495 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1498 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1496 try:
1499 try:
1497 self.hook("precommit", throw=True, parent1=hookp1,
1500 self.hook("precommit", throw=True, parent1=hookp1,
1498 parent2=hookp2)
1501 parent2=hookp2)
1499 ret = self.commitctx(cctx, True)
1502 ret = self.commitctx(cctx, True)
1500 except: # re-raises
1503 except: # re-raises
1501 if edited:
1504 if edited:
1502 self.ui.write(
1505 self.ui.write(
1503 _('note: commit message saved in %s\n') % msgfn)
1506 _('note: commit message saved in %s\n') % msgfn)
1504 raise
1507 raise
1505
1508
1506 # update bookmarks, dirstate and mergestate
1509 # update bookmarks, dirstate and mergestate
1507 bookmarks.update(self, [p1, p2], ret)
1510 bookmarks.update(self, [p1, p2], ret)
1508 cctx.markcommitted(ret)
1511 cctx.markcommitted(ret)
1509 ms.reset()
1512 ms.reset()
1510 finally:
1513 finally:
1511 wlock.release()
1514 wlock.release()
1512
1515
1513 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1516 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1514 # hack for command that use a temporary commit (eg: histedit)
1517 # hack for command that use a temporary commit (eg: histedit)
1515 # temporary commit got stripped before hook release
1518 # temporary commit got stripped before hook release
1516 if node in self:
1519 if node in self:
1517 self.hook("commit", node=node, parent1=parent1,
1520 self.hook("commit", node=node, parent1=parent1,
1518 parent2=parent2)
1521 parent2=parent2)
1519 self._afterlock(commithook)
1522 self._afterlock(commithook)
1520 return ret
1523 return ret
1521
1524
1522 @unfilteredmethod
1525 @unfilteredmethod
1523 def commitctx(self, ctx, error=False):
1526 def commitctx(self, ctx, error=False):
1524 """Add a new revision to current repository.
1527 """Add a new revision to current repository.
1525 Revision information is passed via the context argument.
1528 Revision information is passed via the context argument.
1526 """
1529 """
1527
1530
1528 tr = None
1531 tr = None
1529 p1, p2 = ctx.p1(), ctx.p2()
1532 p1, p2 = ctx.p1(), ctx.p2()
1530 user = ctx.user()
1533 user = ctx.user()
1531
1534
1532 lock = self.lock()
1535 lock = self.lock()
1533 try:
1536 try:
1534 tr = self.transaction("commit")
1537 tr = self.transaction("commit")
1535 trp = weakref.proxy(tr)
1538 trp = weakref.proxy(tr)
1536
1539
1537 if ctx.files():
1540 if ctx.files():
1538 m1 = p1.manifest()
1541 m1 = p1.manifest()
1539 m2 = p2.manifest()
1542 m2 = p2.manifest()
1540 m = m1.copy()
1543 m = m1.copy()
1541
1544
1542 # check in files
1545 # check in files
1543 added = []
1546 added = []
1544 changed = []
1547 changed = []
1545 removed = list(ctx.removed())
1548 removed = list(ctx.removed())
1546 linkrev = len(self)
1549 linkrev = len(self)
1547 self.ui.note(_("committing files:\n"))
1550 self.ui.note(_("committing files:\n"))
1548 for f in sorted(ctx.modified() + ctx.added()):
1551 for f in sorted(ctx.modified() + ctx.added()):
1549 self.ui.note(f + "\n")
1552 self.ui.note(f + "\n")
1550 try:
1553 try:
1551 fctx = ctx[f]
1554 fctx = ctx[f]
1552 if fctx is None:
1555 if fctx is None:
1553 removed.append(f)
1556 removed.append(f)
1554 else:
1557 else:
1555 added.append(f)
1558 added.append(f)
1556 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1559 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1557 trp, changed)
1560 trp, changed)
1558 m.setflag(f, fctx.flags())
1561 m.setflag(f, fctx.flags())
1559 except OSError, inst:
1562 except OSError, inst:
1560 self.ui.warn(_("trouble committing %s!\n") % f)
1563 self.ui.warn(_("trouble committing %s!\n") % f)
1561 raise
1564 raise
1562 except IOError, inst:
1565 except IOError, inst:
1563 errcode = getattr(inst, 'errno', errno.ENOENT)
1566 errcode = getattr(inst, 'errno', errno.ENOENT)
1564 if error or errcode and errcode != errno.ENOENT:
1567 if error or errcode and errcode != errno.ENOENT:
1565 self.ui.warn(_("trouble committing %s!\n") % f)
1568 self.ui.warn(_("trouble committing %s!\n") % f)
1566 raise
1569 raise
1567
1570
1568 # update manifest
1571 # update manifest
1569 self.ui.note(_("committing manifest\n"))
1572 self.ui.note(_("committing manifest\n"))
1570 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1573 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1571 drop = [f for f in removed if f in m]
1574 drop = [f for f in removed if f in m]
1572 for f in drop:
1575 for f in drop:
1573 del m[f]
1576 del m[f]
1574 mn = self.manifest.add(m, trp, linkrev,
1577 mn = self.manifest.add(m, trp, linkrev,
1575 p1.manifestnode(), p2.manifestnode(),
1578 p1.manifestnode(), p2.manifestnode(),
1576 added, drop)
1579 added, drop)
1577 files = changed + removed
1580 files = changed + removed
1578 else:
1581 else:
1579 mn = p1.manifestnode()
1582 mn = p1.manifestnode()
1580 files = []
1583 files = []
1581
1584
1582 # update changelog
1585 # update changelog
1583 self.ui.note(_("committing changelog\n"))
1586 self.ui.note(_("committing changelog\n"))
1584 self.changelog.delayupdate(tr)
1587 self.changelog.delayupdate(tr)
1585 n = self.changelog.add(mn, files, ctx.description(),
1588 n = self.changelog.add(mn, files, ctx.description(),
1586 trp, p1.node(), p2.node(),
1589 trp, p1.node(), p2.node(),
1587 user, ctx.date(), ctx.extra().copy())
1590 user, ctx.date(), ctx.extra().copy())
1588 p = lambda: tr.writepending() and self.root or ""
1591 p = lambda: tr.writepending() and self.root or ""
1589 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1592 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1590 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1593 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1591 parent2=xp2, pending=p)
1594 parent2=xp2, pending=p)
1592 # set the new commit is proper phase
1595 # set the new commit is proper phase
1593 targetphase = subrepo.newcommitphase(self.ui, ctx)
1596 targetphase = subrepo.newcommitphase(self.ui, ctx)
1594 if targetphase:
1597 if targetphase:
1595 # retract boundary do not alter parent changeset.
1598 # retract boundary do not alter parent changeset.
1596 # if a parent have higher the resulting phase will
1599 # if a parent have higher the resulting phase will
1597 # be compliant anyway
1600 # be compliant anyway
1598 #
1601 #
1599 # if minimal phase was 0 we don't need to retract anything
1602 # if minimal phase was 0 we don't need to retract anything
1600 phases.retractboundary(self, tr, targetphase, [n])
1603 phases.retractboundary(self, tr, targetphase, [n])
1601 tr.close()
1604 tr.close()
1602 branchmap.updatecache(self.filtered('served'))
1605 branchmap.updatecache(self.filtered('served'))
1603 return n
1606 return n
1604 finally:
1607 finally:
1605 if tr:
1608 if tr:
1606 tr.release()
1609 tr.release()
1607 lock.release()
1610 lock.release()
1608
1611
1609 @unfilteredmethod
1612 @unfilteredmethod
1610 def destroying(self):
1613 def destroying(self):
1611 '''Inform the repository that nodes are about to be destroyed.
1614 '''Inform the repository that nodes are about to be destroyed.
1612 Intended for use by strip and rollback, so there's a common
1615 Intended for use by strip and rollback, so there's a common
1613 place for anything that has to be done before destroying history.
1616 place for anything that has to be done before destroying history.
1614
1617
1615 This is mostly useful for saving state that is in memory and waiting
1618 This is mostly useful for saving state that is in memory and waiting
1616 to be flushed when the current lock is released. Because a call to
1619 to be flushed when the current lock is released. Because a call to
1617 destroyed is imminent, the repo will be invalidated causing those
1620 destroyed is imminent, the repo will be invalidated causing those
1618 changes to stay in memory (waiting for the next unlock), or vanish
1621 changes to stay in memory (waiting for the next unlock), or vanish
1619 completely.
1622 completely.
1620 '''
1623 '''
1621 # When using the same lock to commit and strip, the phasecache is left
1624 # When using the same lock to commit and strip, the phasecache is left
1622 # dirty after committing. Then when we strip, the repo is invalidated,
1625 # dirty after committing. Then when we strip, the repo is invalidated,
1623 # causing those changes to disappear.
1626 # causing those changes to disappear.
1624 if '_phasecache' in vars(self):
1627 if '_phasecache' in vars(self):
1625 self._phasecache.write()
1628 self._phasecache.write()
1626
1629
1627 @unfilteredmethod
1630 @unfilteredmethod
1628 def destroyed(self):
1631 def destroyed(self):
1629 '''Inform the repository that nodes have been destroyed.
1632 '''Inform the repository that nodes have been destroyed.
1630 Intended for use by strip and rollback, so there's a common
1633 Intended for use by strip and rollback, so there's a common
1631 place for anything that has to be done after destroying history.
1634 place for anything that has to be done after destroying history.
1632 '''
1635 '''
1633 # When one tries to:
1636 # When one tries to:
1634 # 1) destroy nodes thus calling this method (e.g. strip)
1637 # 1) destroy nodes thus calling this method (e.g. strip)
1635 # 2) use phasecache somewhere (e.g. commit)
1638 # 2) use phasecache somewhere (e.g. commit)
1636 #
1639 #
1637 # then 2) will fail because the phasecache contains nodes that were
1640 # then 2) will fail because the phasecache contains nodes that were
1638 # removed. We can either remove phasecache from the filecache,
1641 # removed. We can either remove phasecache from the filecache,
1639 # causing it to reload next time it is accessed, or simply filter
1642 # causing it to reload next time it is accessed, or simply filter
1640 # the removed nodes now and write the updated cache.
1643 # the removed nodes now and write the updated cache.
1641 self._phasecache.filterunknown(self)
1644 self._phasecache.filterunknown(self)
1642 self._phasecache.write()
1645 self._phasecache.write()
1643
1646
1644 # update the 'served' branch cache to help read only server process
1647 # update the 'served' branch cache to help read only server process
1645 # Thanks to branchcache collaboration this is done from the nearest
1648 # Thanks to branchcache collaboration this is done from the nearest
1646 # filtered subset and it is expected to be fast.
1649 # filtered subset and it is expected to be fast.
1647 branchmap.updatecache(self.filtered('served'))
1650 branchmap.updatecache(self.filtered('served'))
1648
1651
1649 # Ensure the persistent tag cache is updated. Doing it now
1652 # Ensure the persistent tag cache is updated. Doing it now
1650 # means that the tag cache only has to worry about destroyed
1653 # means that the tag cache only has to worry about destroyed
1651 # heads immediately after a strip/rollback. That in turn
1654 # heads immediately after a strip/rollback. That in turn
1652 # guarantees that "cachetip == currenttip" (comparing both rev
1655 # guarantees that "cachetip == currenttip" (comparing both rev
1653 # and node) always means no nodes have been added or destroyed.
1656 # and node) always means no nodes have been added or destroyed.
1654
1657
1655 # XXX this is suboptimal when qrefresh'ing: we strip the current
1658 # XXX this is suboptimal when qrefresh'ing: we strip the current
1656 # head, refresh the tag cache, then immediately add a new head.
1659 # head, refresh the tag cache, then immediately add a new head.
1657 # But I think doing it this way is necessary for the "instant
1660 # But I think doing it this way is necessary for the "instant
1658 # tag cache retrieval" case to work.
1661 # tag cache retrieval" case to work.
1659 self.invalidate()
1662 self.invalidate()
1660
1663
1661 def walk(self, match, node=None):
1664 def walk(self, match, node=None):
1662 '''
1665 '''
1663 walk recursively through the directory tree or a given
1666 walk recursively through the directory tree or a given
1664 changeset, finding all files matched by the match
1667 changeset, finding all files matched by the match
1665 function
1668 function
1666 '''
1669 '''
1667 return self[node].walk(match)
1670 return self[node].walk(match)
1668
1671
1669 def status(self, node1='.', node2=None, match=None,
1672 def status(self, node1='.', node2=None, match=None,
1670 ignored=False, clean=False, unknown=False,
1673 ignored=False, clean=False, unknown=False,
1671 listsubrepos=False):
1674 listsubrepos=False):
1672 '''a convenience method that calls node1.status(node2)'''
1675 '''a convenience method that calls node1.status(node2)'''
1673 return self[node1].status(node2, match, ignored, clean, unknown,
1676 return self[node1].status(node2, match, ignored, clean, unknown,
1674 listsubrepos)
1677 listsubrepos)
1675
1678
1676 def heads(self, start=None):
1679 def heads(self, start=None):
1677 heads = self.changelog.heads(start)
1680 heads = self.changelog.heads(start)
1678 # sort the output in rev descending order
1681 # sort the output in rev descending order
1679 return sorted(heads, key=self.changelog.rev, reverse=True)
1682 return sorted(heads, key=self.changelog.rev, reverse=True)
1680
1683
1681 def branchheads(self, branch=None, start=None, closed=False):
1684 def branchheads(self, branch=None, start=None, closed=False):
1682 '''return a (possibly filtered) list of heads for the given branch
1685 '''return a (possibly filtered) list of heads for the given branch
1683
1686
1684 Heads are returned in topological order, from newest to oldest.
1687 Heads are returned in topological order, from newest to oldest.
1685 If branch is None, use the dirstate branch.
1688 If branch is None, use the dirstate branch.
1686 If start is not None, return only heads reachable from start.
1689 If start is not None, return only heads reachable from start.
1687 If closed is True, return heads that are marked as closed as well.
1690 If closed is True, return heads that are marked as closed as well.
1688 '''
1691 '''
1689 if branch is None:
1692 if branch is None:
1690 branch = self[None].branch()
1693 branch = self[None].branch()
1691 branches = self.branchmap()
1694 branches = self.branchmap()
1692 if branch not in branches:
1695 if branch not in branches:
1693 return []
1696 return []
1694 # the cache returns heads ordered lowest to highest
1697 # the cache returns heads ordered lowest to highest
1695 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1698 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1696 if start is not None:
1699 if start is not None:
1697 # filter out the heads that cannot be reached from startrev
1700 # filter out the heads that cannot be reached from startrev
1698 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1701 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1699 bheads = [h for h in bheads if h in fbheads]
1702 bheads = [h for h in bheads if h in fbheads]
1700 return bheads
1703 return bheads
1701
1704
1702 def branches(self, nodes):
1705 def branches(self, nodes):
1703 if not nodes:
1706 if not nodes:
1704 nodes = [self.changelog.tip()]
1707 nodes = [self.changelog.tip()]
1705 b = []
1708 b = []
1706 for n in nodes:
1709 for n in nodes:
1707 t = n
1710 t = n
1708 while True:
1711 while True:
1709 p = self.changelog.parents(n)
1712 p = self.changelog.parents(n)
1710 if p[1] != nullid or p[0] == nullid:
1713 if p[1] != nullid or p[0] == nullid:
1711 b.append((t, n, p[0], p[1]))
1714 b.append((t, n, p[0], p[1]))
1712 break
1715 break
1713 n = p[0]
1716 n = p[0]
1714 return b
1717 return b
1715
1718
1716 def between(self, pairs):
1719 def between(self, pairs):
1717 r = []
1720 r = []
1718
1721
1719 for top, bottom in pairs:
1722 for top, bottom in pairs:
1720 n, l, i = top, [], 0
1723 n, l, i = top, [], 0
1721 f = 1
1724 f = 1
1722
1725
1723 while n != bottom and n != nullid:
1726 while n != bottom and n != nullid:
1724 p = self.changelog.parents(n)[0]
1727 p = self.changelog.parents(n)[0]
1725 if i == f:
1728 if i == f:
1726 l.append(n)
1729 l.append(n)
1727 f = f * 2
1730 f = f * 2
1728 n = p
1731 n = p
1729 i += 1
1732 i += 1
1730
1733
1731 r.append(l)
1734 r.append(l)
1732
1735
1733 return r
1736 return r
1734
1737
1735 def checkpush(self, pushop):
1738 def checkpush(self, pushop):
1736 """Extensions can override this function if additional checks have
1739 """Extensions can override this function if additional checks have
1737 to be performed before pushing, or call it if they override push
1740 to be performed before pushing, or call it if they override push
1738 command.
1741 command.
1739 """
1742 """
1740 pass
1743 pass
1741
1744
1742 @unfilteredpropertycache
1745 @unfilteredpropertycache
1743 def prepushoutgoinghooks(self):
1746 def prepushoutgoinghooks(self):
1744 """Return util.hooks consists of "(repo, remote, outgoing)"
1747 """Return util.hooks consists of "(repo, remote, outgoing)"
1745 functions, which are called before pushing changesets.
1748 functions, which are called before pushing changesets.
1746 """
1749 """
1747 return util.hooks()
1750 return util.hooks()
1748
1751
1749 def stream_in(self, remote, requirements):
1752 def stream_in(self, remote, requirements):
1750 lock = self.lock()
1753 lock = self.lock()
1751 try:
1754 try:
1752 # Save remote branchmap. We will use it later
1755 # Save remote branchmap. We will use it later
1753 # to speed up branchcache creation
1756 # to speed up branchcache creation
1754 rbranchmap = None
1757 rbranchmap = None
1755 if remote.capable("branchmap"):
1758 if remote.capable("branchmap"):
1756 rbranchmap = remote.branchmap()
1759 rbranchmap = remote.branchmap()
1757
1760
1758 fp = remote.stream_out()
1761 fp = remote.stream_out()
1759 l = fp.readline()
1762 l = fp.readline()
1760 try:
1763 try:
1761 resp = int(l)
1764 resp = int(l)
1762 except ValueError:
1765 except ValueError:
1763 raise error.ResponseError(
1766 raise error.ResponseError(
1764 _('unexpected response from remote server:'), l)
1767 _('unexpected response from remote server:'), l)
1765 if resp == 1:
1768 if resp == 1:
1766 raise util.Abort(_('operation forbidden by server'))
1769 raise util.Abort(_('operation forbidden by server'))
1767 elif resp == 2:
1770 elif resp == 2:
1768 raise util.Abort(_('locking the remote repository failed'))
1771 raise util.Abort(_('locking the remote repository failed'))
1769 elif resp != 0:
1772 elif resp != 0:
1770 raise util.Abort(_('the server sent an unknown error code'))
1773 raise util.Abort(_('the server sent an unknown error code'))
1771 self.ui.status(_('streaming all changes\n'))
1774 self.ui.status(_('streaming all changes\n'))
1772 l = fp.readline()
1775 l = fp.readline()
1773 try:
1776 try:
1774 total_files, total_bytes = map(int, l.split(' ', 1))
1777 total_files, total_bytes = map(int, l.split(' ', 1))
1775 except (ValueError, TypeError):
1778 except (ValueError, TypeError):
1776 raise error.ResponseError(
1779 raise error.ResponseError(
1777 _('unexpected response from remote server:'), l)
1780 _('unexpected response from remote server:'), l)
1778 self.ui.status(_('%d files to transfer, %s of data\n') %
1781 self.ui.status(_('%d files to transfer, %s of data\n') %
1779 (total_files, util.bytecount(total_bytes)))
1782 (total_files, util.bytecount(total_bytes)))
1780 handled_bytes = 0
1783 handled_bytes = 0
1781 self.ui.progress(_('clone'), 0, total=total_bytes)
1784 self.ui.progress(_('clone'), 0, total=total_bytes)
1782 start = time.time()
1785 start = time.time()
1783
1786
1784 tr = self.transaction(_('clone'))
1787 tr = self.transaction(_('clone'))
1785 try:
1788 try:
1786 for i in xrange(total_files):
1789 for i in xrange(total_files):
1787 # XXX doesn't support '\n' or '\r' in filenames
1790 # XXX doesn't support '\n' or '\r' in filenames
1788 l = fp.readline()
1791 l = fp.readline()
1789 try:
1792 try:
1790 name, size = l.split('\0', 1)
1793 name, size = l.split('\0', 1)
1791 size = int(size)
1794 size = int(size)
1792 except (ValueError, TypeError):
1795 except (ValueError, TypeError):
1793 raise error.ResponseError(
1796 raise error.ResponseError(
1794 _('unexpected response from remote server:'), l)
1797 _('unexpected response from remote server:'), l)
1795 if self.ui.debugflag:
1798 if self.ui.debugflag:
1796 self.ui.debug('adding %s (%s)\n' %
1799 self.ui.debug('adding %s (%s)\n' %
1797 (name, util.bytecount(size)))
1800 (name, util.bytecount(size)))
1798 # for backwards compat, name was partially encoded
1801 # for backwards compat, name was partially encoded
1799 ofp = self.svfs(store.decodedir(name), 'w')
1802 ofp = self.svfs(store.decodedir(name), 'w')
1800 for chunk in util.filechunkiter(fp, limit=size):
1803 for chunk in util.filechunkiter(fp, limit=size):
1801 handled_bytes += len(chunk)
1804 handled_bytes += len(chunk)
1802 self.ui.progress(_('clone'), handled_bytes,
1805 self.ui.progress(_('clone'), handled_bytes,
1803 total=total_bytes)
1806 total=total_bytes)
1804 ofp.write(chunk)
1807 ofp.write(chunk)
1805 ofp.close()
1808 ofp.close()
1806 tr.close()
1809 tr.close()
1807 finally:
1810 finally:
1808 tr.release()
1811 tr.release()
1809
1812
1810 # Writing straight to files circumvented the inmemory caches
1813 # Writing straight to files circumvented the inmemory caches
1811 self.invalidate()
1814 self.invalidate()
1812
1815
1813 elapsed = time.time() - start
1816 elapsed = time.time() - start
1814 if elapsed <= 0:
1817 if elapsed <= 0:
1815 elapsed = 0.001
1818 elapsed = 0.001
1816 self.ui.progress(_('clone'), None)
1819 self.ui.progress(_('clone'), None)
1817 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1820 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1818 (util.bytecount(total_bytes), elapsed,
1821 (util.bytecount(total_bytes), elapsed,
1819 util.bytecount(total_bytes / elapsed)))
1822 util.bytecount(total_bytes / elapsed)))
1820
1823
1821 # new requirements = old non-format requirements +
1824 # new requirements = old non-format requirements +
1822 # new format-related
1825 # new format-related
1823 # requirements from the streamed-in repository
1826 # requirements from the streamed-in repository
1824 requirements.update(set(self.requirements) - self.supportedformats)
1827 requirements.update(set(self.requirements) - self.supportedformats)
1825 self._applyrequirements(requirements)
1828 self._applyrequirements(requirements)
1826 self._writerequirements()
1829 self._writerequirements()
1827
1830
1828 if rbranchmap:
1831 if rbranchmap:
1829 rbheads = []
1832 rbheads = []
1830 closed = []
1833 closed = []
1831 for bheads in rbranchmap.itervalues():
1834 for bheads in rbranchmap.itervalues():
1832 rbheads.extend(bheads)
1835 rbheads.extend(bheads)
1833 for h in bheads:
1836 for h in bheads:
1834 r = self.changelog.rev(h)
1837 r = self.changelog.rev(h)
1835 b, c = self.changelog.branchinfo(r)
1838 b, c = self.changelog.branchinfo(r)
1836 if c:
1839 if c:
1837 closed.append(h)
1840 closed.append(h)
1838
1841
1839 if rbheads:
1842 if rbheads:
1840 rtiprev = max((int(self.changelog.rev(node))
1843 rtiprev = max((int(self.changelog.rev(node))
1841 for node in rbheads))
1844 for node in rbheads))
1842 cache = branchmap.branchcache(rbranchmap,
1845 cache = branchmap.branchcache(rbranchmap,
1843 self[rtiprev].node(),
1846 self[rtiprev].node(),
1844 rtiprev,
1847 rtiprev,
1845 closednodes=closed)
1848 closednodes=closed)
1846 # Try to stick it as low as possible
1849 # Try to stick it as low as possible
1847 # filter above served are unlikely to be fetch from a clone
1850 # filter above served are unlikely to be fetch from a clone
1848 for candidate in ('base', 'immutable', 'served'):
1851 for candidate in ('base', 'immutable', 'served'):
1849 rview = self.filtered(candidate)
1852 rview = self.filtered(candidate)
1850 if cache.validfor(rview):
1853 if cache.validfor(rview):
1851 self._branchcaches[candidate] = cache
1854 self._branchcaches[candidate] = cache
1852 cache.write(rview)
1855 cache.write(rview)
1853 break
1856 break
1854 self.invalidate()
1857 self.invalidate()
1855 return len(self.heads()) + 1
1858 return len(self.heads()) + 1
1856 finally:
1859 finally:
1857 lock.release()
1860 lock.release()
1858
1861
1859 def clone(self, remote, heads=[], stream=None):
1862 def clone(self, remote, heads=[], stream=None):
1860 '''clone remote repository.
1863 '''clone remote repository.
1861
1864
1862 keyword arguments:
1865 keyword arguments:
1863 heads: list of revs to clone (forces use of pull)
1866 heads: list of revs to clone (forces use of pull)
1864 stream: use streaming clone if possible'''
1867 stream: use streaming clone if possible'''
1865
1868
1866 # now, all clients that can request uncompressed clones can
1869 # now, all clients that can request uncompressed clones can
1867 # read repo formats supported by all servers that can serve
1870 # read repo formats supported by all servers that can serve
1868 # them.
1871 # them.
1869
1872
1870 # if revlog format changes, client will have to check version
1873 # if revlog format changes, client will have to check version
1871 # and format flags on "stream" capability, and use
1874 # and format flags on "stream" capability, and use
1872 # uncompressed only if compatible.
1875 # uncompressed only if compatible.
1873
1876
1874 if stream is None:
1877 if stream is None:
1875 # if the server explicitly prefers to stream (for fast LANs)
1878 # if the server explicitly prefers to stream (for fast LANs)
1876 stream = remote.capable('stream-preferred')
1879 stream = remote.capable('stream-preferred')
1877
1880
1878 if stream and not heads:
1881 if stream and not heads:
1879 # 'stream' means remote revlog format is revlogv1 only
1882 # 'stream' means remote revlog format is revlogv1 only
1880 if remote.capable('stream'):
1883 if remote.capable('stream'):
1881 self.stream_in(remote, set(('revlogv1',)))
1884 self.stream_in(remote, set(('revlogv1',)))
1882 else:
1885 else:
1883 # otherwise, 'streamreqs' contains the remote revlog format
1886 # otherwise, 'streamreqs' contains the remote revlog format
1884 streamreqs = remote.capable('streamreqs')
1887 streamreqs = remote.capable('streamreqs')
1885 if streamreqs:
1888 if streamreqs:
1886 streamreqs = set(streamreqs.split(','))
1889 streamreqs = set(streamreqs.split(','))
1887 # if we support it, stream in and adjust our requirements
1890 # if we support it, stream in and adjust our requirements
1888 if not streamreqs - self.supportedformats:
1891 if not streamreqs - self.supportedformats:
1889 self.stream_in(remote, streamreqs)
1892 self.stream_in(remote, streamreqs)
1890
1893
1891 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1894 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1892 try:
1895 try:
1893 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1896 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1894 ret = exchange.pull(self, remote, heads).cgresult
1897 ret = exchange.pull(self, remote, heads).cgresult
1895 finally:
1898 finally:
1896 self.ui.restoreconfig(quiet)
1899 self.ui.restoreconfig(quiet)
1897 return ret
1900 return ret
1898
1901
1899 def pushkey(self, namespace, key, old, new):
1902 def pushkey(self, namespace, key, old, new):
1900 try:
1903 try:
1901 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1904 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1902 old=old, new=new)
1905 old=old, new=new)
1903 except error.HookAbort, exc:
1906 except error.HookAbort, exc:
1904 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1907 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1905 if exc.hint:
1908 if exc.hint:
1906 self.ui.write_err(_("(%s)\n") % exc.hint)
1909 self.ui.write_err(_("(%s)\n") % exc.hint)
1907 return False
1910 return False
1908 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1911 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1909 ret = pushkey.push(self, namespace, key, old, new)
1912 ret = pushkey.push(self, namespace, key, old, new)
1910 def runhook():
1913 def runhook():
1911 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1914 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1912 ret=ret)
1915 ret=ret)
1913 self._afterlock(runhook)
1916 self._afterlock(runhook)
1914 return ret
1917 return ret
1915
1918
1916 def listkeys(self, namespace):
1919 def listkeys(self, namespace):
1917 self.hook('prelistkeys', throw=True, namespace=namespace)
1920 self.hook('prelistkeys', throw=True, namespace=namespace)
1918 self.ui.debug('listing keys for "%s"\n' % namespace)
1921 self.ui.debug('listing keys for "%s"\n' % namespace)
1919 values = pushkey.list(self, namespace)
1922 values = pushkey.list(self, namespace)
1920 self.hook('listkeys', namespace=namespace, values=values)
1923 self.hook('listkeys', namespace=namespace, values=values)
1921 return values
1924 return values
1922
1925
1923 def debugwireargs(self, one, two, three=None, four=None, five=None):
1926 def debugwireargs(self, one, two, three=None, four=None, five=None):
1924 '''used to test argument passing over the wire'''
1927 '''used to test argument passing over the wire'''
1925 return "%s %s %s %s %s" % (one, two, three, four, five)
1928 return "%s %s %s %s %s" % (one, two, three, four, five)
1926
1929
1927 def savecommitmessage(self, text):
1930 def savecommitmessage(self, text):
1928 fp = self.vfs('last-message.txt', 'wb')
1931 fp = self.vfs('last-message.txt', 'wb')
1929 try:
1932 try:
1930 fp.write(text)
1933 fp.write(text)
1931 finally:
1934 finally:
1932 fp.close()
1935 fp.close()
1933 return self.pathto(fp.name[len(self.root) + 1:])
1936 return self.pathto(fp.name[len(self.root) + 1:])
1934
1937
1935 # used to avoid circular references so destructors work
1938 # used to avoid circular references so destructors work
1936 def aftertrans(files):
1939 def aftertrans(files):
1937 renamefiles = [tuple(t) for t in files]
1940 renamefiles = [tuple(t) for t in files]
1938 def a():
1941 def a():
1939 for vfs, src, dest in renamefiles:
1942 for vfs, src, dest in renamefiles:
1940 try:
1943 try:
1941 vfs.rename(src, dest)
1944 vfs.rename(src, dest)
1942 except OSError: # journal file does not yet exist
1945 except OSError: # journal file does not yet exist
1943 pass
1946 pass
1944 return a
1947 return a
1945
1948
1946 def undoname(fn):
1949 def undoname(fn):
1947 base, name = os.path.split(fn)
1950 base, name = os.path.split(fn)
1948 assert name.startswith('journal')
1951 assert name.startswith('journal')
1949 return os.path.join(base, name.replace('journal', 'undo', 1))
1952 return os.path.join(base, name.replace('journal', 'undo', 1))
1950
1953
1951 def instance(ui, path, create):
1954 def instance(ui, path, create):
1952 return localrepository(ui, util.urllocalpath(path), create)
1955 return localrepository(ui, util.urllocalpath(path), create)
1953
1956
1954 def islocal(path):
1957 def islocal(path):
1955 return True
1958 return True
@@ -1,686 +1,688 b''
1 commit hooks can see env vars
1 commit hooks can see env vars
2 (and post-transaction one are run unlocked)
2
3
3 $ hg init a
4 $ hg init a
4 $ cd a
5 $ cd a
5 $ cat > .hg/hgrc <<EOF
6 $ cat > .hg/hgrc <<EOF
6 > [hooks]
7 > [hooks]
7 > commit = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" commit"
8 > commit = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" commit"
8 > commit.b = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" commit.b"
9 > commit.b = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" commit.b"
9 > precommit = sh -c "HG_LOCAL= HG_NODE= HG_TAG= python \"$TESTDIR/printenv.py\" precommit"
10 > precommit = sh -c "HG_LOCAL= HG_NODE= HG_TAG= python \"$TESTDIR/printenv.py\" precommit"
10 > pretxncommit = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" pretxncommit"
11 > pretxncommit = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" pretxncommit"
11 > pretxncommit.tip = hg -q tip
12 > pretxncommit.tip = hg -q tip
12 > pre-identify = python "$TESTDIR/printenv.py" pre-identify 1
13 > pre-identify = python "$TESTDIR/printenv.py" pre-identify 1
13 > pre-cat = python "$TESTDIR/printenv.py" pre-cat
14 > pre-cat = python "$TESTDIR/printenv.py" pre-cat
14 > post-cat = python "$TESTDIR/printenv.py" post-cat
15 > post-cat = python "$TESTDIR/printenv.py" post-cat
15 > pretxnopen = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" pretxnopen"
16 > pretxnopen = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" pretxnopen"
16 > pretxnclose = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" pretxnclose"
17 > pretxnclose = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" pretxnclose"
17 > txnclose = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" txnclose"
18 > txnclose = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" txnclose"
18 > txnabort = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" txnabort"
19 > txnabort = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" txnabort"
20 > txnclose.checklock = hg debuglock > /dev/null
19 > EOF
21 > EOF
20 $ echo a > a
22 $ echo a > a
21 $ hg add a
23 $ hg add a
22 $ hg commit -m a
24 $ hg commit -m a
23 precommit hook: HG_PARENT1=0000000000000000000000000000000000000000
25 precommit hook: HG_PARENT1=0000000000000000000000000000000000000000
24 pretxnopen hook: HG_TXNNAME=commit
26 pretxnopen hook: HG_TXNNAME=commit
25 pretxncommit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000 HG_PENDING=$TESTTMP/a
27 pretxncommit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000 HG_PENDING=$TESTTMP/a
26 0:cb9a9f314b8b
28 0:cb9a9f314b8b
27 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_XNNAME=commit (glob)
29 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_XNNAME=commit (glob)
28 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
30 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
29 commit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
31 commit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
30 commit.b hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
32 commit.b hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
31
33
32 $ hg clone . ../b
34 $ hg clone . ../b
33 updating to branch default
35 updating to branch default
34 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
36 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
35 $ cd ../b
37 $ cd ../b
36
38
37 changegroup hooks can see env vars
39 changegroup hooks can see env vars
38
40
39 $ cat > .hg/hgrc <<EOF
41 $ cat > .hg/hgrc <<EOF
40 > [hooks]
42 > [hooks]
41 > prechangegroup = python "$TESTDIR/printenv.py" prechangegroup
43 > prechangegroup = python "$TESTDIR/printenv.py" prechangegroup
42 > changegroup = python "$TESTDIR/printenv.py" changegroup
44 > changegroup = python "$TESTDIR/printenv.py" changegroup
43 > incoming = python "$TESTDIR/printenv.py" incoming
45 > incoming = python "$TESTDIR/printenv.py" incoming
44 > EOF
46 > EOF
45
47
46 pretxncommit and commit hooks can see both parents of merge
48 pretxncommit and commit hooks can see both parents of merge
47
49
48 $ cd ../a
50 $ cd ../a
49 $ echo b >> a
51 $ echo b >> a
50 $ hg commit -m a1 -d "1 0"
52 $ hg commit -m a1 -d "1 0"
51 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
53 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
52 pretxnopen hook: HG_TXNNAME=commit
54 pretxnopen hook: HG_TXNNAME=commit
53 pretxncommit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
55 pretxncommit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
54 1:ab228980c14d
56 1:ab228980c14d
55 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_XNNAME=commit (glob)
57 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_XNNAME=commit (glob)
56 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
58 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
57 commit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
59 commit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
58 commit.b hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
60 commit.b hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
59 $ hg update -C 0
61 $ hg update -C 0
60 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
62 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
61 $ echo b > b
63 $ echo b > b
62 $ hg add b
64 $ hg add b
63 $ hg commit -m b -d '1 0'
65 $ hg commit -m b -d '1 0'
64 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
66 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
65 pretxnopen hook: HG_TXNNAME=commit
67 pretxnopen hook: HG_TXNNAME=commit
66 pretxncommit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
68 pretxncommit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
67 2:ee9deb46ab31
69 2:ee9deb46ab31
68 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_XNNAME=commit (glob)
70 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_XNNAME=commit (glob)
69 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
71 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
70 commit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
72 commit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
71 commit.b hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
73 commit.b hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
72 created new head
74 created new head
73 $ hg merge 1
75 $ hg merge 1
74 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
76 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
75 (branch merge, don't forget to commit)
77 (branch merge, don't forget to commit)
76 $ hg commit -m merge -d '2 0'
78 $ hg commit -m merge -d '2 0'
77 precommit hook: HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
79 precommit hook: HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
78 pretxnopen hook: HG_TXNNAME=commit
80 pretxnopen hook: HG_TXNNAME=commit
79 pretxncommit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd HG_PENDING=$TESTTMP/a
81 pretxncommit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd HG_PENDING=$TESTTMP/a
80 3:07f3376c1e65
82 3:07f3376c1e65
81 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_XNNAME=commit (glob)
83 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_XNNAME=commit (glob)
82 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
84 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
83 commit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
85 commit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
84 commit.b hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
86 commit.b hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
85
87
86 test generic hooks
88 test generic hooks
87
89
88 $ hg id
90 $ hg id
89 pre-identify hook: HG_ARGS=id HG_OPTS={'bookmarks': None, 'branch': None, 'id': None, 'insecure': None, 'num': None, 'remotecmd': '', 'rev': '', 'ssh': '', 'tags': None} HG_PATS=[]
91 pre-identify hook: HG_ARGS=id HG_OPTS={'bookmarks': None, 'branch': None, 'id': None, 'insecure': None, 'num': None, 'remotecmd': '', 'rev': '', 'ssh': '', 'tags': None} HG_PATS=[]
90 abort: pre-identify hook exited with status 1
92 abort: pre-identify hook exited with status 1
91 [255]
93 [255]
92 $ hg cat b
94 $ hg cat b
93 pre-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b']
95 pre-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b']
94 b
96 b
95 post-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b'] HG_RESULT=0
97 post-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b'] HG_RESULT=0
96
98
97 $ cd ../b
99 $ cd ../b
98 $ hg pull ../a
100 $ hg pull ../a
99 pulling from ../a
101 pulling from ../a
100 searching for changes
102 searching for changes
101 prechangegroup hook: HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
103 prechangegroup hook: HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
102 adding changesets
104 adding changesets
103 adding manifests
105 adding manifests
104 adding file changes
106 adding file changes
105 added 3 changesets with 2 changes to 2 files
107 added 3 changesets with 2 changes to 2 files
106 changegroup hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
108 changegroup hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
107 incoming hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
109 incoming hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
108 incoming hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
110 incoming hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
109 incoming hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
111 incoming hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
110 (run 'hg update' to get a working copy)
112 (run 'hg update' to get a working copy)
111
113
112 tag hooks can see env vars
114 tag hooks can see env vars
113
115
114 $ cd ../a
116 $ cd ../a
115 $ cat >> .hg/hgrc <<EOF
117 $ cat >> .hg/hgrc <<EOF
116 > pretag = python "$TESTDIR/printenv.py" pretag
118 > pretag = python "$TESTDIR/printenv.py" pretag
117 > tag = sh -c "HG_PARENT1= HG_PARENT2= python \"$TESTDIR/printenv.py\" tag"
119 > tag = sh -c "HG_PARENT1= HG_PARENT2= python \"$TESTDIR/printenv.py\" tag"
118 > EOF
120 > EOF
119 $ hg tag -d '3 0' a
121 $ hg tag -d '3 0' a
120 pretag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
122 pretag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
121 precommit hook: HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
123 precommit hook: HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
122 pretxnopen hook: HG_TXNNAME=commit
124 pretxnopen hook: HG_TXNNAME=commit
123 pretxncommit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PENDING=$TESTTMP/a
125 pretxncommit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PENDING=$TESTTMP/a
124 4:539e4b31b6dc
126 4:539e4b31b6dc
125 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_XNNAME=commit (glob)
127 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_XNNAME=commit (glob)
126 tag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
128 tag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
127 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
129 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
128 commit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
130 commit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
129 commit.b hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
131 commit.b hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
130 $ hg tag -l la
132 $ hg tag -l la
131 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
133 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
132 tag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
134 tag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
133
135
134 pretag hook can forbid tagging
136 pretag hook can forbid tagging
135
137
136 $ echo "pretag.forbid = python \"$TESTDIR/printenv.py\" pretag.forbid 1" >> .hg/hgrc
138 $ echo "pretag.forbid = python \"$TESTDIR/printenv.py\" pretag.forbid 1" >> .hg/hgrc
137 $ hg tag -d '4 0' fa
139 $ hg tag -d '4 0' fa
138 pretag hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
140 pretag hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
139 pretag.forbid hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
141 pretag.forbid hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
140 abort: pretag.forbid hook exited with status 1
142 abort: pretag.forbid hook exited with status 1
141 [255]
143 [255]
142 $ hg tag -l fla
144 $ hg tag -l fla
143 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
145 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
144 pretag.forbid hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
146 pretag.forbid hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
145 abort: pretag.forbid hook exited with status 1
147 abort: pretag.forbid hook exited with status 1
146 [255]
148 [255]
147
149
148 pretxncommit hook can see changeset, can roll back txn, changeset no
150 pretxncommit hook can see changeset, can roll back txn, changeset no
149 more there after
151 more there after
150
152
151 $ echo "pretxncommit.forbid0 = hg tip -q" >> .hg/hgrc
153 $ echo "pretxncommit.forbid0 = hg tip -q" >> .hg/hgrc
152 $ echo "pretxncommit.forbid1 = python \"$TESTDIR/printenv.py\" pretxncommit.forbid 1" >> .hg/hgrc
154 $ echo "pretxncommit.forbid1 = python \"$TESTDIR/printenv.py\" pretxncommit.forbid 1" >> .hg/hgrc
153 $ echo z > z
155 $ echo z > z
154 $ hg add z
156 $ hg add z
155 $ hg -q tip
157 $ hg -q tip
156 4:539e4b31b6dc
158 4:539e4b31b6dc
157 $ hg commit -m 'fail' -d '4 0'
159 $ hg commit -m 'fail' -d '4 0'
158 precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
160 precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
159 pretxnopen hook: HG_TXNNAME=commit
161 pretxnopen hook: HG_TXNNAME=commit
160 pretxncommit hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
162 pretxncommit hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
161 5:6f611f8018c1
163 5:6f611f8018c1
162 5:6f611f8018c1
164 5:6f611f8018c1
163 pretxncommit.forbid hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
165 pretxncommit.forbid hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
164 transaction abort!
166 transaction abort!
165 txnabort hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
167 txnabort hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
166 rollback completed
168 rollback completed
167 abort: pretxncommit.forbid1 hook exited with status 1
169 abort: pretxncommit.forbid1 hook exited with status 1
168 [255]
170 [255]
169 $ hg -q tip
171 $ hg -q tip
170 4:539e4b31b6dc
172 4:539e4b31b6dc
171
173
172 (Check that no 'changelog.i.a' file were left behind)
174 (Check that no 'changelog.i.a' file were left behind)
173
175
174 $ ls -1 .hg/store/
176 $ ls -1 .hg/store/
175 00changelog.i
177 00changelog.i
176 00manifest.i
178 00manifest.i
177 data
179 data
178 fncache
180 fncache
179 journal.phaseroots
181 journal.phaseroots
180 phaseroots
182 phaseroots
181 undo
183 undo
182 undo.backup.fncache
184 undo.backup.fncache
183 undo.backupfiles
185 undo.backupfiles
184 undo.phaseroots
186 undo.phaseroots
185
187
186
188
187 precommit hook can prevent commit
189 precommit hook can prevent commit
188
190
189 $ echo "precommit.forbid = python \"$TESTDIR/printenv.py\" precommit.forbid 1" >> .hg/hgrc
191 $ echo "precommit.forbid = python \"$TESTDIR/printenv.py\" precommit.forbid 1" >> .hg/hgrc
190 $ hg commit -m 'fail' -d '4 0'
192 $ hg commit -m 'fail' -d '4 0'
191 precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
193 precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
192 precommit.forbid hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
194 precommit.forbid hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
193 abort: precommit.forbid hook exited with status 1
195 abort: precommit.forbid hook exited with status 1
194 [255]
196 [255]
195 $ hg -q tip
197 $ hg -q tip
196 4:539e4b31b6dc
198 4:539e4b31b6dc
197
199
198 preupdate hook can prevent update
200 preupdate hook can prevent update
199
201
200 $ echo "preupdate = python \"$TESTDIR/printenv.py\" preupdate" >> .hg/hgrc
202 $ echo "preupdate = python \"$TESTDIR/printenv.py\" preupdate" >> .hg/hgrc
201 $ hg update 1
203 $ hg update 1
202 preupdate hook: HG_PARENT1=ab228980c14d
204 preupdate hook: HG_PARENT1=ab228980c14d
203 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
205 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
204
206
205 update hook
207 update hook
206
208
207 $ echo "update = python \"$TESTDIR/printenv.py\" update" >> .hg/hgrc
209 $ echo "update = python \"$TESTDIR/printenv.py\" update" >> .hg/hgrc
208 $ hg update
210 $ hg update
209 preupdate hook: HG_PARENT1=539e4b31b6dc
211 preupdate hook: HG_PARENT1=539e4b31b6dc
210 update hook: HG_ERROR=0 HG_PARENT1=539e4b31b6dc
212 update hook: HG_ERROR=0 HG_PARENT1=539e4b31b6dc
211 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
213 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
212
214
213 pushkey hook
215 pushkey hook
214
216
215 $ echo "pushkey = python \"$TESTDIR/printenv.py\" pushkey" >> .hg/hgrc
217 $ echo "pushkey = python \"$TESTDIR/printenv.py\" pushkey" >> .hg/hgrc
216 $ cd ../b
218 $ cd ../b
217 $ hg bookmark -r null foo
219 $ hg bookmark -r null foo
218 $ hg push -B foo ../a
220 $ hg push -B foo ../a
219 pushing to ../a
221 pushing to ../a
220 searching for changes
222 searching for changes
221 no changes found
223 no changes found
222 pretxnopen hook: HG_TXNNAME=bookmarks
224 pretxnopen hook: HG_TXNNAME=bookmarks
223 pretxnclose hook: HG_BOOKMARK_MOVED=1 HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_XNNAME=bookmarks (glob)
225 pretxnclose hook: HG_BOOKMARK_MOVED=1 HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_XNNAME=bookmarks (glob)
224 txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmarks (glob)
226 txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmarks (glob)
225 pushkey hook: HG_KEY=foo HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000 HG_RET=1
227 pushkey hook: HG_KEY=foo HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000 HG_RET=1
226 exporting bookmark foo
228 exporting bookmark foo
227 [1]
229 [1]
228 $ cd ../a
230 $ cd ../a
229
231
230 listkeys hook
232 listkeys hook
231
233
232 $ echo "listkeys = python \"$TESTDIR/printenv.py\" listkeys" >> .hg/hgrc
234 $ echo "listkeys = python \"$TESTDIR/printenv.py\" listkeys" >> .hg/hgrc
233 $ hg bookmark -r null bar
235 $ hg bookmark -r null bar
234 $ cd ../b
236 $ cd ../b
235 $ hg pull -B bar ../a
237 $ hg pull -B bar ../a
236 pulling from ../a
238 pulling from ../a
237 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
239 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
238 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
240 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
239 no changes found
241 no changes found
240 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
242 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
241 adding remote bookmark bar
243 adding remote bookmark bar
242 $ cd ../a
244 $ cd ../a
243
245
244 test that prepushkey can prevent incoming keys
246 test that prepushkey can prevent incoming keys
245
247
246 $ echo "prepushkey = python \"$TESTDIR/printenv.py\" prepushkey.forbid 1" >> .hg/hgrc
248 $ echo "prepushkey = python \"$TESTDIR/printenv.py\" prepushkey.forbid 1" >> .hg/hgrc
247 $ cd ../b
249 $ cd ../b
248 $ hg bookmark -r null baz
250 $ hg bookmark -r null baz
249 $ hg push -B baz ../a
251 $ hg push -B baz ../a
250 pushing to ../a
252 pushing to ../a
251 searching for changes
253 searching for changes
252 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
254 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
253 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
255 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
254 no changes found
256 no changes found
255 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
257 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
256 prepushkey.forbid hook: HG_KEY=baz HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000
258 prepushkey.forbid hook: HG_KEY=baz HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000
257 pushkey-abort: prepushkey hook exited with status 1
259 pushkey-abort: prepushkey hook exited with status 1
258 exporting bookmark baz failed!
260 exporting bookmark baz failed!
259 [1]
261 [1]
260 $ cd ../a
262 $ cd ../a
261
263
262 test that prelistkeys can prevent listing keys
264 test that prelistkeys can prevent listing keys
263
265
264 $ echo "prelistkeys = python \"$TESTDIR/printenv.py\" prelistkeys.forbid 1" >> .hg/hgrc
266 $ echo "prelistkeys = python \"$TESTDIR/printenv.py\" prelistkeys.forbid 1" >> .hg/hgrc
265 $ hg bookmark -r null quux
267 $ hg bookmark -r null quux
266 $ cd ../b
268 $ cd ../b
267 $ hg pull -B quux ../a
269 $ hg pull -B quux ../a
268 pulling from ../a
270 pulling from ../a
269 prelistkeys.forbid hook: HG_NAMESPACE=bookmarks
271 prelistkeys.forbid hook: HG_NAMESPACE=bookmarks
270 abort: prelistkeys hook exited with status 1
272 abort: prelistkeys hook exited with status 1
271 [255]
273 [255]
272 $ cd ../a
274 $ cd ../a
273 $ rm .hg/hgrc
275 $ rm .hg/hgrc
274
276
275 prechangegroup hook can prevent incoming changes
277 prechangegroup hook can prevent incoming changes
276
278
277 $ cd ../b
279 $ cd ../b
278 $ hg -q tip
280 $ hg -q tip
279 3:07f3376c1e65
281 3:07f3376c1e65
280 $ cat > .hg/hgrc <<EOF
282 $ cat > .hg/hgrc <<EOF
281 > [hooks]
283 > [hooks]
282 > prechangegroup.forbid = python "$TESTDIR/printenv.py" prechangegroup.forbid 1
284 > prechangegroup.forbid = python "$TESTDIR/printenv.py" prechangegroup.forbid 1
283 > EOF
285 > EOF
284 $ hg pull ../a
286 $ hg pull ../a
285 pulling from ../a
287 pulling from ../a
286 searching for changes
288 searching for changes
287 prechangegroup.forbid hook: HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
289 prechangegroup.forbid hook: HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
288 abort: prechangegroup.forbid hook exited with status 1
290 abort: prechangegroup.forbid hook exited with status 1
289 [255]
291 [255]
290
292
291 pretxnchangegroup hook can see incoming changes, can roll back txn,
293 pretxnchangegroup hook can see incoming changes, can roll back txn,
292 incoming changes no longer there after
294 incoming changes no longer there after
293
295
294 $ cat > .hg/hgrc <<EOF
296 $ cat > .hg/hgrc <<EOF
295 > [hooks]
297 > [hooks]
296 > pretxnchangegroup.forbid0 = hg tip -q
298 > pretxnchangegroup.forbid0 = hg tip -q
297 > pretxnchangegroup.forbid1 = python "$TESTDIR/printenv.py" pretxnchangegroup.forbid 1
299 > pretxnchangegroup.forbid1 = python "$TESTDIR/printenv.py" pretxnchangegroup.forbid 1
298 > EOF
300 > EOF
299 $ hg pull ../a
301 $ hg pull ../a
300 pulling from ../a
302 pulling from ../a
301 searching for changes
303 searching for changes
302 adding changesets
304 adding changesets
303 adding manifests
305 adding manifests
304 adding file changes
306 adding file changes
305 added 1 changesets with 1 changes to 1 files
307 added 1 changesets with 1 changes to 1 files
306 4:539e4b31b6dc
308 4:539e4b31b6dc
307 pretxnchangegroup.forbid hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/b HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
309 pretxnchangegroup.forbid hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/b HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
308 transaction abort!
310 transaction abort!
309 rollback completed
311 rollback completed
310 abort: pretxnchangegroup.forbid1 hook exited with status 1
312 abort: pretxnchangegroup.forbid1 hook exited with status 1
311 [255]
313 [255]
312 $ hg -q tip
314 $ hg -q tip
313 3:07f3376c1e65
315 3:07f3376c1e65
314
316
315 outgoing hooks can see env vars
317 outgoing hooks can see env vars
316
318
317 $ rm .hg/hgrc
319 $ rm .hg/hgrc
318 $ cat > ../a/.hg/hgrc <<EOF
320 $ cat > ../a/.hg/hgrc <<EOF
319 > [hooks]
321 > [hooks]
320 > preoutgoing = python "$TESTDIR/printenv.py" preoutgoing
322 > preoutgoing = python "$TESTDIR/printenv.py" preoutgoing
321 > outgoing = python "$TESTDIR/printenv.py" outgoing
323 > outgoing = python "$TESTDIR/printenv.py" outgoing
322 > EOF
324 > EOF
323 $ hg pull ../a
325 $ hg pull ../a
324 pulling from ../a
326 pulling from ../a
325 searching for changes
327 searching for changes
326 preoutgoing hook: HG_SOURCE=pull
328 preoutgoing hook: HG_SOURCE=pull
327 adding changesets
329 adding changesets
328 outgoing hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_SOURCE=pull
330 outgoing hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_SOURCE=pull
329 adding manifests
331 adding manifests
330 adding file changes
332 adding file changes
331 added 1 changesets with 1 changes to 1 files
333 added 1 changesets with 1 changes to 1 files
332 adding remote bookmark quux
334 adding remote bookmark quux
333 (run 'hg update' to get a working copy)
335 (run 'hg update' to get a working copy)
334 $ hg rollback
336 $ hg rollback
335 repository tip rolled back to revision 3 (undo pull)
337 repository tip rolled back to revision 3 (undo pull)
336
338
337 preoutgoing hook can prevent outgoing changes
339 preoutgoing hook can prevent outgoing changes
338
340
339 $ echo "preoutgoing.forbid = python \"$TESTDIR/printenv.py\" preoutgoing.forbid 1" >> ../a/.hg/hgrc
341 $ echo "preoutgoing.forbid = python \"$TESTDIR/printenv.py\" preoutgoing.forbid 1" >> ../a/.hg/hgrc
340 $ hg pull ../a
342 $ hg pull ../a
341 pulling from ../a
343 pulling from ../a
342 searching for changes
344 searching for changes
343 preoutgoing hook: HG_SOURCE=pull
345 preoutgoing hook: HG_SOURCE=pull
344 preoutgoing.forbid hook: HG_SOURCE=pull
346 preoutgoing.forbid hook: HG_SOURCE=pull
345 abort: preoutgoing.forbid hook exited with status 1
347 abort: preoutgoing.forbid hook exited with status 1
346 [255]
348 [255]
347
349
348 outgoing hooks work for local clones
350 outgoing hooks work for local clones
349
351
350 $ cd ..
352 $ cd ..
351 $ cat > a/.hg/hgrc <<EOF
353 $ cat > a/.hg/hgrc <<EOF
352 > [hooks]
354 > [hooks]
353 > preoutgoing = python "$TESTDIR/printenv.py" preoutgoing
355 > preoutgoing = python "$TESTDIR/printenv.py" preoutgoing
354 > outgoing = python "$TESTDIR/printenv.py" outgoing
356 > outgoing = python "$TESTDIR/printenv.py" outgoing
355 > EOF
357 > EOF
356 $ hg clone a c
358 $ hg clone a c
357 preoutgoing hook: HG_SOURCE=clone
359 preoutgoing hook: HG_SOURCE=clone
358 outgoing hook: HG_NODE=0000000000000000000000000000000000000000 HG_SOURCE=clone
360 outgoing hook: HG_NODE=0000000000000000000000000000000000000000 HG_SOURCE=clone
359 updating to branch default
361 updating to branch default
360 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
362 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
361 $ rm -rf c
363 $ rm -rf c
362
364
363 preoutgoing hook can prevent outgoing changes for local clones
365 preoutgoing hook can prevent outgoing changes for local clones
364
366
365 $ echo "preoutgoing.forbid = python \"$TESTDIR/printenv.py\" preoutgoing.forbid 1" >> a/.hg/hgrc
367 $ echo "preoutgoing.forbid = python \"$TESTDIR/printenv.py\" preoutgoing.forbid 1" >> a/.hg/hgrc
366 $ hg clone a zzz
368 $ hg clone a zzz
367 preoutgoing hook: HG_SOURCE=clone
369 preoutgoing hook: HG_SOURCE=clone
368 preoutgoing.forbid hook: HG_SOURCE=clone
370 preoutgoing.forbid hook: HG_SOURCE=clone
369 abort: preoutgoing.forbid hook exited with status 1
371 abort: preoutgoing.forbid hook exited with status 1
370 [255]
372 [255]
371
373
372 $ cd "$TESTTMP/b"
374 $ cd "$TESTTMP/b"
373
375
374 $ cat > hooktests.py <<EOF
376 $ cat > hooktests.py <<EOF
375 > from mercurial import util
377 > from mercurial import util
376 >
378 >
377 > uncallable = 0
379 > uncallable = 0
378 >
380 >
379 > def printargs(args):
381 > def printargs(args):
380 > args.pop('ui', None)
382 > args.pop('ui', None)
381 > args.pop('repo', None)
383 > args.pop('repo', None)
382 > a = list(args.items())
384 > a = list(args.items())
383 > a.sort()
385 > a.sort()
384 > print 'hook args:'
386 > print 'hook args:'
385 > for k, v in a:
387 > for k, v in a:
386 > print ' ', k, v
388 > print ' ', k, v
387 >
389 >
388 > def passhook(**args):
390 > def passhook(**args):
389 > printargs(args)
391 > printargs(args)
390 >
392 >
391 > def failhook(**args):
393 > def failhook(**args):
392 > printargs(args)
394 > printargs(args)
393 > return True
395 > return True
394 >
396 >
395 > class LocalException(Exception):
397 > class LocalException(Exception):
396 > pass
398 > pass
397 >
399 >
398 > def raisehook(**args):
400 > def raisehook(**args):
399 > raise LocalException('exception from hook')
401 > raise LocalException('exception from hook')
400 >
402 >
401 > def aborthook(**args):
403 > def aborthook(**args):
402 > raise util.Abort('raise abort from hook')
404 > raise util.Abort('raise abort from hook')
403 >
405 >
404 > def brokenhook(**args):
406 > def brokenhook(**args):
405 > return 1 + {}
407 > return 1 + {}
406 >
408 >
407 > def verbosehook(ui, **args):
409 > def verbosehook(ui, **args):
408 > ui.note('verbose output from hook\n')
410 > ui.note('verbose output from hook\n')
409 >
411 >
410 > def printtags(ui, repo, **args):
412 > def printtags(ui, repo, **args):
411 > print sorted(repo.tags())
413 > print sorted(repo.tags())
412 >
414 >
413 > class container:
415 > class container:
414 > unreachable = 1
416 > unreachable = 1
415 > EOF
417 > EOF
416
418
417 test python hooks
419 test python hooks
418
420
419 #if windows
421 #if windows
420 $ PYTHONPATH="$TESTTMP/b;$PYTHONPATH"
422 $ PYTHONPATH="$TESTTMP/b;$PYTHONPATH"
421 #else
423 #else
422 $ PYTHONPATH="$TESTTMP/b:$PYTHONPATH"
424 $ PYTHONPATH="$TESTTMP/b:$PYTHONPATH"
423 #endif
425 #endif
424 $ export PYTHONPATH
426 $ export PYTHONPATH
425
427
426 $ echo '[hooks]' > ../a/.hg/hgrc
428 $ echo '[hooks]' > ../a/.hg/hgrc
427 $ echo 'preoutgoing.broken = python:hooktests.brokenhook' >> ../a/.hg/hgrc
429 $ echo 'preoutgoing.broken = python:hooktests.brokenhook' >> ../a/.hg/hgrc
428 $ hg pull ../a 2>&1 | grep 'raised an exception'
430 $ hg pull ../a 2>&1 | grep 'raised an exception'
429 error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
431 error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
430
432
431 $ echo '[hooks]' > ../a/.hg/hgrc
433 $ echo '[hooks]' > ../a/.hg/hgrc
432 $ echo 'preoutgoing.raise = python:hooktests.raisehook' >> ../a/.hg/hgrc
434 $ echo 'preoutgoing.raise = python:hooktests.raisehook' >> ../a/.hg/hgrc
433 $ hg pull ../a 2>&1 | grep 'raised an exception'
435 $ hg pull ../a 2>&1 | grep 'raised an exception'
434 error: preoutgoing.raise hook raised an exception: exception from hook
436 error: preoutgoing.raise hook raised an exception: exception from hook
435
437
436 $ echo '[hooks]' > ../a/.hg/hgrc
438 $ echo '[hooks]' > ../a/.hg/hgrc
437 $ echo 'preoutgoing.abort = python:hooktests.aborthook' >> ../a/.hg/hgrc
439 $ echo 'preoutgoing.abort = python:hooktests.aborthook' >> ../a/.hg/hgrc
438 $ hg pull ../a
440 $ hg pull ../a
439 pulling from ../a
441 pulling from ../a
440 searching for changes
442 searching for changes
441 error: preoutgoing.abort hook failed: raise abort from hook
443 error: preoutgoing.abort hook failed: raise abort from hook
442 abort: raise abort from hook
444 abort: raise abort from hook
443 [255]
445 [255]
444
446
445 $ echo '[hooks]' > ../a/.hg/hgrc
447 $ echo '[hooks]' > ../a/.hg/hgrc
446 $ echo 'preoutgoing.fail = python:hooktests.failhook' >> ../a/.hg/hgrc
448 $ echo 'preoutgoing.fail = python:hooktests.failhook' >> ../a/.hg/hgrc
447 $ hg pull ../a
449 $ hg pull ../a
448 pulling from ../a
450 pulling from ../a
449 searching for changes
451 searching for changes
450 hook args:
452 hook args:
451 hooktype preoutgoing
453 hooktype preoutgoing
452 source pull
454 source pull
453 abort: preoutgoing.fail hook failed
455 abort: preoutgoing.fail hook failed
454 [255]
456 [255]
455
457
456 $ echo '[hooks]' > ../a/.hg/hgrc
458 $ echo '[hooks]' > ../a/.hg/hgrc
457 $ echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
459 $ echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
458 $ hg pull ../a
460 $ hg pull ../a
459 pulling from ../a
461 pulling from ../a
460 searching for changes
462 searching for changes
461 abort: preoutgoing.uncallable hook is invalid ("hooktests.uncallable" is not callable)
463 abort: preoutgoing.uncallable hook is invalid ("hooktests.uncallable" is not callable)
462 [255]
464 [255]
463
465
464 $ echo '[hooks]' > ../a/.hg/hgrc
466 $ echo '[hooks]' > ../a/.hg/hgrc
465 $ echo 'preoutgoing.nohook = python:hooktests.nohook' >> ../a/.hg/hgrc
467 $ echo 'preoutgoing.nohook = python:hooktests.nohook' >> ../a/.hg/hgrc
466 $ hg pull ../a
468 $ hg pull ../a
467 pulling from ../a
469 pulling from ../a
468 searching for changes
470 searching for changes
469 abort: preoutgoing.nohook hook is invalid ("hooktests.nohook" is not defined)
471 abort: preoutgoing.nohook hook is invalid ("hooktests.nohook" is not defined)
470 [255]
472 [255]
471
473
472 $ echo '[hooks]' > ../a/.hg/hgrc
474 $ echo '[hooks]' > ../a/.hg/hgrc
473 $ echo 'preoutgoing.nomodule = python:nomodule' >> ../a/.hg/hgrc
475 $ echo 'preoutgoing.nomodule = python:nomodule' >> ../a/.hg/hgrc
474 $ hg pull ../a
476 $ hg pull ../a
475 pulling from ../a
477 pulling from ../a
476 searching for changes
478 searching for changes
477 abort: preoutgoing.nomodule hook is invalid ("nomodule" not in a module)
479 abort: preoutgoing.nomodule hook is invalid ("nomodule" not in a module)
478 [255]
480 [255]
479
481
480 $ echo '[hooks]' > ../a/.hg/hgrc
482 $ echo '[hooks]' > ../a/.hg/hgrc
481 $ echo 'preoutgoing.badmodule = python:nomodule.nowhere' >> ../a/.hg/hgrc
483 $ echo 'preoutgoing.badmodule = python:nomodule.nowhere' >> ../a/.hg/hgrc
482 $ hg pull ../a
484 $ hg pull ../a
483 pulling from ../a
485 pulling from ../a
484 searching for changes
486 searching for changes
485 abort: preoutgoing.badmodule hook is invalid (import of "nomodule" failed)
487 abort: preoutgoing.badmodule hook is invalid (import of "nomodule" failed)
486 [255]
488 [255]
487
489
488 $ echo '[hooks]' > ../a/.hg/hgrc
490 $ echo '[hooks]' > ../a/.hg/hgrc
489 $ echo 'preoutgoing.unreachable = python:hooktests.container.unreachable' >> ../a/.hg/hgrc
491 $ echo 'preoutgoing.unreachable = python:hooktests.container.unreachable' >> ../a/.hg/hgrc
490 $ hg pull ../a
492 $ hg pull ../a
491 pulling from ../a
493 pulling from ../a
492 searching for changes
494 searching for changes
493 abort: preoutgoing.unreachable hook is invalid (import of "hooktests.container" failed)
495 abort: preoutgoing.unreachable hook is invalid (import of "hooktests.container" failed)
494 [255]
496 [255]
495
497
496 $ echo '[hooks]' > ../a/.hg/hgrc
498 $ echo '[hooks]' > ../a/.hg/hgrc
497 $ echo 'preoutgoing.pass = python:hooktests.passhook' >> ../a/.hg/hgrc
499 $ echo 'preoutgoing.pass = python:hooktests.passhook' >> ../a/.hg/hgrc
498 $ hg pull ../a
500 $ hg pull ../a
499 pulling from ../a
501 pulling from ../a
500 searching for changes
502 searching for changes
501 hook args:
503 hook args:
502 hooktype preoutgoing
504 hooktype preoutgoing
503 source pull
505 source pull
504 adding changesets
506 adding changesets
505 adding manifests
507 adding manifests
506 adding file changes
508 adding file changes
507 added 1 changesets with 1 changes to 1 files
509 added 1 changesets with 1 changes to 1 files
508 adding remote bookmark quux
510 adding remote bookmark quux
509 (run 'hg update' to get a working copy)
511 (run 'hg update' to get a working copy)
510
512
511 make sure --traceback works
513 make sure --traceback works
512
514
513 $ echo '[hooks]' > .hg/hgrc
515 $ echo '[hooks]' > .hg/hgrc
514 $ echo 'commit.abort = python:hooktests.aborthook' >> .hg/hgrc
516 $ echo 'commit.abort = python:hooktests.aborthook' >> .hg/hgrc
515
517
516 $ echo aa > a
518 $ echo aa > a
517 $ hg --traceback commit -d '0 0' -ma 2>&1 | grep '^Traceback'
519 $ hg --traceback commit -d '0 0' -ma 2>&1 | grep '^Traceback'
518 Traceback (most recent call last):
520 Traceback (most recent call last):
519
521
520 $ cd ..
522 $ cd ..
521 $ hg init c
523 $ hg init c
522 $ cd c
524 $ cd c
523
525
524 $ cat > hookext.py <<EOF
526 $ cat > hookext.py <<EOF
525 > def autohook(**args):
527 > def autohook(**args):
526 > print "Automatically installed hook"
528 > print "Automatically installed hook"
527 >
529 >
528 > def reposetup(ui, repo):
530 > def reposetup(ui, repo):
529 > repo.ui.setconfig("hooks", "commit.auto", autohook)
531 > repo.ui.setconfig("hooks", "commit.auto", autohook)
530 > EOF
532 > EOF
531 $ echo '[extensions]' >> .hg/hgrc
533 $ echo '[extensions]' >> .hg/hgrc
532 $ echo 'hookext = hookext.py' >> .hg/hgrc
534 $ echo 'hookext = hookext.py' >> .hg/hgrc
533
535
534 $ touch foo
536 $ touch foo
535 $ hg add foo
537 $ hg add foo
536 $ hg ci -d '0 0' -m 'add foo'
538 $ hg ci -d '0 0' -m 'add foo'
537 Automatically installed hook
539 Automatically installed hook
538 $ echo >> foo
540 $ echo >> foo
539 $ hg ci --debug -d '0 0' -m 'change foo'
541 $ hg ci --debug -d '0 0' -m 'change foo'
540 committing files:
542 committing files:
541 foo
543 foo
542 committing manifest
544 committing manifest
543 committing changelog
545 committing changelog
544 calling hook commit.auto: hgext_hookext.autohook
546 calling hook commit.auto: hgext_hookext.autohook
545 Automatically installed hook
547 Automatically installed hook
546 committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
548 committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
547
549
548 $ hg showconfig hooks
550 $ hg showconfig hooks
549 hooks.commit.auto=<function autohook at *> (glob)
551 hooks.commit.auto=<function autohook at *> (glob)
550
552
551 test python hook configured with python:[file]:[hook] syntax
553 test python hook configured with python:[file]:[hook] syntax
552
554
553 $ cd ..
555 $ cd ..
554 $ mkdir d
556 $ mkdir d
555 $ cd d
557 $ cd d
556 $ hg init repo
558 $ hg init repo
557 $ mkdir hooks
559 $ mkdir hooks
558
560
559 $ cd hooks
561 $ cd hooks
560 $ cat > testhooks.py <<EOF
562 $ cat > testhooks.py <<EOF
561 > def testhook(**args):
563 > def testhook(**args):
562 > print 'hook works'
564 > print 'hook works'
563 > EOF
565 > EOF
564 $ echo '[hooks]' > ../repo/.hg/hgrc
566 $ echo '[hooks]' > ../repo/.hg/hgrc
565 $ echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc
567 $ echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc
566
568
567 $ cd ../repo
569 $ cd ../repo
568 $ hg commit -d '0 0'
570 $ hg commit -d '0 0'
569 hook works
571 hook works
570 nothing changed
572 nothing changed
571 [1]
573 [1]
572
574
573 $ echo '[hooks]' > .hg/hgrc
575 $ echo '[hooks]' > .hg/hgrc
574 $ echo "update.ne = python:`pwd`/nonexistent.py:testhook" >> .hg/hgrc
576 $ echo "update.ne = python:`pwd`/nonexistent.py:testhook" >> .hg/hgrc
575 $ echo "pre-identify.npmd = python:`pwd`/:no_python_module_dir" >> .hg/hgrc
577 $ echo "pre-identify.npmd = python:`pwd`/:no_python_module_dir" >> .hg/hgrc
576
578
577 $ hg up null
579 $ hg up null
578 loading update.ne hook failed:
580 loading update.ne hook failed:
579 abort: No such file or directory: $TESTTMP/d/repo/nonexistent.py
581 abort: No such file or directory: $TESTTMP/d/repo/nonexistent.py
580 [255]
582 [255]
581
583
582 $ hg id
584 $ hg id
583 loading pre-identify.npmd hook failed:
585 loading pre-identify.npmd hook failed:
584 abort: No module named repo!
586 abort: No module named repo!
585 [255]
587 [255]
586
588
587 $ cd ../../b
589 $ cd ../../b
588
590
589 make sure --traceback works on hook import failure
591 make sure --traceback works on hook import failure
590
592
591 $ cat > importfail.py <<EOF
593 $ cat > importfail.py <<EOF
592 > import somebogusmodule
594 > import somebogusmodule
593 > # dereference something in the module to force demandimport to load it
595 > # dereference something in the module to force demandimport to load it
594 > somebogusmodule.whatever
596 > somebogusmodule.whatever
595 > EOF
597 > EOF
596
598
597 $ echo '[hooks]' > .hg/hgrc
599 $ echo '[hooks]' > .hg/hgrc
598 $ echo 'precommit.importfail = python:importfail.whatever' >> .hg/hgrc
600 $ echo 'precommit.importfail = python:importfail.whatever' >> .hg/hgrc
599
601
600 $ echo a >> a
602 $ echo a >> a
601 $ hg --traceback commit -ma 2>&1 | egrep -v '^( +File| [a-zA-Z(])'
603 $ hg --traceback commit -ma 2>&1 | egrep -v '^( +File| [a-zA-Z(])'
602 exception from first failed import attempt:
604 exception from first failed import attempt:
603 Traceback (most recent call last):
605 Traceback (most recent call last):
604 ImportError: No module named somebogusmodule
606 ImportError: No module named somebogusmodule
605 exception from second failed import attempt:
607 exception from second failed import attempt:
606 Traceback (most recent call last):
608 Traceback (most recent call last):
607 ImportError: No module named hgext_importfail
609 ImportError: No module named hgext_importfail
608 Traceback (most recent call last):
610 Traceback (most recent call last):
609 Abort: precommit.importfail hook is invalid (import of "importfail" failed)
611 Abort: precommit.importfail hook is invalid (import of "importfail" failed)
610 abort: precommit.importfail hook is invalid (import of "importfail" failed)
612 abort: precommit.importfail hook is invalid (import of "importfail" failed)
611
613
612 Issue1827: Hooks Update & Commit not completely post operation
614 Issue1827: Hooks Update & Commit not completely post operation
613
615
614 commit and update hooks should run after command completion
616 commit and update hooks should run after command completion
615
617
616 $ echo '[hooks]' > .hg/hgrc
618 $ echo '[hooks]' > .hg/hgrc
617 $ echo 'commit = hg id' >> .hg/hgrc
619 $ echo 'commit = hg id' >> .hg/hgrc
618 $ echo 'update = hg id' >> .hg/hgrc
620 $ echo 'update = hg id' >> .hg/hgrc
619 $ echo bb > a
621 $ echo bb > a
620 $ hg ci -ma
622 $ hg ci -ma
621 223eafe2750c tip
623 223eafe2750c tip
622 $ hg up 0
624 $ hg up 0
623 cb9a9f314b8b
625 cb9a9f314b8b
624 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
626 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
625
627
626 make sure --verbose (and --quiet/--debug etc.) are propagated to the local ui
628 make sure --verbose (and --quiet/--debug etc.) are propagated to the local ui
627 that is passed to pre/post hooks
629 that is passed to pre/post hooks
628
630
629 $ echo '[hooks]' > .hg/hgrc
631 $ echo '[hooks]' > .hg/hgrc
630 $ echo 'pre-identify = python:hooktests.verbosehook' >> .hg/hgrc
632 $ echo 'pre-identify = python:hooktests.verbosehook' >> .hg/hgrc
631 $ hg id
633 $ hg id
632 cb9a9f314b8b
634 cb9a9f314b8b
633 $ hg id --verbose
635 $ hg id --verbose
634 calling hook pre-identify: hooktests.verbosehook
636 calling hook pre-identify: hooktests.verbosehook
635 verbose output from hook
637 verbose output from hook
636 cb9a9f314b8b
638 cb9a9f314b8b
637
639
638 Ensure hooks can be prioritized
640 Ensure hooks can be prioritized
639
641
640 $ echo '[hooks]' > .hg/hgrc
642 $ echo '[hooks]' > .hg/hgrc
641 $ echo 'pre-identify.a = python:hooktests.verbosehook' >> .hg/hgrc
643 $ echo 'pre-identify.a = python:hooktests.verbosehook' >> .hg/hgrc
642 $ echo 'pre-identify.b = python:hooktests.verbosehook' >> .hg/hgrc
644 $ echo 'pre-identify.b = python:hooktests.verbosehook' >> .hg/hgrc
643 $ echo 'priority.pre-identify.b = 1' >> .hg/hgrc
645 $ echo 'priority.pre-identify.b = 1' >> .hg/hgrc
644 $ echo 'pre-identify.c = python:hooktests.verbosehook' >> .hg/hgrc
646 $ echo 'pre-identify.c = python:hooktests.verbosehook' >> .hg/hgrc
645 $ hg id --verbose
647 $ hg id --verbose
646 calling hook pre-identify.b: hooktests.verbosehook
648 calling hook pre-identify.b: hooktests.verbosehook
647 verbose output from hook
649 verbose output from hook
648 calling hook pre-identify.a: hooktests.verbosehook
650 calling hook pre-identify.a: hooktests.verbosehook
649 verbose output from hook
651 verbose output from hook
650 calling hook pre-identify.c: hooktests.verbosehook
652 calling hook pre-identify.c: hooktests.verbosehook
651 verbose output from hook
653 verbose output from hook
652 cb9a9f314b8b
654 cb9a9f314b8b
653
655
654 new tags must be visible in pretxncommit (issue3210)
656 new tags must be visible in pretxncommit (issue3210)
655
657
656 $ echo 'pretxncommit.printtags = python:hooktests.printtags' >> .hg/hgrc
658 $ echo 'pretxncommit.printtags = python:hooktests.printtags' >> .hg/hgrc
657 $ hg tag -f foo
659 $ hg tag -f foo
658 ['a', 'foo', 'tip']
660 ['a', 'foo', 'tip']
659
661
660 new commits must be visible in pretxnchangegroup (issue3428)
662 new commits must be visible in pretxnchangegroup (issue3428)
661
663
662 $ cd ..
664 $ cd ..
663 $ hg init to
665 $ hg init to
664 $ echo '[hooks]' >> to/.hg/hgrc
666 $ echo '[hooks]' >> to/.hg/hgrc
665 $ echo 'pretxnchangegroup = hg --traceback tip' >> to/.hg/hgrc
667 $ echo 'pretxnchangegroup = hg --traceback tip' >> to/.hg/hgrc
666 $ echo a >> to/a
668 $ echo a >> to/a
667 $ hg --cwd to ci -Ama
669 $ hg --cwd to ci -Ama
668 adding a
670 adding a
669 $ hg clone to from
671 $ hg clone to from
670 updating to branch default
672 updating to branch default
671 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
673 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
672 $ echo aa >> from/a
674 $ echo aa >> from/a
673 $ hg --cwd from ci -mb
675 $ hg --cwd from ci -mb
674 $ hg --cwd from push
676 $ hg --cwd from push
675 pushing to $TESTTMP/to (glob)
677 pushing to $TESTTMP/to (glob)
676 searching for changes
678 searching for changes
677 adding changesets
679 adding changesets
678 adding manifests
680 adding manifests
679 adding file changes
681 adding file changes
680 added 1 changesets with 1 changes to 1 files
682 added 1 changesets with 1 changes to 1 files
681 changeset: 1:9836a07b9b9d
683 changeset: 1:9836a07b9b9d
682 tag: tip
684 tag: tip
683 user: test
685 user: test
684 date: Thu Jan 01 00:00:00 1970 +0000
686 date: Thu Jan 01 00:00:00 1970 +0000
685 summary: b
687 summary: b
686
688
General Comments 0
You need to be logged in to leave comments. Login now