tag: invalidate tag cache immediately after adding new tag (issue3210)...
Mads Kiilerich
r15929:4091660d default
@@ -1,2252 +1,2254 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache
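
For readers skimming the diff: util.propertycache computes an attribute once
and caches the result on the instance, while scmutil.filecache does the same
but also invalidates when the backing file under .hg/ changes (tracked via
self._filecache below). A minimal sketch of the propertycache idea, not the
exact Mercurial implementation:

class propertycache_sketch(object):
    # compute-once descriptor: the first access runs func, then shadows
    # this descriptor with the computed value in the instance dict, so
    # every later access is a plain attribute read
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        result = self.func(obj)
        obj.__dict__[self.name] = result
        return result
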
class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()
        self._dirtyphases = False
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed roots.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator).
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()
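
The loop above writes .hg/requires with one requirement token per line. For a
repository created with the default format options of this era, the file
would read roughly:

revlogv1
store
fncache
dotencode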

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    @filecache('phaseroots')
    def _phaseroots(self):
        self._dirtyphases = False
        phaseroots = phases.readroots(self)
        phases.filterunknown(self, phaseroots)
        return phaseroots

    @propertycache
    def _phaserev(self):
        cache = [phases.public] * len(self)
        for phase in phases.trackedphases:
            roots = map(self.changelog.rev, self._phaseroots[phase])
            if roots:
                for rev in roots:
                    cache[rev] = phase
                for rev in self.changelog.descendants(*roots):
                    cache[rev] = phase
        return cache
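
_phaserev flattens the phase roots into a per-revision table: every revision
starts out public, then each tracked phase claims its roots and all their
descendants. A toy illustration with hypothetical data (phases.public, draft
and secret are the integers 0, 1 and 2):

cache = [0, 0, 0, 0, 0]   # five revisions, all public by default
draftroots = [3]          # hypothetical: rev 3 was committed as draft
for rev in draftroots:
    cache[rev] = 1
for rev in [4]:           # the descendants of rev 3 in this toy history
    cache[rev] = 1
assert cache == [0, 0, 0, 1, 1]   # revs 3 and 4 are draft
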
    @filecache('00changelog.i', True)
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @filecache('00manifest.i', True)
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
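
The %-placeholders are expanded by revset.formatspec, which quotes each
argument by type so callers can splice values into a revset safely. A hedged
usage sketch (assumes an open repo object; the branch name is illustrative):

# contexts for a branch, newest first; %s is quoted by formatspec
for ctx in repo.set('reverse(branch(%s))', 'default'):
    print ctx.rev(), ctx.description().splitlines()[0]

# revs() returns plain revision numbers instead of contexts
headrevs = repo.revs('heads(all())')
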
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        # the line below is what this changeset adds: invalidate the tag
        # cache immediately after adding the new tag (issue3210)
        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
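
A hedged sketch of driving this API directly (normally the hg tag command
does it for you; the user and date values here are purely illustrative):

import time
node = repo.lookup('tip')                  # revision to tag
repo.tag('v1.0', node, 'Added tag v1.0',   # names may be a plain string
         local=False,                      # commit the tag to .hgtags
         user='editor@example.com',        # hypothetical committer
         date=(int(time.time()), 0))       # assumed (unixtime, offset) form
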
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        return self._tagscache.tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {}    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self.tags().iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt
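
With, say, two open heads on default and a tipmost-but-closed head on stable,
the two calls relate roughly like this (node values purely illustrative):

bmap = repo.branchmap()    # {'default': [nodeA, nodeB],
                           #  'stable':  [nodeC, closedD]}
btags = repo.branchtags()  # {'default': nodeB, 'stable': nodeC}
# the closed head closedD loses to the open head nodeC, even though
# closedD is tipmost on its branch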

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
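
The file parsed here, .hg/cache/branchheads, is plain text: the first line
records the tip node and tip rev the cache was valid for, and every later
line maps a head node to its branch name. Schematically (hashes shortened;
real entries are 40 hex digits):

1a2b3c... 1342
4d5e6f... default
7a8b9c... stable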

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
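
lookup() thus fixes the resolution order for every user-supplied symbol:
integer rev, the special names '.', 'null' and 'tip', exact node, bookmark,
tag, branch, and finally an unambiguous node prefix. A hedged sketch
(assumes the names exist in the repository):

repo.lookup(0)          # revision number -> node
repo.lookup('.')        # first parent of the working directory
repo.lookup('v1.0')     # tag, unless a bookmark of the same name wins
repo.lookup('1a2b3c')   # unambiguous node prefix, tried last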

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or self._phaserev[r] >= phases.secret)
            result.append(resp)
        return result
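
known() backs the 'known' wire-protocol capability advertised at the top of
the class: given candidate nodes from a peer, it reports which ones this
repository has and will admit to, hiding secret changesets from discovery.
A small sketch with one real and one bogus node:

nodes = [repo.lookup('tip'), 'x' * 20]   # the second node cannot exist
print repo.known(nodes)                  # -> [True, False]
# a node that is present but in the secret phase also reports False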

    def local(self):
        return self

    def cancopy(self):
        return (repo.repository.cancopy(self)
                and not self._phaseroots[phases.secret])

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
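
Callers bracket multi-file store writes with such a transaction so that a
crash can be recovered from the journal. The typical calling pattern of this
era looks roughly like this (hedged sketch; the description is illustrative):

tr = repo.transaction('my-operation')
try:
    # ... append to changelog/manifest/filelogs under tr ...
    tr.close()      # commit: the journal becomes the undo data
finally:
    tr.release()    # if close() was never reached, roll back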
759
761
760 def _writejournal(self, desc):
762 def _writejournal(self, desc):
761 # save dirstate for rollback
763 # save dirstate for rollback
762 try:
764 try:
763 ds = self.opener.read("dirstate")
765 ds = self.opener.read("dirstate")
764 except IOError:
766 except IOError:
765 ds = ""
767 ds = ""
766 self.opener.write("journal.dirstate", ds)
768 self.opener.write("journal.dirstate", ds)
767 self.opener.write("journal.branch",
769 self.opener.write("journal.branch",
768 encoding.fromlocal(self.dirstate.branch()))
770 encoding.fromlocal(self.dirstate.branch()))
769 self.opener.write("journal.desc",
771 self.opener.write("journal.desc",
770 "%d\n%s\n" % (len(self), desc))
772 "%d\n%s\n" % (len(self), desc))
771
773
772 bkname = self.join('bookmarks')
774 bkname = self.join('bookmarks')
773 if os.path.exists(bkname):
775 if os.path.exists(bkname):
774 util.copyfile(bkname, self.join('journal.bookmarks'))
776 util.copyfile(bkname, self.join('journal.bookmarks'))
775 else:
777 else:
776 self.opener.write('journal.bookmarks', '')
778 self.opener.write('journal.bookmarks', '')
777 phasesname = self.sjoin('phaseroots')
779 phasesname = self.sjoin('phaseroots')
778 if os.path.exists(phasesname):
780 if os.path.exists(phasesname):
779 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
781 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
780 else:
782 else:
781 self.sopener.write('journal.phaseroots', '')
783 self.sopener.write('journal.phaseroots', '')
782
784
783 return (self.sjoin('journal'), self.join('journal.dirstate'),
785 return (self.sjoin('journal'), self.join('journal.dirstate'),
784 self.join('journal.branch'), self.join('journal.desc'),
786 self.join('journal.branch'), self.join('journal.desc'),
785 self.join('journal.bookmarks'),
787 self.join('journal.bookmarks'),
786 self.sjoin('journal.phaseroots'))
788 self.sjoin('journal.phaseroots'))
787
789
788 def recover(self):
790 def recover(self):
789 lock = self.lock()
791 lock = self.lock()
790 try:
792 try:
791 if os.path.exists(self.sjoin("journal")):
793 if os.path.exists(self.sjoin("journal")):
792 self.ui.status(_("rolling back interrupted transaction\n"))
794 self.ui.status(_("rolling back interrupted transaction\n"))
793 transaction.rollback(self.sopener, self.sjoin("journal"),
795 transaction.rollback(self.sopener, self.sjoin("journal"),
794 self.ui.warn)
796 self.ui.warn)
795 self.invalidate()
797 self.invalidate()
796 return True
798 return True
797 else:
799 else:
798 self.ui.warn(_("no interrupted transaction available\n"))
800 self.ui.warn(_("no interrupted transaction available\n"))
799 return False
801 return False
800 finally:
802 finally:
801 lock.release()
803 lock.release()
802
804
803 def rollback(self, dryrun=False, force=False):
805 def rollback(self, dryrun=False, force=False):
804 wlock = lock = None
806 wlock = lock = None
805 try:
807 try:
806 wlock = self.wlock()
808 wlock = self.wlock()
807 lock = self.lock()
809 lock = self.lock()
808 if os.path.exists(self.sjoin("undo")):
810 if os.path.exists(self.sjoin("undo")):
809 return self._rollback(dryrun, force)
811 return self._rollback(dryrun, force)
810 else:
812 else:
811 self.ui.warn(_("no rollback information available\n"))
813 self.ui.warn(_("no rollback information available\n"))
812 return 1
814 return 1
813 finally:
815 finally:
814 release(lock, wlock)
816 release(lock, wlock)
815
817
816 def _rollback(self, dryrun, force):
818 def _rollback(self, dryrun, force):
817 ui = self.ui
819 ui = self.ui
818 try:
820 try:
819 args = self.opener.read('undo.desc').splitlines()
821 args = self.opener.read('undo.desc').splitlines()
820 (oldlen, desc, detail) = (int(args[0]), args[1], None)
822 (oldlen, desc, detail) = (int(args[0]), args[1], None)
821 if len(args) >= 3:
823 if len(args) >= 3:
822 detail = args[2]
824 detail = args[2]
823 oldtip = oldlen - 1
825 oldtip = oldlen - 1
824
826
825 if detail and ui.verbose:
827 if detail and ui.verbose:
826 msg = (_('repository tip rolled back to revision %s'
828 msg = (_('repository tip rolled back to revision %s'
827 ' (undo %s: %s)\n')
829 ' (undo %s: %s)\n')
828 % (oldtip, desc, detail))
830 % (oldtip, desc, detail))
829 else:
831 else:
830 msg = (_('repository tip rolled back to revision %s'
832 msg = (_('repository tip rolled back to revision %s'
831 ' (undo %s)\n')
833 ' (undo %s)\n')
832 % (oldtip, desc))
834 % (oldtip, desc))
833 except IOError:
835 except IOError:
834 msg = _('rolling back unknown transaction\n')
836 msg = _('rolling back unknown transaction\n')
835 desc = None
837 desc = None
836
838
837 if not force and self['.'] != self['tip'] and desc == 'commit':
839 if not force and self['.'] != self['tip'] and desc == 'commit':
838 raise util.Abort(
840 raise util.Abort(
839 _('rollback of last commit while not checked out '
841 _('rollback of last commit while not checked out '
840 'may lose data'), hint=_('use -f to force'))
842 'may lose data'), hint=_('use -f to force'))
841
843
842 ui.status(msg)
844 ui.status(msg)
843 if dryrun:
845 if dryrun:
844 return 0
846 return 0
845
847
846 parents = self.dirstate.parents()
848 parents = self.dirstate.parents()
847 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
849 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
848 if os.path.exists(self.join('undo.bookmarks')):
850 if os.path.exists(self.join('undo.bookmarks')):
849 util.rename(self.join('undo.bookmarks'),
851 util.rename(self.join('undo.bookmarks'),
850 self.join('bookmarks'))
852 self.join('bookmarks'))
851 if os.path.exists(self.sjoin('undo.phaseroots')):
853 if os.path.exists(self.sjoin('undo.phaseroots')):
852 util.rename(self.sjoin('undo.phaseroots'),
854 util.rename(self.sjoin('undo.phaseroots'),
853 self.sjoin('phaseroots'))
855 self.sjoin('phaseroots'))
854 self.invalidate()
856 self.invalidate()
855
857
856 parentgone = (parents[0] not in self.changelog.nodemap or
858 parentgone = (parents[0] not in self.changelog.nodemap or
857 parents[1] not in self.changelog.nodemap)
859 parents[1] not in self.changelog.nodemap)
858 if parentgone:
860 if parentgone:
859 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
861 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
860 try:
862 try:
861 branch = self.opener.read('undo.branch')
863 branch = self.opener.read('undo.branch')
862 self.dirstate.setbranch(branch)
864 self.dirstate.setbranch(branch)
863 except IOError:
865 except IOError:
864 ui.warn(_('named branch could not be reset: '
866 ui.warn(_('named branch could not be reset: '
865 'current branch is still \'%s\'\n')
867 'current branch is still \'%s\'\n')
866 % self.dirstate.branch())
868 % self.dirstate.branch())
867
869
868 self.dirstate.invalidate()
870 self.dirstate.invalidate()
869 parents = tuple([p.rev() for p in self.parents()])
871 parents = tuple([p.rev() for p in self.parents()])
870 if len(parents) > 1:
872 if len(parents) > 1:
871 ui.status(_('working directory now based on '
873 ui.status(_('working directory now based on '
872 'revisions %d and %d\n') % parents)
874 'revisions %d and %d\n') % parents)
873 else:
875 else:
874 ui.status(_('working directory now based on '
876 ui.status(_('working directory now based on '
875 'revision %d\n') % parents)
877 'revision %d\n') % parents)
876 self.destroyed()
878 self.destroyed()
877 return 0
879 return 0
878
880
879 def invalidatecaches(self):
881 def invalidatecaches(self):
880 try:
882 try:
881 delattr(self, '_tagscache')
883 delattr(self, '_tagscache')
882 except AttributeError:
884 except AttributeError:
883 pass
885 pass
884
886
885 self._branchcache = None # in UTF-8
887 self._branchcache = None # in UTF-8
886 self._branchcachetip = None
888 self._branchcachetip = None
887
889
888 def invalidatedirstate(self):
890 def invalidatedirstate(self):
889 '''Invalidates the dirstate, causing the next call to dirstate
891 '''Invalidates the dirstate, causing the next call to dirstate
890 to check if it was modified since the last time it was read,
892 to check if it was modified since the last time it was read,
891 rereading it if it has.
893 rereading it if it has.
892
894
893 This is different to dirstate.invalidate() that it doesn't always
895 This is different to dirstate.invalidate() that it doesn't always
894 rereads the dirstate. Use dirstate.invalidate() if you want to
896 rereads the dirstate. Use dirstate.invalidate() if you want to
895 explicitly read the dirstate again (i.e. restoring it to a previous
897 explicitly read the dirstate again (i.e. restoring it to a previous
896 known good state).'''
898 known good state).'''
897 try:
899 try:
898 delattr(self, 'dirstate')
900 delattr(self, 'dirstate')
899 except AttributeError:
901 except AttributeError:
900 pass
902 pass
901
903
902 def invalidate(self):
904 def invalidate(self):
903 for k in self._filecache:
905 for k in self._filecache:
904 # dirstate is invalidated separately in invalidatedirstate()
906 # dirstate is invalidated separately in invalidatedirstate()
905 if k == 'dirstate':
907 if k == 'dirstate':
906 continue
908 continue
907
909
908 try:
910 try:
909 delattr(self, k)
911 delattr(self, k)
910 except AttributeError:
912 except AttributeError:
911 pass
913 pass
912 self.invalidatecaches()
914 self.invalidatecaches()
913
915
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if self._dirtyphases:
                phases.writeroots(self)
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

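    # Illustrative locking discipline (a sketch, not an API guarantee):
    # callers that touch both the working copy and the store take wlock()
    # first, then lock(), open a transaction inside the store lock, and
    # release everything in reverse order:
    #
    #     wlock = repo.wlock()
    #     lock = repo.lock()
    #     try:
    #         tr = repo.transaction('example')
    #         try:
    #             ...                      # mutate store and working copy
    #             tr.close()
    #         finally:
    #             tr.release()
    #     finally:
    #         release(lock, wlock)         # 'release' from the lock module
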
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

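    # For reference: when _filecommit() records a copy or rename, the new
    # filelog revision carries metadata of the form
    # {'copy': <source path>, 'copyrev': <hex filenode>} and its first
    # parent is set to nullid, the "look up the copy data" convention
    # described in the comment above. Callers get back either the new
    # filenode or, when nothing changed, the unchanged fparent1.
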
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0],
                                     hint=_("use --subrepos for recursive commit"))

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

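    # Minimal usage sketch (hypothetical caller, not part of this file):
    #
    #     node = repo.commit(text='fix frobnication', user='alice')
    #     if node is None:
    #         pass    # nothing to commit; no new revision was created
    #
    # commit() returns the new changelog node, or None when there is
    # nothing to commit (see the early 'return None' above).
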
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = self.ui.configint('phases', 'new-commit',
                                            phases.draft)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

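    # The seven lists returned by status() are, in order: modified, added,
    # removed, deleted, unknown, ignored and clean. An illustrative
    # unpacking (hypothetical caller):
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(unknown=True, ignored=True, clean=True)
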
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

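    # between() walks first parents from each 'top' towards 'bottom' and
    # records the nodes found at exponentially growing distances
    # (1, 2, 4, 8, ...); that is what the doubling of 'f' above implements.
    # The legacy discovery protocol uses this sampling to narrow down the
    # common ancestor in few round trips.
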
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # get phase data from the remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                subset = common + added
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, common + added)
            else:
                # remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, common + added)
        finally:
            lock.release()

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

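    # Sketch of how an extension might hook checkpush (hypothetical names;
    # mq, for instance, refuses to push with applied patches this way):
    #
    #     def mycheckpush(orig, repo, force, revs):
    #         if refusetopush(repo, revs):        # extension-specific check
    #             raise util.Abort(_('push refused'))
    #         return orig(repo, force, revs)
    #     extensions.wrapfunction(localrepo.localrepository, 'checkpush',
    #                             mycheckpush)
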
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            # get local lock as we might write phase data
            locallock = self.lock()
            try:
                cg, remote_heads, fut = discovery.prepush(self, remote, force,
                                                          revs, newbranch)
                ret = remote_heads
                # create a callback for addchangegroup; it is also used by
                # the branches of the conditional below
                if cg is not None:
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remote_heads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remote_heads, 'push')
                    else:
                        # we return an integer indicating remote head count change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, fut)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, fut, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, fut)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, fut)
                    ### Apply local phase on remote
                    #
                    # XXX If the push failed we should use strict common and
                    # not future to avoid pushing phase data on unknown
                    # changesets. This is to be done later.

                    # Get the list of all revs that are draft on remote but
                    # public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly a set of roots; we may want to ensure it
                    # XXX is, but that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, fut)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
            finally:
                locallock.release()
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

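    # Note on the entry points above: getbundle() is what the wire protocol
    # calls; it normalizes the common/heads pair and delegates to
    # getlocalbundle(), which in turn streams the actual chunks through
    # _changegroupsubset() below.
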
1745 def _changegroupsubset(self, commonrevs, csets, heads, source):
1747 def _changegroupsubset(self, commonrevs, csets, heads, source):
1746
1748
1747 cl = self.changelog
1749 cl = self.changelog
1748 mf = self.manifest
1750 mf = self.manifest
1749 mfs = {} # needed manifests
1751 mfs = {} # needed manifests
1750 fnodes = {} # needed file nodes
1752 fnodes = {} # needed file nodes
1751 changedfiles = set()
1753 changedfiles = set()
1752 fstate = ['', {}]
1754 fstate = ['', {}]
1753 count = [0]
1755 count = [0]
1754
1756
1755 # can we go through the fast path ?
1757 # can we go through the fast path ?
1756 heads.sort()
1758 heads.sort()
1757 if heads == sorted(self.heads()):
1759 if heads == sorted(self.heads()):
1758 return self._changegroup(csets, source)
1760 return self._changegroup(csets, source)
1759
1761
1760 # slow path
1762 # slow path
1761 self.hook('preoutgoing', throw=True, source=source)
1763 self.hook('preoutgoing', throw=True, source=source)
1762 self.changegroupinfo(csets, source)
1764 self.changegroupinfo(csets, source)
1763
1765
1764 # filter any nodes that claim to be part of the known set
1766 # filter any nodes that claim to be part of the known set
1765 def prune(revlog, missing):
1767 def prune(revlog, missing):
1766 return [n for n in missing
1768 return [n for n in missing
1767 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1769 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1768
1770
1769 def lookup(revlog, x):
1771 def lookup(revlog, x):
1770 if revlog == cl:
1772 if revlog == cl:
1771 c = cl.read(x)
1773 c = cl.read(x)
1772 changedfiles.update(c[3])
1774 changedfiles.update(c[3])
1773 mfs.setdefault(c[0], x)
1775 mfs.setdefault(c[0], x)
1774 count[0] += 1
1776 count[0] += 1
1775 self.ui.progress(_('bundling'), count[0],
1777 self.ui.progress(_('bundling'), count[0],
1776 unit=_('changesets'), total=len(csets))
1778 unit=_('changesets'), total=len(csets))
1777 return x
1779 return x
1778 elif revlog == mf:
1780 elif revlog == mf:
1779 clnode = mfs[x]
1781 clnode = mfs[x]
1780 mdata = mf.readfast(x)
1782 mdata = mf.readfast(x)
1781 for f in changedfiles:
1783 for f in changedfiles:
1782 if f in mdata:
1784 if f in mdata:
1783 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1785 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1784 count[0] += 1
1786 count[0] += 1
1785 self.ui.progress(_('bundling'), count[0],
1787 self.ui.progress(_('bundling'), count[0],
1786 unit=_('manifests'), total=len(mfs))
1788 unit=_('manifests'), total=len(mfs))
1787 return mfs[x]
1789 return mfs[x]
1788 else:
1790 else:
1789 self.ui.progress(
1791 self.ui.progress(
1790 _('bundling'), count[0], item=fstate[0],
1792 _('bundling'), count[0], item=fstate[0],
1791 unit=_('files'), total=len(changedfiles))
1793 unit=_('files'), total=len(changedfiles))
1792 return fstate[1][x]
1794 return fstate[1][x]
1793
1795
1794 bundler = changegroup.bundle10(lookup)
1796 bundler = changegroup.bundle10(lookup)
1795 reorder = self.ui.config('bundle', 'reorder', 'auto')
1797 reorder = self.ui.config('bundle', 'reorder', 'auto')
1796 if reorder == 'auto':
1798 if reorder == 'auto':
1797 reorder = None
1799 reorder = None
1798 else:
1800 else:
1799 reorder = util.parsebool(reorder)
1801 reorder = util.parsebool(reorder)
1800
1802
1801 def gengroup():
1803 def gengroup():
1802 # Create a changenode group generator that will call our functions
1804 # Create a changenode group generator that will call our functions
1803 # back to lookup the owning changenode and collect information.
1805 # back to lookup the owning changenode and collect information.
1804 for chunk in cl.group(csets, bundler, reorder=reorder):
1806 for chunk in cl.group(csets, bundler, reorder=reorder):
1805 yield chunk
1807 yield chunk
1806 self.ui.progress(_('bundling'), None)
1808 self.ui.progress(_('bundling'), None)
1807
1809
1808 # Create a generator for the manifestnodes that calls our lookup
1810 # Create a generator for the manifestnodes that calls our lookup
1809 # and data collection functions back.
1811 # and data collection functions back.
1810 count[0] = 0
1812 count[0] = 0
1811 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1813 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1812 yield chunk
1814 yield chunk
1813 self.ui.progress(_('bundling'), None)
1815 self.ui.progress(_('bundling'), None)
1814
1816
1815 mfs.clear()
1817 mfs.clear()
1816
1818
1817 # Go through all our files in order sorted by name.
1819 # Go through all our files in order sorted by name.
1818 count[0] = 0
1820 count[0] = 0
1819 for fname in sorted(changedfiles):
1821 for fname in sorted(changedfiles):
1820 filerevlog = self.file(fname)
1822 filerevlog = self.file(fname)
1821 if not len(filerevlog):
1823 if not len(filerevlog):
1822 raise util.Abort(_("empty or missing revlog for %s") % fname)
1824 raise util.Abort(_("empty or missing revlog for %s") % fname)
1823 fstate[0] = fname
1825 fstate[0] = fname
1824 fstate[1] = fnodes.pop(fname, {})
1826 fstate[1] = fnodes.pop(fname, {})
1825
1827
1826 nodelist = prune(filerevlog, fstate[1])
1828 nodelist = prune(filerevlog, fstate[1])
1827 if nodelist:
1829 if nodelist:
1828 count[0] += 1
1830 count[0] += 1
1829 yield bundler.fileheader(fname)
1831 yield bundler.fileheader(fname)
1830 for chunk in filerevlog.group(nodelist, bundler, reorder):
1832 for chunk in filerevlog.group(nodelist, bundler, reorder):
1831 yield chunk
1833 yield chunk
1832
1834
1833 # Signal that no more groups are left.
1835 # Signal that no more groups are left.
1834 yield bundler.close()
1836 yield bundler.close()
1835 self.ui.progress(_('bundling'), None)
1837 self.ui.progress(_('bundling'), None)
1836
1838
1837 if csets:
1839 if csets:
1838 self.hook('outgoing', node=hex(csets[0]), source=source)
1840 self.hook('outgoing', node=hex(csets[0]), source=source)
1839
1841
1840 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1842 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
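
# Sketch (illustration only, not part of localrepo.py): the 'bundle.reorder'
# handling above is a three-state switch -- 'auto' leaves reordering to the
# revlog code (None), anything else is parsed as a boolean.  This models
# that behavior in plain Python; the function name is hypothetical.
def _parse_reorder_sketch(value):
    truthy = ('1', 'yes', 'true', 'on', 'always')
    falsy = ('0', 'no', 'false', 'off', 'never')
    if value == 'auto':
        return None        # let cl.group()/mf.group() decide
    if value.lower() in truthy:
        return True        # force reordering
    if value.lower() in falsy:
        return False       # forbid reordering
    return None            # unparsable: behave like 'auto'

# _parse_reorder_sketch('auto') -> None; _parse_reorder_sketch('yes') -> True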

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
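
# Sketch (not from localrepo.py): both changegroup methods above wrap a
# chunk generator in a buffer that exposes a file-like read(), so callers
# can stream the bundle without materializing it.  A minimal pure-Python
# model of that pattern, with a hypothetical class name:
class _ChunkBufferSketch(object):
    def __init__(self, gen):
        self._gen = gen
        self._buf = ''
    def read(self, size):
        # pull chunks from the generator until we can satisfy the request
        while len(self._buf) < size:
            try:
                self._buf += next(self._gen)
            except StopIteration:
                break
        data, self._buf = self._buf[:size], self._buf[size:]
        return data

# _ChunkBufferSketch(iter(['ab', 'cd', 'ef'])).read(3) returns 'abc' and
# keeps 'd' buffered for the next read.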

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret.
                #
                # We should not use 'added' here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
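
# Sketch (illustration only): how a caller could decode addchangegroup's
# documented return value; the function name is hypothetical.
def _describe_addchangegroup_result(ret):
    if ret == 0:
        return 'no changes (or no source)'
    if ret == 1:
        return 'changes added, head count unchanged'
    if ret > 1:
        return '%d new head(s)' % (ret - 1)
    return '%d head(s) removed' % (-ret - 1)

# e.g. a pull that creates one extra head returns 2:
# _describe_addchangegroup_result(2) -> '1 new head(s)'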

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new
            # format-related requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
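
# Sketch (not part of localrepo.py): the stream_out wire format that the
# parser above expects -- a status line, a "<filecount> <bytecount>" line,
# then per file a "<name>\0<size>" header followed by exactly <size> bytes
# of raw store data.  A minimal stand-alone reader; the name is hypothetical.
def _parse_stream_sketch(fp):
    if int(fp.readline()) != 0:          # non-zero status means refusal
        raise ValueError('server refused stream')
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    out = {}
    for _unused in range(total_files):
        name, size = fp.readline().split('\0', 1)
        out[name] = fp.read(int(size))   # int() tolerates the trailing '\n'
    return out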

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
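
# Sketch (illustration only): the decision order implemented by clone()
# above, with remote capabilities reduced to a plain dict; every name here
# is hypothetical.
def _clone_strategy_sketch(caps, heads, stream, supportedformats):
    if not stream or heads:
        return 'pull'                      # explicit heads force a pull
    if caps.get('stream'):
        return 'stream (revlogv1)'         # old-style capability
    streamreqs = caps.get('streamreqs')
    if streamreqs:
        reqs = set(streamreqs.split(','))
        if not reqs - supportedformats:    # we understand every format
            return 'stream (%s)' % ','.join(sorted(reqs))
    return 'pull'

# _clone_strategy_sketch({'streamreqs': 'revlogv1,generaldelta'}, [], True,
#                        set(['revlogv1', 'generaldelta']))
# -> 'stream (generaldelta,revlogv1)'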

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,571 +1,581 @@
1 $ "$TESTDIR/hghave" system-sh || exit 80
1 $ "$TESTDIR/hghave" system-sh || exit 80
2
2
3 commit hooks can see env vars
3 commit hooks can see env vars
4
4
5 $ hg init a
5 $ hg init a
6 $ cd a
6 $ cd a
7 $ echo "[hooks]" > .hg/hgrc
7 $ echo "[hooks]" > .hg/hgrc
8 $ echo 'commit = unset HG_LOCAL HG_TAG; python "$TESTDIR"/printenv.py commit' >> .hg/hgrc
8 $ echo 'commit = unset HG_LOCAL HG_TAG; python "$TESTDIR"/printenv.py commit' >> .hg/hgrc
9 $ echo 'commit.b = unset HG_LOCAL HG_TAG; python "$TESTDIR"/printenv.py commit.b' >> .hg/hgrc
9 $ echo 'commit.b = unset HG_LOCAL HG_TAG; python "$TESTDIR"/printenv.py commit.b' >> .hg/hgrc
10 $ echo 'precommit = unset HG_LOCAL HG_NODE HG_TAG; python "$TESTDIR"/printenv.py precommit' >> .hg/hgrc
10 $ echo 'precommit = unset HG_LOCAL HG_NODE HG_TAG; python "$TESTDIR"/printenv.py precommit' >> .hg/hgrc
11 $ echo 'pretxncommit = unset HG_LOCAL HG_TAG; python "$TESTDIR"/printenv.py pretxncommit' >> .hg/hgrc
11 $ echo 'pretxncommit = unset HG_LOCAL HG_TAG; python "$TESTDIR"/printenv.py pretxncommit' >> .hg/hgrc
12 $ echo 'pretxncommit.tip = hg -q tip' >> .hg/hgrc
12 $ echo 'pretxncommit.tip = hg -q tip' >> .hg/hgrc
13 $ echo 'pre-identify = python "$TESTDIR"/printenv.py pre-identify 1' >> .hg/hgrc
13 $ echo 'pre-identify = python "$TESTDIR"/printenv.py pre-identify 1' >> .hg/hgrc
14 $ echo 'pre-cat = python "$TESTDIR"/printenv.py pre-cat' >> .hg/hgrc
14 $ echo 'pre-cat = python "$TESTDIR"/printenv.py pre-cat' >> .hg/hgrc
15 $ echo 'post-cat = python "$TESTDIR"/printenv.py post-cat' >> .hg/hgrc
15 $ echo 'post-cat = python "$TESTDIR"/printenv.py post-cat' >> .hg/hgrc
16 $ echo a > a
16 $ echo a > a
17 $ hg add a
17 $ hg add a
18 $ hg commit -m a
18 $ hg commit -m a
19 precommit hook: HG_PARENT1=0000000000000000000000000000000000000000
19 precommit hook: HG_PARENT1=0000000000000000000000000000000000000000
20 pretxncommit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000 HG_PENDING=$TESTTMP/a
20 pretxncommit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000 HG_PENDING=$TESTTMP/a
21 0:cb9a9f314b8b
21 0:cb9a9f314b8b
22 commit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
22 commit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
23 commit.b hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
23 commit.b hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
24
24
25 $ hg clone . ../b
25 $ hg clone . ../b
26 updating to branch default
26 updating to branch default
27 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
27 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
28 $ cd ../b
28 $ cd ../b
29
29
30 changegroup hooks can see env vars
30 changegroup hooks can see env vars
31
31
32 $ echo '[hooks]' > .hg/hgrc
32 $ echo '[hooks]' > .hg/hgrc
33 $ echo 'prechangegroup = python "$TESTDIR"/printenv.py prechangegroup' >> .hg/hgrc
33 $ echo 'prechangegroup = python "$TESTDIR"/printenv.py prechangegroup' >> .hg/hgrc
34 $ echo 'changegroup = python "$TESTDIR"/printenv.py changegroup' >> .hg/hgrc
34 $ echo 'changegroup = python "$TESTDIR"/printenv.py changegroup' >> .hg/hgrc
35 $ echo 'incoming = python "$TESTDIR"/printenv.py incoming' >> .hg/hgrc
35 $ echo 'incoming = python "$TESTDIR"/printenv.py incoming' >> .hg/hgrc
36
36
37 pretxncommit and commit hooks can see both parents of merge
37 pretxncommit and commit hooks can see both parents of merge
38
38
39 $ cd ../a
39 $ cd ../a
40 $ echo b >> a
40 $ echo b >> a
41 $ hg commit -m a1 -d "1 0"
41 $ hg commit -m a1 -d "1 0"
42 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
42 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
43 pretxncommit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
43 pretxncommit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
44 1:ab228980c14d
44 1:ab228980c14d
45 commit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
45 commit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
46 commit.b hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
46 commit.b hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
47 $ hg update -C 0
47 $ hg update -C 0
48 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
48 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
49 $ echo b > b
49 $ echo b > b
50 $ hg add b
50 $ hg add b
51 $ hg commit -m b -d '1 0'
51 $ hg commit -m b -d '1 0'
52 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
52 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
53 pretxncommit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
53 pretxncommit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
54 2:ee9deb46ab31
54 2:ee9deb46ab31
55 commit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
55 commit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
56 commit.b hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
56 commit.b hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
57 created new head
57 created new head
58 $ hg merge 1
58 $ hg merge 1
59 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
59 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
60 (branch merge, don't forget to commit)
60 (branch merge, don't forget to commit)
61 $ hg commit -m merge -d '2 0'
61 $ hg commit -m merge -d '2 0'
62 precommit hook: HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
62 precommit hook: HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
63 pretxncommit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd HG_PENDING=$TESTTMP/a
63 pretxncommit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd HG_PENDING=$TESTTMP/a
64 3:07f3376c1e65
64 3:07f3376c1e65
65 commit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
65 commit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
66 commit.b hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
66 commit.b hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
67
67
68 test generic hooks
68 test generic hooks
69
69
70 $ hg id
70 $ hg id
71 pre-identify hook: HG_ARGS=id HG_OPTS={'bookmarks': None, 'branch': None, 'id': None, 'insecure': None, 'num': None, 'remotecmd': '', 'rev': '', 'ssh': '', 'tags': None} HG_PATS=[]
71 pre-identify hook: HG_ARGS=id HG_OPTS={'bookmarks': None, 'branch': None, 'id': None, 'insecure': None, 'num': None, 'remotecmd': '', 'rev': '', 'ssh': '', 'tags': None} HG_PATS=[]
72 warning: pre-identify hook exited with status 1
72 warning: pre-identify hook exited with status 1
73 [1]
73 [1]
74 $ hg cat b
74 $ hg cat b
75 pre-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b']
75 pre-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b']
76 b
76 b
77 post-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b'] HG_RESULT=0
77 post-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b'] HG_RESULT=0
78
78
79 $ cd ../b
79 $ cd ../b
80 $ hg pull ../a
80 $ hg pull ../a
81 pulling from ../a
81 pulling from ../a
82 searching for changes
82 searching for changes
83 prechangegroup hook: HG_SOURCE=pull HG_URL=file:$TESTTMP/a
83 prechangegroup hook: HG_SOURCE=pull HG_URL=file:$TESTTMP/a
84 adding changesets
84 adding changesets
85 adding manifests
85 adding manifests
86 adding file changes
86 adding file changes
87 added 3 changesets with 2 changes to 2 files
87 added 3 changesets with 2 changes to 2 files
88 changegroup hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_URL=file:$TESTTMP/a
88 changegroup hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_URL=file:$TESTTMP/a
89 incoming hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_URL=file:$TESTTMP/a
89 incoming hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_URL=file:$TESTTMP/a
90 incoming hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_SOURCE=pull HG_URL=file:$TESTTMP/a
90 incoming hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_SOURCE=pull HG_URL=file:$TESTTMP/a
91 incoming hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_SOURCE=pull HG_URL=file:$TESTTMP/a
91 incoming hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_SOURCE=pull HG_URL=file:$TESTTMP/a
92 (run 'hg update' to get a working copy)
92 (run 'hg update' to get a working copy)
93
93
94 tag hooks can see env vars
94 tag hooks can see env vars
95
95
96 $ cd ../a
96 $ cd ../a
97 $ echo 'pretag = python "$TESTDIR"/printenv.py pretag' >> .hg/hgrc
97 $ echo 'pretag = python "$TESTDIR"/printenv.py pretag' >> .hg/hgrc
98 $ echo 'tag = unset HG_PARENT1 HG_PARENT2; python "$TESTDIR"/printenv.py tag' >> .hg/hgrc
98 $ echo 'tag = unset HG_PARENT1 HG_PARENT2; python "$TESTDIR"/printenv.py tag' >> .hg/hgrc
99 $ hg tag -d '3 0' a
99 $ hg tag -d '3 0' a
100 pretag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
100 pretag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
101 precommit hook: HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
101 precommit hook: HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
102 pretxncommit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PENDING=$TESTTMP/a
102 pretxncommit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PENDING=$TESTTMP/a
103 4:539e4b31b6dc
103 4:539e4b31b6dc
104 commit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
104 commit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
105 commit.b hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
105 commit.b hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
106 tag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
106 tag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
107 $ hg tag -l la
107 $ hg tag -l la
108 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
108 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
109 tag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
109 tag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
110
110
111 pretag hook can forbid tagging
111 pretag hook can forbid tagging
112
112
113 $ echo 'pretag.forbid = python "$TESTDIR"/printenv.py pretag.forbid 1' >> .hg/hgrc
113 $ echo 'pretag.forbid = python "$TESTDIR"/printenv.py pretag.forbid 1' >> .hg/hgrc
114 $ hg tag -d '4 0' fa
114 $ hg tag -d '4 0' fa
115 pretag hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
115 pretag hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
116 pretag.forbid hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
116 pretag.forbid hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
117 abort: pretag.forbid hook exited with status 1
117 abort: pretag.forbid hook exited with status 1
118 [255]
118 [255]
119 $ hg tag -l fla
119 $ hg tag -l fla
120 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
120 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
121 pretag.forbid hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
121 pretag.forbid hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
122 abort: pretag.forbid hook exited with status 1
122 abort: pretag.forbid hook exited with status 1
123 [255]
123 [255]
124
124
pretxncommit hook can see changeset, can roll back txn, changeset is no
longer there afterwards

  $ echo 'pretxncommit.forbid0 = hg tip -q' >> .hg/hgrc
  $ echo 'pretxncommit.forbid1 = python "$TESTDIR"/printenv.py pretxncommit.forbid 1' >> .hg/hgrc
  $ echo z > z
  $ hg add z
  $ hg -q tip
  4:539e4b31b6dc
  $ hg commit -m 'fail' -d '4 0'
  precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
  pretxncommit hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
  5:6f611f8018c1
  5:6f611f8018c1
  pretxncommit.forbid hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
  transaction abort!
  rollback completed
  abort: pretxncommit.forbid1 hook exited with status 1
  [255]
  $ hg -q tip
  4:539e4b31b6dc

precommit hook can prevent commit

  $ echo 'precommit.forbid = python "$TESTDIR"/printenv.py precommit.forbid 1' >> .hg/hgrc
  $ hg commit -m 'fail' -d '4 0'
  precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
  precommit.forbid hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
  abort: precommit.forbid hook exited with status 1
  [255]
  $ hg -q tip
  4:539e4b31b6dc

preupdate hook can prevent update

  $ echo 'preupdate = python "$TESTDIR"/printenv.py preupdate' >> .hg/hgrc
  $ hg update 1
  preupdate hook: HG_PARENT1=ab228980c14d
  0 files updated, 0 files merged, 2 files removed, 0 files unresolved

update hook

  $ echo 'update = python "$TESTDIR"/printenv.py update' >> .hg/hgrc
  $ hg update
  preupdate hook: HG_PARENT1=539e4b31b6dc
  update hook: HG_ERROR=0 HG_PARENT1=539e4b31b6dc
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved

pushkey hook

  $ echo 'pushkey = python "$TESTDIR"/printenv.py pushkey' >> .hg/hgrc
  $ cd ../b
  $ hg bookmark -r null foo
  $ hg push -B foo ../a
  pushing to ../a
  searching for changes
  no changes found
  pushkey hook: HG_KEY=07f3376c1e655977439df2a814e3cc14b27abac2 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 HG_RET=1
  exporting bookmark foo
  pushkey hook: HG_KEY=foo HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000 HG_RET=1
  $ cd ../a

listkeys hook

  $ echo 'listkeys = python "$TESTDIR"/printenv.py listkeys' >> .hg/hgrc
  $ hg bookmark -r null bar
  $ cd ../b
  $ hg pull -B bar ../a
  pulling from ../a
  listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
  no changes found
  listkeys hook: HG_NAMESPACE=phases HG_VALUES={'539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10': '1', 'publishing': 'True'}
  listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
  importing bookmark bar
  $ cd ../a

test that prepushkey can prevent incoming keys

  $ echo 'prepushkey = python "$TESTDIR"/printenv.py prepushkey.forbid 1' >> .hg/hgrc
  $ cd ../b
  $ hg bookmark -r null baz
  $ hg push -B baz ../a
  pushing to ../a
  searching for changes
  no changes found
  listkeys hook: HG_NAMESPACE=phases HG_VALUES={'539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10': '1', 'publishing': 'True'}
  listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
  listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
  exporting bookmark baz
  prepushkey.forbid hook: HG_KEY=baz HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000
  abort: prepushkey hook exited with status 1
  [255]
  $ cd ../a

test that prelistkeys can prevent listing keys

  $ echo 'prelistkeys = python "$TESTDIR"/printenv.py prelistkeys.forbid 1' >> .hg/hgrc
  $ hg bookmark -r null quux
  $ cd ../b
  $ hg pull -B quux ../a
  pulling from ../a
  prelistkeys.forbid hook: HG_NAMESPACE=bookmarks
  abort: prelistkeys hook exited with status 1
  [255]
  $ cd ../a

prechangegroup hook can prevent incoming changes

  $ cd ../b
  $ hg -q tip
  3:07f3376c1e65
  $ echo '[hooks]' > .hg/hgrc
  $ echo 'prechangegroup.forbid = python "$TESTDIR"/printenv.py prechangegroup.forbid 1' >> .hg/hgrc
  $ hg pull ../a
  pulling from ../a
  searching for changes
  prechangegroup.forbid hook: HG_SOURCE=pull HG_URL=file:$TESTTMP/a
  abort: prechangegroup.forbid hook exited with status 1
  [255]

pretxnchangegroup hook can see incoming changes, can roll back txn,
incoming changes are no longer there afterwards
247
247
248 $ echo '[hooks]' > .hg/hgrc
248 $ echo '[hooks]' > .hg/hgrc
249 $ echo 'pretxnchangegroup.forbid0 = hg tip -q' >> .hg/hgrc
249 $ echo 'pretxnchangegroup.forbid0 = hg tip -q' >> .hg/hgrc
250 $ echo 'pretxnchangegroup.forbid1 = python "$TESTDIR"/printenv.py pretxnchangegroup.forbid 1' >> .hg/hgrc
250 $ echo 'pretxnchangegroup.forbid1 = python "$TESTDIR"/printenv.py pretxnchangegroup.forbid 1' >> .hg/hgrc
251 $ hg pull ../a
251 $ hg pull ../a
252 pulling from ../a
252 pulling from ../a
253 searching for changes
253 searching for changes
254 adding changesets
254 adding changesets
255 adding manifests
255 adding manifests
256 adding file changes
256 adding file changes
257 added 1 changesets with 1 changes to 1 files
257 added 1 changesets with 1 changes to 1 files
258 4:539e4b31b6dc
258 4:539e4b31b6dc
259 pretxnchangegroup.forbid hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/b HG_SOURCE=pull HG_URL=file:$TESTTMP/a
259 pretxnchangegroup.forbid hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/b HG_SOURCE=pull HG_URL=file:$TESTTMP/a
260 transaction abort!
260 transaction abort!
261 rollback completed
261 rollback completed
262 abort: pretxnchangegroup.forbid1 hook exited with status 1
262 abort: pretxnchangegroup.forbid1 hook exited with status 1
263 [255]
263 [255]
264 $ hg -q tip
264 $ hg -q tip
265 3:07f3376c1e65
265 3:07f3376c1e65
266
266
outgoing hooks can see env vars

  $ rm .hg/hgrc
  $ echo '[hooks]' > ../a/.hg/hgrc
  $ echo 'preoutgoing = python "$TESTDIR"/printenv.py preoutgoing' >> ../a/.hg/hgrc
  $ echo 'outgoing = python "$TESTDIR"/printenv.py outgoing' >> ../a/.hg/hgrc
  $ hg pull ../a
  pulling from ../a
  searching for changes
  preoutgoing hook: HG_SOURCE=pull
  adding changesets
  outgoing hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_SOURCE=pull
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  (run 'hg update' to get a working copy)
  $ hg rollback
  repository tip rolled back to revision 3 (undo pull)

preoutgoing hook can prevent outgoing changes

  $ echo 'preoutgoing.forbid = python "$TESTDIR"/printenv.py preoutgoing.forbid 1' >> ../a/.hg/hgrc
  $ hg pull ../a
  pulling from ../a
  searching for changes
  preoutgoing hook: HG_SOURCE=pull
  preoutgoing.forbid hook: HG_SOURCE=pull
  abort: preoutgoing.forbid hook exited with status 1
  [255]

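An in-process version of such a veto might look like this (a sketch;
'forbid_pull' is a made-up name):

    def forbid_pull(ui, repo, source=None, **kwargs):
        # preoutgoing hooks only receive the operation source ('pull',
        # 'clone', ...), so the veto is per-operation, not per-changeset
        if source == 'pull':
            ui.warn('serving pulls from this repository is disabled\n')
            return True    # truthy return -> abort before data is sent
        return False
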
outgoing hooks work for local clones

  $ cd ..
  $ echo '[hooks]' > a/.hg/hgrc
  $ echo 'preoutgoing = python "$TESTDIR"/printenv.py preoutgoing' >> a/.hg/hgrc
  $ echo 'outgoing = python "$TESTDIR"/printenv.py outgoing' >> a/.hg/hgrc
  $ hg clone a c
  preoutgoing hook: HG_SOURCE=clone
  outgoing hook: HG_NODE=0000000000000000000000000000000000000000 HG_SOURCE=clone
  updating to branch default
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ rm -rf c

preoutgoing hook can prevent outgoing changes for local clones

  $ echo 'preoutgoing.forbid = python "$TESTDIR"/printenv.py preoutgoing.forbid 1' >> a/.hg/hgrc
  $ hg clone a zzz
  preoutgoing hook: HG_SOURCE=clone
  preoutgoing.forbid hook: HG_SOURCE=clone
  abort: preoutgoing.forbid hook exited with status 1
  [255]
  $ cd b

  $ cat > hooktests.py <<EOF
  > from mercurial import util
  >
  > uncallable = 0
  >
  > def printargs(args):
  >     args.pop('ui', None)
  >     args.pop('repo', None)
  >     a = list(args.items())
  >     a.sort()
  >     print 'hook args:'
  >     for k, v in a:
  >         print ' ', k, v
  >
  > def passhook(**args):
  >     printargs(args)
  >
  > def failhook(**args):
  >     printargs(args)
  >     return True
  >
  > class LocalException(Exception):
  >     pass
  >
  > def raisehook(**args):
  >     raise LocalException('exception from hook')
  >
  > def aborthook(**args):
  >     raise util.Abort('raise abort from hook')
  >
  > def brokenhook(**args):
  >     return 1 + {}
  >
  > def verbosehook(ui, **args):
  >     ui.note('verbose output from hook\n')
  >
  > def printtags(ui, repo, **args):
  >     print repo.tags().keys()
  >
  > class container:
  >     unreachable = 1
  > EOF

test python hooks

  $ PYTHONPATH="`pwd`:$PYTHONPATH"
  $ export PYTHONPATH

  $ echo '[hooks]' > ../a/.hg/hgrc
  $ echo 'preoutgoing.broken = python:hooktests.brokenhook' >> ../a/.hg/hgrc
  $ hg pull ../a 2>&1 | grep 'raised an exception'
  error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'

  $ echo '[hooks]' > ../a/.hg/hgrc
  $ echo 'preoutgoing.raise = python:hooktests.raisehook' >> ../a/.hg/hgrc
  $ hg pull ../a 2>&1 | grep 'raised an exception'
  error: preoutgoing.raise hook raised an exception: exception from hook

  $ echo '[hooks]' > ../a/.hg/hgrc
  $ echo 'preoutgoing.abort = python:hooktests.aborthook' >> ../a/.hg/hgrc
  $ hg pull ../a
  pulling from ../a
  searching for changes
  error: preoutgoing.abort hook failed: raise abort from hook
  abort: raise abort from hook
  [255]

  $ echo '[hooks]' > ../a/.hg/hgrc
  $ echo 'preoutgoing.fail = python:hooktests.failhook' >> ../a/.hg/hgrc
  $ hg pull ../a
  pulling from ../a
  searching for changes
  hook args:
    hooktype preoutgoing
    source pull
  abort: preoutgoing.fail hook failed
  [255]

  $ echo '[hooks]' > ../a/.hg/hgrc
  $ echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
  $ hg pull ../a
  pulling from ../a
  searching for changes
  abort: preoutgoing.uncallable hook is invalid ("hooktests.uncallable" is not callable)
  [255]

  $ echo '[hooks]' > ../a/.hg/hgrc
  $ echo 'preoutgoing.nohook = python:hooktests.nohook' >> ../a/.hg/hgrc
  $ hg pull ../a
  pulling from ../a
  searching for changes
  abort: preoutgoing.nohook hook is invalid ("hooktests.nohook" is not defined)
  [255]

  $ echo '[hooks]' > ../a/.hg/hgrc
  $ echo 'preoutgoing.nomodule = python:nomodule' >> ../a/.hg/hgrc
  $ hg pull ../a
  pulling from ../a
  searching for changes
  abort: preoutgoing.nomodule hook is invalid ("nomodule" not in a module)
  [255]

  $ echo '[hooks]' > ../a/.hg/hgrc
  $ echo 'preoutgoing.badmodule = python:nomodule.nowhere' >> ../a/.hg/hgrc
  $ hg pull ../a
  pulling from ../a
  searching for changes
  abort: preoutgoing.badmodule hook is invalid (import of "nomodule" failed)
  [255]

  $ echo '[hooks]' > ../a/.hg/hgrc
  $ echo 'preoutgoing.unreachable = python:hooktests.container.unreachable' >> ../a/.hg/hgrc
  $ hg pull ../a
  pulling from ../a
  searching for changes
  abort: preoutgoing.unreachable hook is invalid (import of "hooktests.container" failed)
  [255]

  $ echo '[hooks]' > ../a/.hg/hgrc
  $ echo 'preoutgoing.pass = python:hooktests.passhook' >> ../a/.hg/hgrc
  $ hg pull ../a
  pulling from ../a
  searching for changes
  hook args:
    hooktype preoutgoing
    source pull
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  (run 'hg update' to get a working copy)

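Taken together, the runs above pin down how each outcome of a Python hook
is reported. Roughly, and only as a sketch of the dispatch (not the actual
hooks.py code):

    from mercurial import util

    def callhook(ui, name, func, args):
        try:
            r = func(ui=ui, **args)
        except util.Abort, inst:
            ui.warn('error: %s hook failed: %s\n' % (name, inst))
            raise
        except Exception, inst:
            ui.warn('error: %s hook raised an exception: %s\n' % (name, inst))
            raise
        return bool(r)    # truthy -> hook failure, falsy -> success
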
make sure --traceback works

  $ echo '[hooks]' > .hg/hgrc
  $ echo 'commit.abort = python:hooktests.aborthook' >> .hg/hgrc

  $ echo aa > a
  $ hg --traceback commit -d '0 0' -ma 2>&1 | grep '^Traceback'
  Traceback (most recent call last):

  $ cd ..
  $ hg init c
  $ cd c

  $ cat > hookext.py <<EOF
  > def autohook(**args):
  >     print "Automatically installed hook"
  >
  > def reposetup(ui, repo):
  >     repo.ui.setconfig("hooks", "commit.auto", autohook)
  > EOF
  $ echo '[extensions]' >> .hg/hgrc
  $ echo 'hookext = hookext.py' >> .hg/hgrc

  $ touch foo
  $ hg add foo
  $ hg ci -d '0 0' -m 'add foo'
  Automatically installed hook
  $ echo >> foo
  $ hg ci --debug -d '0 0' -m 'change foo'
  foo
  calling hook commit.auto: <function autohook at *> (glob)
  Automatically installed hook
  committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708

  $ hg showconfig hooks
  hooks.commit.auto=<function autohook at *> (glob)

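Note that a hook installed from reposetup is a Python callable rather than
a string, which is why showconfig prints a function repr above. A
hypothetical variant of the same extension:

    def reposetup(ui, repo):
        def noisyhook(ui, repo, node=None, **kwargs):
            # for commit hooks, 'node' is the hex id of the new changeset
            ui.status('committed %s\n' % node[:12])
        repo.ui.setconfig('hooks', 'commit.noisy', noisyhook)
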
test python hook configured with python:[file]:[hook] syntax

  $ cd ..
  $ mkdir d
  $ cd d
  $ hg init repo
  $ mkdir hooks

  $ cd hooks
  $ cat > testhooks.py <<EOF
  > def testhook(**args):
  >     print 'hook works'
  > EOF
  $ echo '[hooks]' > ../repo/.hg/hgrc
  $ echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc

  $ cd ../repo
  $ hg commit -d '0 0'
  hook works
  nothing changed
  [1]

  $ cd ../../b

make sure --traceback works on hook import failure

  $ cat > importfail.py <<EOF
  > import somebogusmodule
  > # dereference something in the module to force demandimport to load it
  > somebogusmodule.whatever
  > EOF

  $ echo '[hooks]' > .hg/hgrc
  $ echo 'precommit.importfail = python:importfail.whatever' >> .hg/hgrc

  $ echo a >> a
  $ hg --traceback commit -ma 2>&1 | egrep '^(exception|Traceback|ImportError)'
  exception from first failed import attempt:
  Traceback (most recent call last):
  ImportError: No module named somebogusmodule
  exception from second failed import attempt:
  Traceback (most recent call last):
  ImportError: No module named hgext_importfail
  Traceback (most recent call last):

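The two tracebacks come from the two import attempts the hook loader makes:
once with the literal module name and once with an 'hgext_' prefix, as the
second ImportError above shows. Schematically (a sketch, not the real
loader):

    def loadmodule(name):
        try:
            return __import__(name)
        except ImportError:
            # second chance under the bundled-extension naming scheme
            return __import__('hgext_%s' % name)
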
Issue1827: Hooks Update & Commit not completely post operation

commit and update hooks should run after command completion

  $ echo '[hooks]' > .hg/hgrc
  $ echo 'commit = hg id' >> .hg/hgrc
  $ echo 'update = hg id' >> .hg/hgrc
  $ echo bb > a
  $ hg ci -ma
  223eafe2750c tip
  $ hg up 0
  cb9a9f314b8b
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

make sure --verbose (and --quiet/--debug etc.) are propagated to the local ui
that is passed to pre/post hooks

  $ echo '[hooks]' > .hg/hgrc
  $ echo 'pre-identify = python:hooktests.verbosehook' >> .hg/hgrc
  $ hg id
  cb9a9f314b8b
  $ hg id --verbose
  calling hook pre-identify: hooktests.verbosehook
  verbose output from hook
  cb9a9f314b8b

Ensure hooks can be prioritized

  $ echo '[hooks]' > .hg/hgrc
  $ echo 'pre-identify.a = python:hooktests.verbosehook' >> .hg/hgrc
  $ echo 'pre-identify.b = python:hooktests.verbosehook' >> .hg/hgrc
  $ echo 'priority.pre-identify.b = 1' >> .hg/hgrc
  $ echo 'pre-identify.c = python:hooktests.verbosehook' >> .hg/hgrc
  $ hg id --verbose
  calling hook pre-identify.b: hooktests.verbosehook
  verbose output from hook
  calling hook pre-identify.a: hooktests.verbosehook
  verbose output from hook
  calling hook pre-identify.c: hooktests.verbosehook
  verbose output from hook
  cb9a9f314b8b
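
The order follows the optional priority.<hook> value (default 0), highest
first, with configuration order breaking ties. Conceptually (a sketch, not
Mercurial code):

    hooks = [('pre-identify.a', 0), ('pre-identify.b', 1), ('pre-identify.c', 0)]
    for name, prio in sorted(hooks, key=lambda h: -h[1]):
        print name    # b first, then a and c in definition order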

new tags must be visible in pretxncommit (issue3210)

  $ echo 'pretxncommit.printtags = python:hooktests.printtags' >> .hg/hgrc
  $ hg tag -f foo
  ['a', 'foo', 'tip']

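This is the regression test for the fix in this changeset: before it, the
repository's memoized tag cache was not refreshed when 'hg tag' wrote the
new tag, so repo.tags() inside the pretxncommit hook still showed the
pre-tag state. With the cache invalidated right after the tag is added, a
hook like hooktests.printtags sees the new entry; an equivalent standalone
hook (a sketch, with sorted output for stability):

    def checktags(ui, repo, **kwargs):
        # with the fix, 'foo' is already listed while the tagging
        # transaction is still open
        ui.write('%s\n' % sorted(repo.tags().keys()))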