##// END OF EJS Templates
repovfs: add a ward to check if locks are properly taken...
Boris Feld -
r33436:9bb4decd default
parent child Browse files
Show More
@@ -1,264 +1,265 b''
1 # blackbox.py - log repository events to a file for post-mortem debugging
1 # blackbox.py - log repository events to a file for post-mortem debugging
2 #
2 #
3 # Copyright 2010 Nicolas Dumazet
3 # Copyright 2010 Nicolas Dumazet
4 # Copyright 2013 Facebook, Inc.
4 # Copyright 2013 Facebook, Inc.
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """log repository events to a blackbox for debugging
9 """log repository events to a blackbox for debugging
10
10
11 Logs event information to .hg/blackbox.log to help debug and diagnose problems.
11 Logs event information to .hg/blackbox.log to help debug and diagnose problems.
12 The events that get logged can be configured via the blackbox.track config key.
12 The events that get logged can be configured via the blackbox.track config key.
13
13
14 Examples::
14 Examples::
15
15
16 [blackbox]
16 [blackbox]
17 track = *
17 track = *
18 # dirty is *EXPENSIVE* (slow);
18 # dirty is *EXPENSIVE* (slow);
19 # each log entry indicates `+` if the repository is dirty, like :hg:`id`.
19 # each log entry indicates `+` if the repository is dirty, like :hg:`id`.
20 dirty = True
20 dirty = True
21 # record the source of log messages
21 # record the source of log messages
22 logsource = True
22 logsource = True
23
23
24 [blackbox]
24 [blackbox]
25 track = command, commandfinish, commandexception, exthook, pythonhook
25 track = command, commandfinish, commandexception, exthook, pythonhook
26
26
27 [blackbox]
27 [blackbox]
28 track = incoming
28 track = incoming
29
29
30 [blackbox]
30 [blackbox]
31 # limit the size of a log file
31 # limit the size of a log file
32 maxsize = 1.5 MB
32 maxsize = 1.5 MB
33 # rotate up to N log files when the current one gets too big
33 # rotate up to N log files when the current one gets too big
34 maxfiles = 3
34 maxfiles = 3
35
35
36 """
36 """
37
37
38 from __future__ import absolute_import
38 from __future__ import absolute_import
39
39
40 import errno
40 import errno
41 import re
41 import re
42
42
43 from mercurial.i18n import _
43 from mercurial.i18n import _
44 from mercurial.node import hex
44 from mercurial.node import hex
45
45
46 from mercurial import (
46 from mercurial import (
47 registrar,
47 registrar,
48 ui as uimod,
48 ui as uimod,
49 util,
49 util,
50 )
50 )
51
51
52 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
52 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
53 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
53 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
54 # be specifying the version(s) of Mercurial they are tested with, or
54 # be specifying the version(s) of Mercurial they are tested with, or
55 # leave the attribute unspecified.
55 # leave the attribute unspecified.
56 testedwith = 'ships-with-hg-core'
56 testedwith = 'ships-with-hg-core'
57
57
58 cmdtable = {}
58 cmdtable = {}
59 command = registrar.command(cmdtable)
59 command = registrar.command(cmdtable)
60
60
61 configtable = {}
61 configtable = {}
62 configitem = registrar.configitem(configtable)
62 configitem = registrar.configitem(configtable)
63
63
64 configitem('blackbox', 'dirty',
64 configitem('blackbox', 'dirty',
65 default=False,
65 default=False,
66 )
66 )
67 configitem('blackbox', 'maxsize',
67 configitem('blackbox', 'maxsize',
68 default='1 MB',
68 default='1 MB',
69 )
69 )
70 configitem('blackbox', 'logsource',
70 configitem('blackbox', 'logsource',
71 default=False,
71 default=False,
72 )
72 )
73
73
74 lastui = None
74 lastui = None
75
75
76 filehandles = {}
76 filehandles = {}
77
77
def _openlog(vfs):
    """Return an append-mode file object for this vfs's blackbox.log.

    Handles are cached per log path in the module-level ``filehandles``
    map so repeated log calls reuse a single open file.
    """
    logpath = vfs.join('blackbox.log')
    cached = filehandles.get(logpath)
    if cached is not None:
        return cached
    handle = vfs('blackbox.log', 'a')
    filehandles[logpath] = handle
    return handle
84
84
def _closelog(vfs):
    """Close the cached blackbox.log handle for this vfs and drop it
    from the ``filehandles`` cache.

    Raises KeyError if no handle is cached for this vfs (same as the
    original lookup-then-del sequence).
    """
    logpath = vfs.join('blackbox.log')
    handle = filehandles.pop(logpath)
    handle.close()
90
90
def wrapui(ui):
    """Replace ``ui.__class__`` with a blackbox-aware subclass.

    The subclass intercepts ``ui.log()`` calls and appends matching events
    to ``.hg/blackbox.log`` in the active repository.  It also rebinds
    ``uimod.ui`` to the subclass so uis created later are wrapped too.
    """
    class blackboxui(ui.__class__):
        def __init__(self, src=None):
            super(blackboxui, self).__init__(src)
            if src is None:
                self._partialinit()
            else:
                # inherit blackbox state from the ui we are copied from
                self._bbfp = getattr(src, '_bbfp', None)
                self._bbinlog = False
                self._bbrepo = getattr(src, '_bbrepo', None)
                self._bbvfs = getattr(src, '_bbvfs', None)

        def _partialinit(self):
            """Initialize blackbox attributes exactly once.

            A ui instance may predate this wrapping (see copy() below), so
            the presence of ``_bbvfs`` is used as the "already initialized"
            marker and repeated calls are no-ops.
            """
            if util.safehasattr(self, '_bbvfs'):
                return
            self._bbfp = None      # lazily-opened blackbox.log handle
            self._bbinlog = False  # recursion guard used by log()
            self._bbrepo = None    # repo bound via setrepo(), if any
            self._bbvfs = None     # vfs used to open/rotate the log

        def copy(self):
            # the copy source may be an unwrapped ui lacking our
            # attributes; make sure they exist before cloning
            self._partialinit()
            return self.__class__(self)

        @util.propertycache
        def track(self):
            # event names (or '*') that should be written to the log;
            # cached for the lifetime of this ui instance
            return self.configlist('blackbox', 'track', ['*'])

        def _openlogfile(self):
            """Open blackbox.log, rotating old logs if it grew too big.

            Rotation renames blackbox.log -> .1 -> .2 ... up to
            ``blackbox.maxfiles``; a fresh handle is returned afterwards.
            """
            def rotate(oldpath, newpath):
                try:
                    self._bbvfs.unlink(newpath)
                except OSError as err:
                    # a missing target is fine; anything else is reported
                    # but intentionally non-fatal (best-effort logging)
                    if err.errno != errno.ENOENT:
                        self.debug("warning: cannot remove '%s': %s\n" %
                                   (newpath, err.strerror))
                try:
                    # newpath may be False when maxfiles <= 0 (see caller)
                    if newpath:
                        self._bbvfs.rename(oldpath, newpath)
                except OSError as err:
                    if err.errno != errno.ENOENT:
                        self.debug("warning: cannot rename '%s' to '%s': %s\n" %
                                   (newpath, oldpath, err.strerror))

            fp = _openlog(self._bbvfs)
            maxsize = self.configbytes('blackbox', 'maxsize')
            if maxsize > 0:
                st = self._bbvfs.fstat(fp)
                if st.st_size >= maxsize:
                    path = fp.name
                    _closelog(self._bbvfs)
                    maxfiles = self.configint('blackbox', 'maxfiles', 7)
                    # shift blackbox.log.(i-1) onto blackbox.log.i,
                    # oldest first so nothing is overwritten
                    for i in xrange(maxfiles - 1, 1, -1):
                        rotate(oldpath='%s.%d' % (path, i - 1),
                               newpath='%s.%d' % (path, i))
                    rotate(oldpath=path,
                           newpath=maxfiles > 0 and path + '.1')
                    fp = _openlog(self._bbvfs)
            return fp

        def _bbwrite(self, fmt, *args):
            # write and flush immediately so the log survives crashes
            self._bbfp.write(fmt % args)
            self._bbfp.flush()

        def log(self, event, *msg, **opts):
            """Append *event* to blackbox.log if it matches blackbox.track."""
            global lastui
            super(blackboxui, self).log(event, *msg, **opts)
            self._partialinit()

            if not '*' in self.track and not event in self.track:
                return

            if self._bbfp:
                ui = self
            elif self._bbvfs:
                try:
                    self._bbfp = self._openlogfile()
                except (IOError, OSError) as err:
                    self.debug('warning: cannot write to blackbox.log: %s\n' %
                               err.strerror)
                    # give up on this vfs so we don't retry every call
                    del self._bbvfs
                    self._bbfp = None
                ui = self
            else:
                # certain ui instances exist outside the context of
                # a repo, so just default to the last blackbox that
                # was seen.
                ui = lastui

            if not ui or not ui._bbfp:
                return
            # prefer remembering a repo-bound ui for future repo-less logs
            if not lastui or ui._bbrepo:
                lastui = ui
            if ui._bbinlog:
                # recursion guard
                return
            try:
                ui._bbinlog = True
                default = self.configdate('devel', 'default-date')
                date = util.datestr(default, '%Y/%m/%d %H:%M:%S')
                user = util.getuser()
                pid = '%d' % util.getpid()
                formattedmsg = msg[0] % msg[1:]
                rev = '(unknown)'
                changed = ''
                if ui._bbrepo:
                    # record the working directory parent(s), like `hg id`
                    ctx = ui._bbrepo[None]
                    parents = ctx.parents()
                    rev = ('+'.join([hex(p.node()) for p in parents]))
                    # dirty check is expensive; opt-in via blackbox.dirty
                    if (ui.configbool('blackbox', 'dirty') and
                        ctx.dirty(missing=True, merge=False, branch=False)):
                        changed = '+'
                if ui.configbool('blackbox', 'logsource'):
                    src = ' [%s]' % event
                else:
                    src = ''
                try:
                    ui._bbwrite('%s %s @%s%s (%s)%s> %s',
                                date, user, rev, changed, pid, src, formattedmsg)
                except IOError as err:
                    self.debug('warning: cannot write to blackbox.log: %s\n' %
                               err.strerror)
            finally:
                ui._bbinlog = False

        def setrepo(self, repo):
            """Bind this ui to *repo*; resets any open log handle."""
            self._bbfp = None
            self._bbinlog = False
            self._bbrepo = repo
            self._bbvfs = repo.vfs

    ui.__class__ = blackboxui
    uimod.ui = blackboxui
224
224
def uisetup(ui):
    """Extension entry point: wrap the first ui with blackbox logging."""
    wrapui(ui)
227
227
def reposetup(ui, repo):
    """Bind the blackbox to a local repository.

    Also declares blackbox.log as writable without holding the wlock,
    since logging must work even outside locked operations.
    """
    # During 'hg pull' a httppeer repo is created to represent the remote repo.
    # It doesn't have a .hg directory to put a blackbox in, so we don't do
    # the blackbox setup for it.
    if not repo.local():
        return

    # the ui may not be wrapped if another extension replaced its class
    if util.safehasattr(ui, 'setrepo'):
        ui.setrepo(repo)
    repo._wlockfreeprefix.add('blackbox.log')
237
238
@command('^blackbox',
    [('l', 'limit', 10, _('the number of events to show')),
    ],
    _('hg blackbox [OPTION]...'))
def blackbox(ui, repo, *revs, **opts):
    '''view the recent repository events

    Shows the last ``--limit`` commands recorded in .hg/blackbox.log,
    including any non-command lines logged between them.  Does nothing
    if no blackbox.log exists yet.
    '''

    if not repo.vfs.exists('blackbox.log'):
        return

    limit = opts.get('limit')
    # close the handle once we have the content; the original leaked it
    fp = repo.vfs('blackbox.log', 'r')
    try:
        lines = fp.read().split('\n')
    finally:
        fp.close()

    count = 0
    output = []
    for line in reversed(lines):
        if count >= limit:
            break

        # count the commands by matching lines like: 2013/01/23 19:13:36 root>
        if re.match('^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} .*> .*', line):
            count += 1
        output.append(line)

    ui.status('\n'.join(reversed(output)))
@@ -1,513 +1,514 b''
1 # journal.py
1 # journal.py
2 #
2 #
3 # Copyright 2014-2016 Facebook, Inc.
3 # Copyright 2014-2016 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """track previous positions of bookmarks (EXPERIMENTAL)
7 """track previous positions of bookmarks (EXPERIMENTAL)
8
8
9 This extension adds a new command: `hg journal`, which shows you where
9 This extension adds a new command: `hg journal`, which shows you where
10 bookmarks were previously located.
10 bookmarks were previously located.
11
11
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import collections
16 import collections
17 import errno
17 import errno
18 import os
18 import os
19 import weakref
19 import weakref
20
20
21 from mercurial.i18n import _
21 from mercurial.i18n import _
22
22
23 from mercurial import (
23 from mercurial import (
24 bookmarks,
24 bookmarks,
25 cmdutil,
25 cmdutil,
26 dispatch,
26 dispatch,
27 error,
27 error,
28 extensions,
28 extensions,
29 hg,
29 hg,
30 localrepo,
30 localrepo,
31 lock,
31 lock,
32 node,
32 node,
33 registrar,
33 registrar,
34 util,
34 util,
35 )
35 )
36
36
37 from . import share
37 from . import share
38
38
39 cmdtable = {}
39 cmdtable = {}
40 command = registrar.command(cmdtable)
40 command = registrar.command(cmdtable)
41
41
42 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
42 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
43 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
43 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
44 # be specifying the version(s) of Mercurial they are tested with, or
44 # be specifying the version(s) of Mercurial they are tested with, or
45 # leave the attribute unspecified.
45 # leave the attribute unspecified.
46 testedwith = 'ships-with-hg-core'
46 testedwith = 'ships-with-hg-core'
47
47
48 # storage format version; increment when the format changes
48 # storage format version; increment when the format changes
49 storageversion = 0
49 storageversion = 0
50
50
51 # namespaces
51 # namespaces
52 bookmarktype = 'bookmark'
52 bookmarktype = 'bookmark'
53 wdirparenttype = 'wdirparent'
53 wdirparenttype = 'wdirparent'
54 # In a shared repository, what shared feature name is used
54 # In a shared repository, what shared feature name is used
55 # to indicate this namespace is shared with the source?
55 # to indicate this namespace is shared with the source?
56 sharednamespaces = {
56 sharednamespaces = {
57 bookmarktype: hg.sharedbookmarks,
57 bookmarktype: hg.sharedbookmarks,
58 }
58 }
59
59
60 # Journal recording, register hooks and storage object
60 # Journal recording, register hooks and storage object
# Journal recording, register hooks and storage object
def extsetup(ui):
    """Install the wrappers that feed journal entries.

    Hooks command dispatch (to capture the command line), bookmark writes,
    dirstate creation, and the share/unshare machinery.
    """
    extensions.wrapfunction(dispatch, 'runcommand', runcommand)
    extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
    extensions.wrapfilecache(
        localrepo.localrepository, 'dirstate', wrapdirstate)
    extensions.wrapfunction(hg, 'postshare', wrappostshare)
    extensions.wrapfunction(hg, 'copystore', unsharejournal)
68
68
def reposetup(ui, repo):
    """Attach journal storage to local repositories.

    The journal file uses its own lock, so 'namejournal' is declared
    writable without the wlock.
    """
    if repo.local():
        repo.journal = journalstorage(repo)
        repo._wlockfreeprefix.add('namejournal')

        dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
        if cached:
            # already instantiated dirstate isn't yet marked as
            # "journal"-ing, even though repo.dirstate() was already
            # wrapped by own wrapdirstate()
            _setupdirstate(repo, dirstate)
79
80
def runcommand(orig, lui, repo, cmd, fullargs, *args):
    """Track the command line options for recording in the journal"""
    # stored on the journalstorage class so it is visible even for
    # repos created later in the same process (e.g. clone targets)
    journalstorage.recordcommand(*fullargs)
    return orig(lui, repo, cmd, fullargs, *args)
84
85
def _setupdirstate(repo, dirstate):
    """Wire *dirstate* so its parent changes are recorded in repo.journal."""
    dirstate.journalstorage = repo.journal
    dirstate.addparentchangecallback('journal', recorddirstateparents)
88
89
89 # hooks to record dirstate changes
90 # hooks to record dirstate changes
# hooks to record dirstate changes
def wrapdirstate(orig, repo):
    """Make journal storage available to the dirstate object"""
    dirstate = orig(repo)
    # non-local repos (see reposetup) have no journal attribute
    if util.safehasattr(repo, 'journal'):
        _setupdirstate(repo, dirstate)
    return dirstate
96
97
def recorddirstateparents(dirstate, old, new):
    """Records all dirstate parent changes in the journal.

    *old* and *new* are pairs of binary nodes (second one nullid when
    there is no merge in progress).
    """
    old = list(old)
    new = list(new)
    if util.safehasattr(dirstate, 'journalstorage'):
        # only record two hashes if there was a merge
        oldhashes = old[:1] if old[1] == node.nullid else old
        newhashes = new[:1] if new[1] == node.nullid else new
        dirstate.journalstorage.record(
            wdirparenttype, '.', oldhashes, newhashes)
107
108
108 # hooks to record bookmark changes (both local and remote)
109 # hooks to record bookmark changes (both local and remote)
# hooks to record bookmark changes (both local and remote)
def recordbookmarks(orig, store, fp):
    """Records all bookmark changes in the journal.

    Diffs the in-memory store against a freshly-loaded bmstore to find
    what changed, then delegates the actual write to *orig*.
    """
    repo = store._repo
    if util.safehasattr(repo, 'journal'):
        oldmarks = bookmarks.bmstore(repo)
        for mark, value in store.iteritems():
            oldvalue = oldmarks.get(mark, node.nullid)
            if value != oldvalue:
                repo.journal.record(bookmarktype, mark, oldvalue, value)
    return orig(store, fp)
119
120
120 # shared repository support
121 # shared repository support
121 def _readsharedfeatures(repo):
122 def _readsharedfeatures(repo):
122 """A set of shared features for this repository"""
123 """A set of shared features for this repository"""
123 try:
124 try:
124 return set(repo.vfs.read('shared').splitlines())
125 return set(repo.vfs.read('shared').splitlines())
125 except IOError as inst:
126 except IOError as inst:
126 if inst.errno != errno.ENOENT:
127 if inst.errno != errno.ENOENT:
127 raise
128 raise
128 return set()
129 return set()
129
130
def _mergeentriesiter(*iterables, **kwargs):
    """Given a set of sorted iterables, yield the next entry in merged order

    Note that by default entries go from most recent to oldest.

    ``order`` (keyword-only, default ``max``) selects which head entry is
    yielded next, so ``order=min`` merges oldest-first.
    """
    order = kwargs.pop('order', max)
    iterables = [iter(it) for it in iterables]
    # this tracks still active iterables; iterables are deleted as they are
    # exhausted, which is why this is a dictionary and why each entry also
    # stores the key. Entries are mutable so we can store the next value each
    # time.
    iterable_map = {}
    for key, it in enumerate(iterables):
        try:
            iterable_map[key] = [next(it), key, it]
        except StopIteration:
            # empty entry, can be ignored
            pass

    while iterable_map:
        # pick the best head among the active iterables; entries compare
        # by their first element (the journal entry itself)
        value, key, it = order(iterable_map.itervalues())
        yield value
        try:
            iterable_map[key][0] = next(it)
        except StopIteration:
            # this iterable is empty, remove it from consideration
            del iterable_map[key]
158
158 def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
    """Mark this shared working copy as sharing journal information"""
    with destrepo.wlock():
        orig(sourcerepo, destrepo, **kwargs)
        # append, not overwrite: 'shared' may already list other features
        with destrepo.vfs('shared', 'a') as fp:
            fp.write('journal\n')
164
165
def unsharejournal(orig, ui, repo, repopath):
    """Copy shared journal entries into this repo when unsharing

    Merges the source repo's shared entries with this repo's local ones
    (oldest first) into a fresh 'namejournal' before delegating to *orig*.
    """
    if (repo.path == repopath and repo.shared() and
            util.safehasattr(repo, 'journal')):
        sharedrepo = share._getsrcrepo(repo)
        sharedfeatures = _readsharedfeatures(repo)
        if sharedrepo and sharedfeatures > {'journal'}:
            # there is a shared repository and there are shared journal entries
            # to copy. move shared date over from source to destination but
            # move the local file first
            if repo.vfs.exists('namejournal'):
                journalpath = repo.vfs.join('namejournal')
                util.rename(journalpath, journalpath + '.bak')
            storage = repo.journal
            local = storage._open(
                repo.vfs, filename='namejournal.bak', _newestfirst=False)
            # only copy entries whose namespace is actually shared
            shared = (
                e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
                if sharednamespaces.get(e.namespace) in sharedfeatures)
            for entry in _mergeentriesiter(local, shared, order=min):
                storage._write(repo.vfs, entry)

    return orig(ui, repo, repopath)
188
189
class journalentry(collections.namedtuple(
    u'journalentry',
    u'timestamp user command namespace name oldhashes newhashes')):
    """Individual journal entry

    * timestamp: a mercurial (time, timezone) tuple
    * user: the username that ran the command
    * namespace: the entry namespace, an opaque string
    * name: the name of the changed item, opaque string with meaning in the
      namespace
    * command: the hg command that triggered this record
    * oldhashes: a tuple of one or more binary hashes for the old location
    * newhashes: a tuple of one or more binary hashes for the new location

    Handles serialisation from and to the storage format. Fields are
    separated by newlines, hashes are written out in hex separated by commas,
    timestamp and timezone are separated by a space.

    """
    @classmethod
    def fromstorage(cls, line):
        """Parse one newline-separated storage record into an entry."""
        (time, user, command, namespace, name,
         oldhashes, newhashes) = line.split('\n')
        stamp, tz = time.split()
        parsedtime = (float(stamp), int(tz))
        oldnodes = tuple(node.bin(h) for h in oldhashes.split(','))
        newnodes = tuple(node.bin(h) for h in newhashes.split(','))
        return cls(parsedtime, user, command, namespace, name,
                   oldnodes, newnodes)

    def __str__(self):
        """String representation for storage"""
        time = '%s %s' % self.timestamp
        oldhashes = ','.join(node.hex(h) for h in self.oldhashes)
        newhashes = ','.join(node.hex(h) for h in self.newhashes)
        return '\n'.join((
            time, self.user, self.command, self.namespace, self.name,
            oldhashes, newhashes))
229
229 class journalstorage(object):
230 class journalstorage(object):
230 """Storage for journal entries
231 """Storage for journal entries
231
232
232 Entries are divided over two files; one with entries that pertain to the
233 Entries are divided over two files; one with entries that pertain to the
233 local working copy *only*, and one with entries that are shared across
234 local working copy *only*, and one with entries that are shared across
234 multiple working copies when shared using the share extension.
235 multiple working copies when shared using the share extension.
235
236
236 Entries are stored with NUL bytes as separators. See the journalentry
237 Entries are stored with NUL bytes as separators. See the journalentry
237 class for the per-entry structure.
238 class for the per-entry structure.
238
239
239 The file format starts with an integer version, delimited by a NUL.
240 The file format starts with an integer version, delimited by a NUL.
240
241
241 This storage uses a dedicated lock; this makes it easier to avoid issues
242 This storage uses a dedicated lock; this makes it easier to avoid issues
242 with adding entries that added when the regular wlock is unlocked (e.g.
243 with adding entries that added when the regular wlock is unlocked (e.g.
243 the dirstate).
244 the dirstate).
244
245
245 """
246 """
246 _currentcommand = ()
247 _currentcommand = ()
247 _lockref = None
248 _lockref = None
248
249
249 def __init__(self, repo):
250 def __init__(self, repo):
250 self.user = util.getuser()
251 self.user = util.getuser()
251 self.ui = repo.ui
252 self.ui = repo.ui
252 self.vfs = repo.vfs
253 self.vfs = repo.vfs
253
254
254 # is this working copy using a shared storage?
255 # is this working copy using a shared storage?
255 self.sharedfeatures = self.sharedvfs = None
256 self.sharedfeatures = self.sharedvfs = None
256 if repo.shared():
257 if repo.shared():
257 features = _readsharedfeatures(repo)
258 features = _readsharedfeatures(repo)
258 sharedrepo = share._getsrcrepo(repo)
259 sharedrepo = share._getsrcrepo(repo)
259 if sharedrepo is not None and 'journal' in features:
260 if sharedrepo is not None and 'journal' in features:
260 self.sharedvfs = sharedrepo.vfs
261 self.sharedvfs = sharedrepo.vfs
261 self.sharedfeatures = features
262 self.sharedfeatures = features
262
263
263 # track the current command for recording in journal entries
264 # track the current command for recording in journal entries
264 @property
265 @property
265 def command(self):
266 def command(self):
266 commandstr = ' '.join(
267 commandstr = ' '.join(
267 map(util.shellquote, journalstorage._currentcommand))
268 map(util.shellquote, journalstorage._currentcommand))
268 if '\n' in commandstr:
269 if '\n' in commandstr:
269 # truncate multi-line commands
270 # truncate multi-line commands
270 commandstr = commandstr.partition('\n')[0] + ' ...'
271 commandstr = commandstr.partition('\n')[0] + ' ...'
271 return commandstr
272 return commandstr
272
273
273 @classmethod
274 @classmethod
274 def recordcommand(cls, *fullargs):
275 def recordcommand(cls, *fullargs):
275 """Set the current hg arguments, stored with recorded entries"""
276 """Set the current hg arguments, stored with recorded entries"""
276 # Set the current command on the class because we may have started
277 # Set the current command on the class because we may have started
277 # with a non-local repo (cloning for example).
278 # with a non-local repo (cloning for example).
278 cls._currentcommand = fullargs
279 cls._currentcommand = fullargs
279
280
280 def _currentlock(self, lockref):
281 def _currentlock(self, lockref):
281 """Returns the lock if it's held, or None if it's not.
282 """Returns the lock if it's held, or None if it's not.
282
283
283 (This is copied from the localrepo class)
284 (This is copied from the localrepo class)
284 """
285 """
285 if lockref is None:
286 if lockref is None:
286 return None
287 return None
287 l = lockref()
288 l = lockref()
288 if l is None or not l.held:
289 if l is None or not l.held:
289 return None
290 return None
290 return l
291 return l
291
292
292 def jlock(self, vfs):
293 def jlock(self, vfs):
293 """Create a lock for the journal file"""
294 """Create a lock for the journal file"""
294 if self._currentlock(self._lockref) is not None:
295 if self._currentlock(self._lockref) is not None:
295 raise error.Abort(_('journal lock does not support nesting'))
296 raise error.Abort(_('journal lock does not support nesting'))
296 desc = _('journal of %s') % vfs.base
297 desc = _('journal of %s') % vfs.base
297 try:
298 try:
298 l = lock.lock(vfs, 'namejournal.lock', 0, desc=desc)
299 l = lock.lock(vfs, 'namejournal.lock', 0, desc=desc)
299 except error.LockHeld as inst:
300 except error.LockHeld as inst:
300 self.ui.warn(
301 self.ui.warn(
301 _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
302 _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
302 # default to 600 seconds timeout
303 # default to 600 seconds timeout
303 l = lock.lock(
304 l = lock.lock(
304 vfs, 'namejournal.lock',
305 vfs, 'namejournal.lock',
305 int(self.ui.config("ui", "timeout", "600")), desc=desc)
306 int(self.ui.config("ui", "timeout", "600")), desc=desc)
306 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
307 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
307 self._lockref = weakref.ref(l)
308 self._lockref = weakref.ref(l)
308 return l
309 return l
309
310
310 def record(self, namespace, name, oldhashes, newhashes):
311 def record(self, namespace, name, oldhashes, newhashes):
311 """Record a new journal entry
312 """Record a new journal entry
312
313
313 * namespace: an opaque string; this can be used to filter on the type
314 * namespace: an opaque string; this can be used to filter on the type
314 of recorded entries.
315 of recorded entries.
315 * name: the name defining this entry; for bookmarks, this is the
316 * name: the name defining this entry; for bookmarks, this is the
316 bookmark name. Can be filtered on when retrieving entries.
317 bookmark name. Can be filtered on when retrieving entries.
317 * oldhashes and newhashes: each a single binary hash, or a list of
318 * oldhashes and newhashes: each a single binary hash, or a list of
318 binary hashes. These represent the old and new position of the named
319 binary hashes. These represent the old and new position of the named
319 item.
320 item.
320
321
321 """
322 """
322 if not isinstance(oldhashes, list):
323 if not isinstance(oldhashes, list):
323 oldhashes = [oldhashes]
324 oldhashes = [oldhashes]
324 if not isinstance(newhashes, list):
325 if not isinstance(newhashes, list):
325 newhashes = [newhashes]
326 newhashes = [newhashes]
326
327
327 entry = journalentry(
328 entry = journalentry(
328 util.makedate(), self.user, self.command, namespace, name,
329 util.makedate(), self.user, self.command, namespace, name,
329 oldhashes, newhashes)
330 oldhashes, newhashes)
330
331
331 vfs = self.vfs
332 vfs = self.vfs
332 if self.sharedvfs is not None:
333 if self.sharedvfs is not None:
333 # write to the shared repository if this feature is being
334 # write to the shared repository if this feature is being
334 # shared between working copies.
335 # shared between working copies.
335 if sharednamespaces.get(namespace) in self.sharedfeatures:
336 if sharednamespaces.get(namespace) in self.sharedfeatures:
336 vfs = self.sharedvfs
337 vfs = self.sharedvfs
337
338
338 self._write(vfs, entry)
339 self._write(vfs, entry)
339
340
340 def _write(self, vfs, entry):
341 def _write(self, vfs, entry):
341 with self.jlock(vfs):
342 with self.jlock(vfs):
342 version = None
343 version = None
343 # open file in amend mode to ensure it is created if missing
344 # open file in amend mode to ensure it is created if missing
344 with vfs('namejournal', mode='a+b', atomictemp=True) as f:
345 with vfs('namejournal', mode='a+b', atomictemp=True) as f:
345 f.seek(0, os.SEEK_SET)
346 f.seek(0, os.SEEK_SET)
346 # Read just enough bytes to get a version number (up to 2
347 # Read just enough bytes to get a version number (up to 2
347 # digits plus separator)
348 # digits plus separator)
348 version = f.read(3).partition('\0')[0]
349 version = f.read(3).partition('\0')[0]
349 if version and version != str(storageversion):
350 if version and version != str(storageversion):
350 # different version of the storage. Exit early (and not
351 # different version of the storage. Exit early (and not
351 # write anything) if this is not a version we can handle or
352 # write anything) if this is not a version we can handle or
352 # the file is corrupt. In future, perhaps rotate the file
353 # the file is corrupt. In future, perhaps rotate the file
353 # instead?
354 # instead?
354 self.ui.warn(
355 self.ui.warn(
355 _("unsupported journal file version '%s'\n") % version)
356 _("unsupported journal file version '%s'\n") % version)
356 return
357 return
357 if not version:
358 if not version:
358 # empty file, write version first
359 # empty file, write version first
359 f.write(str(storageversion) + '\0')
360 f.write(str(storageversion) + '\0')
360 f.seek(0, os.SEEK_END)
361 f.seek(0, os.SEEK_END)
361 f.write(str(entry) + '\0')
362 f.write(str(entry) + '\0')
362
363
363 def filtered(self, namespace=None, name=None):
364 def filtered(self, namespace=None, name=None):
364 """Yield all journal entries with the given namespace or name
365 """Yield all journal entries with the given namespace or name
365
366
366 Both the namespace and the name are optional; if neither is given all
367 Both the namespace and the name are optional; if neither is given all
367 entries in the journal are produced.
368 entries in the journal are produced.
368
369
369 Matching supports regular expressions by using the `re:` prefix
370 Matching supports regular expressions by using the `re:` prefix
370 (use `literal:` to match names or namespaces that start with `re:`)
371 (use `literal:` to match names or namespaces that start with `re:`)
371
372
372 """
373 """
373 if namespace is not None:
374 if namespace is not None:
374 namespace = util.stringmatcher(namespace)[-1]
375 namespace = util.stringmatcher(namespace)[-1]
375 if name is not None:
376 if name is not None:
376 name = util.stringmatcher(name)[-1]
377 name = util.stringmatcher(name)[-1]
377 for entry in self:
378 for entry in self:
378 if namespace is not None and not namespace(entry.namespace):
379 if namespace is not None and not namespace(entry.namespace):
379 continue
380 continue
380 if name is not None and not name(entry.name):
381 if name is not None and not name(entry.name):
381 continue
382 continue
382 yield entry
383 yield entry
383
384
384 def __iter__(self):
385 def __iter__(self):
385 """Iterate over the storage
386 """Iterate over the storage
386
387
387 Yields journalentry instances for each contained journal record.
388 Yields journalentry instances for each contained journal record.
388
389
389 """
390 """
390 local = self._open(self.vfs)
391 local = self._open(self.vfs)
391
392
392 if self.sharedvfs is None:
393 if self.sharedvfs is None:
393 return local
394 return local
394
395
395 # iterate over both local and shared entries, but only those
396 # iterate over both local and shared entries, but only those
396 # shared entries that are among the currently shared features
397 # shared entries that are among the currently shared features
397 shared = (
398 shared = (
398 e for e in self._open(self.sharedvfs)
399 e for e in self._open(self.sharedvfs)
399 if sharednamespaces.get(e.namespace) in self.sharedfeatures)
400 if sharednamespaces.get(e.namespace) in self.sharedfeatures)
400 return _mergeentriesiter(local, shared)
401 return _mergeentriesiter(local, shared)
401
402
402 def _open(self, vfs, filename='namejournal', _newestfirst=True):
403 def _open(self, vfs, filename='namejournal', _newestfirst=True):
403 if not vfs.exists(filename):
404 if not vfs.exists(filename):
404 return
405 return
405
406
406 with vfs(filename) as f:
407 with vfs(filename) as f:
407 raw = f.read()
408 raw = f.read()
408
409
409 lines = raw.split('\0')
410 lines = raw.split('\0')
410 version = lines and lines[0]
411 version = lines and lines[0]
411 if version != str(storageversion):
412 if version != str(storageversion):
412 version = version or _('not available')
413 version = version or _('not available')
413 raise error.Abort(_("unknown journal file version '%s'") % version)
414 raise error.Abort(_("unknown journal file version '%s'") % version)
414
415
415 # Skip the first line, it's a version number. Normally we iterate over
416 # Skip the first line, it's a version number. Normally we iterate over
416 # these in reverse order to list newest first; only when copying across
417 # these in reverse order to list newest first; only when copying across
417 # a shared storage do we forgo reversing.
418 # a shared storage do we forgo reversing.
418 lines = lines[1:]
419 lines = lines[1:]
419 if _newestfirst:
420 if _newestfirst:
420 lines = reversed(lines)
421 lines = reversed(lines)
421 for line in lines:
422 for line in lines:
422 if not line:
423 if not line:
423 continue
424 continue
424 yield journalentry.fromstorage(line)
425 yield journalentry.fromstorage(line)
425
426
426 # journal reading
427 # journal reading
427 # log options that don't make sense for journal
428 # log options that don't make sense for journal
428 _ignoreopts = ('no-merges', 'graph')
429 _ignoreopts = ('no-merges', 'graph')
429 @command(
430 @command(
430 'journal', [
431 'journal', [
431 ('', 'all', None, 'show history for all names'),
432 ('', 'all', None, 'show history for all names'),
432 ('c', 'commits', None, 'show commit metadata'),
433 ('c', 'commits', None, 'show commit metadata'),
433 ] + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
434 ] + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
434 '[OPTION]... [BOOKMARKNAME]')
435 '[OPTION]... [BOOKMARKNAME]')
435 def journal(ui, repo, *args, **opts):
436 def journal(ui, repo, *args, **opts):
436 """show the previous position of bookmarks and the working copy
437 """show the previous position of bookmarks and the working copy
437
438
438 The journal is used to see the previous commits that bookmarks and the
439 The journal is used to see the previous commits that bookmarks and the
439 working copy pointed to. By default the previous locations for the working
440 working copy pointed to. By default the previous locations for the working
440 copy. Passing a bookmark name will show all the previous positions of
441 copy. Passing a bookmark name will show all the previous positions of
441 that bookmark. Use the --all switch to show previous locations for all
442 that bookmark. Use the --all switch to show previous locations for all
442 bookmarks and the working copy; each line will then include the bookmark
443 bookmarks and the working copy; each line will then include the bookmark
443 name, or '.' for the working copy, as well.
444 name, or '.' for the working copy, as well.
444
445
445 If `name` starts with `re:`, the remainder of the name is treated as
446 If `name` starts with `re:`, the remainder of the name is treated as
446 a regular expression. To match a name that actually starts with `re:`,
447 a regular expression. To match a name that actually starts with `re:`,
447 use the prefix `literal:`.
448 use the prefix `literal:`.
448
449
449 By default hg journal only shows the commit hash and the command that was
450 By default hg journal only shows the commit hash and the command that was
450 running at that time. -v/--verbose will show the prior hash, the user, and
451 running at that time. -v/--verbose will show the prior hash, the user, and
451 the time at which it happened.
452 the time at which it happened.
452
453
453 Use -c/--commits to output log information on each commit hash; at this
454 Use -c/--commits to output log information on each commit hash; at this
454 point you can use the usual `--patch`, `--git`, `--stat` and `--template`
455 point you can use the usual `--patch`, `--git`, `--stat` and `--template`
455 switches to alter the log output for these.
456 switches to alter the log output for these.
456
457
457 `hg journal -T json` can be used to produce machine readable output.
458 `hg journal -T json` can be used to produce machine readable output.
458
459
459 """
460 """
460 name = '.'
461 name = '.'
461 if opts.get('all'):
462 if opts.get('all'):
462 if args:
463 if args:
463 raise error.Abort(
464 raise error.Abort(
464 _("You can't combine --all and filtering on a name"))
465 _("You can't combine --all and filtering on a name"))
465 name = None
466 name = None
466 if args:
467 if args:
467 name = args[0]
468 name = args[0]
468
469
469 fm = ui.formatter('journal', opts)
470 fm = ui.formatter('journal', opts)
470
471
471 if opts.get("template") != "json":
472 if opts.get("template") != "json":
472 if name is None:
473 if name is None:
473 displayname = _('the working copy and bookmarks')
474 displayname = _('the working copy and bookmarks')
474 else:
475 else:
475 displayname = "'%s'" % name
476 displayname = "'%s'" % name
476 ui.status(_("previous locations of %s:\n") % displayname)
477 ui.status(_("previous locations of %s:\n") % displayname)
477
478
478 limit = cmdutil.loglimit(opts)
479 limit = cmdutil.loglimit(opts)
479 entry = None
480 entry = None
480 for count, entry in enumerate(repo.journal.filtered(name=name)):
481 for count, entry in enumerate(repo.journal.filtered(name=name)):
481 if count == limit:
482 if count == limit:
482 break
483 break
483 newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
484 newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
484 name='node', sep=',')
485 name='node', sep=',')
485 oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
486 oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
486 name='node', sep=',')
487 name='node', sep=',')
487
488
488 fm.startitem()
489 fm.startitem()
489 fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
490 fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
490 fm.write('newhashes', '%s', newhashesstr)
491 fm.write('newhashes', '%s', newhashesstr)
491 fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
492 fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
492 fm.condwrite(
493 fm.condwrite(
493 opts.get('all') or name.startswith('re:'),
494 opts.get('all') or name.startswith('re:'),
494 'name', ' %-8s', entry.name)
495 'name', ' %-8s', entry.name)
495
496
496 timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
497 timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
497 fm.condwrite(ui.verbose, 'date', ' %s', timestring)
498 fm.condwrite(ui.verbose, 'date', ' %s', timestring)
498 fm.write('command', ' %s\n', entry.command)
499 fm.write('command', ' %s\n', entry.command)
499
500
500 if opts.get("commits"):
501 if opts.get("commits"):
501 displayer = cmdutil.show_changeset(ui, repo, opts, buffered=False)
502 displayer = cmdutil.show_changeset(ui, repo, opts, buffered=False)
502 for hash in entry.newhashes:
503 for hash in entry.newhashes:
503 try:
504 try:
504 ctx = repo[hash]
505 ctx = repo[hash]
505 displayer.show(ctx)
506 displayer.show(ctx)
506 except error.RepoLookupError as e:
507 except error.RepoLookupError as e:
507 fm.write('repolookuperror', "%s\n\n", str(e))
508 fm.write('repolookuperror', "%s\n\n", str(e))
508 displayer.close()
509 displayer.close()
509
510
510 fm.end()
511 fm.end()
511
512
512 if entry is None:
513 if entry is None:
513 ui.status(_("no recorded locations\n"))
514 ui.status(_("no recorded locations\n"))
@@ -1,758 +1,762 b''
1 # patchbomb.py - sending Mercurial changesets as patch emails
1 # patchbomb.py - sending Mercurial changesets as patch emails
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''command to send changesets as (a series of) patch emails
8 '''command to send changesets as (a series of) patch emails
9
9
10 The series is started off with a "[PATCH 0 of N]" introduction, which
10 The series is started off with a "[PATCH 0 of N]" introduction, which
11 describes the series as a whole.
11 describes the series as a whole.
12
12
13 Each patch email has a Subject line of "[PATCH M of N] ...", using the
13 Each patch email has a Subject line of "[PATCH M of N] ...", using the
14 first line of the changeset description as the subject text. The
14 first line of the changeset description as the subject text. The
15 message contains two or three body parts:
15 message contains two or three body parts:
16
16
17 - The changeset description.
17 - The changeset description.
18 - [Optional] The result of running diffstat on the patch.
18 - [Optional] The result of running diffstat on the patch.
19 - The patch itself, as generated by :hg:`export`.
19 - The patch itself, as generated by :hg:`export`.
20
20
21 Each message refers to the first in the series using the In-Reply-To
21 Each message refers to the first in the series using the In-Reply-To
22 and References headers, so they will show up as a sequence in threaded
22 and References headers, so they will show up as a sequence in threaded
23 mail and news readers, and in mail archives.
23 mail and news readers, and in mail archives.
24
24
25 To configure other defaults, add a section like this to your
25 To configure other defaults, add a section like this to your
26 configuration file::
26 configuration file::
27
27
28 [email]
28 [email]
29 from = My Name <my@email>
29 from = My Name <my@email>
30 to = recipient1, recipient2, ...
30 to = recipient1, recipient2, ...
31 cc = cc1, cc2, ...
31 cc = cc1, cc2, ...
32 bcc = bcc1, bcc2, ...
32 bcc = bcc1, bcc2, ...
33 reply-to = address1, address2, ...
33 reply-to = address1, address2, ...
34
34
35 Use ``[patchbomb]`` as configuration section name if you need to
35 Use ``[patchbomb]`` as configuration section name if you need to
36 override global ``[email]`` address settings.
36 override global ``[email]`` address settings.
37
37
38 Then you can use the :hg:`email` command to mail a series of
38 Then you can use the :hg:`email` command to mail a series of
39 changesets as a patchbomb.
39 changesets as a patchbomb.
40
40
41 You can also either configure the method option in the email section
41 You can also either configure the method option in the email section
42 to be a sendmail compatible mailer or fill out the [smtp] section so
42 to be a sendmail compatible mailer or fill out the [smtp] section so
43 that the patchbomb extension can automatically send patchbombs
43 that the patchbomb extension can automatically send patchbombs
44 directly from the commandline. See the [email] and [smtp] sections in
44 directly from the commandline. See the [email] and [smtp] sections in
45 hgrc(5) for details.
45 hgrc(5) for details.
46
46
47 By default, :hg:`email` will prompt for a ``To`` or ``CC`` header if
47 By default, :hg:`email` will prompt for a ``To`` or ``CC`` header if
48 you do not supply one via configuration or the command line. You can
48 you do not supply one via configuration or the command line. You can
49 override this to never prompt by configuring an empty value::
49 override this to never prompt by configuring an empty value::
50
50
51 [email]
51 [email]
52 cc =
52 cc =
53
53
54 You can control the default inclusion of an introduction message with the
54 You can control the default inclusion of an introduction message with the
55 ``patchbomb.intro`` configuration option. The configuration is always
55 ``patchbomb.intro`` configuration option. The configuration is always
56 overwritten by command line flags like --intro and --desc::
56 overwritten by command line flags like --intro and --desc::
57
57
58 [patchbomb]
58 [patchbomb]
59 intro=auto # include introduction message if more than 1 patch (default)
59 intro=auto # include introduction message if more than 1 patch (default)
60 intro=never # never include an introduction message
60 intro=never # never include an introduction message
61 intro=always # always include an introduction message
61 intro=always # always include an introduction message
62
62
63 You can specify a template for flags to be added in subject prefixes. Flags
63 You can specify a template for flags to be added in subject prefixes. Flags
64 specified by --flag option are exported as ``{flags}`` keyword::
64 specified by --flag option are exported as ``{flags}`` keyword::
65
65
66 [patchbomb]
66 [patchbomb]
67 flagtemplate = "{separate(' ',
67 flagtemplate = "{separate(' ',
68 ifeq(branch, 'default', '', branch|upper),
68 ifeq(branch, 'default', '', branch|upper),
69 flags)}"
69 flags)}"
70
70
71 You can set patchbomb to always ask for confirmation by setting
71 You can set patchbomb to always ask for confirmation by setting
72 ``patchbomb.confirm`` to true.
72 ``patchbomb.confirm`` to true.
73 '''
73 '''
74 from __future__ import absolute_import
74 from __future__ import absolute_import
75
75
76 import email as emailmod
76 import email as emailmod
77 import errno
77 import errno
78 import os
78 import os
79 import socket
79 import socket
80 import tempfile
80 import tempfile
81
81
82 from mercurial.i18n import _
82 from mercurial.i18n import _
83 from mercurial import (
83 from mercurial import (
84 cmdutil,
84 cmdutil,
85 commands,
85 commands,
86 error,
86 error,
87 formatter,
87 formatter,
88 hg,
88 hg,
89 mail,
89 mail,
90 node as nodemod,
90 node as nodemod,
91 patch,
91 patch,
92 registrar,
92 registrar,
93 repair,
93 repair,
94 scmutil,
94 scmutil,
95 templater,
95 templater,
96 util,
96 util,
97 )
97 )
98 stringio = util.stringio
98 stringio = util.stringio
99
99
100 cmdtable = {}
100 cmdtable = {}
101 command = registrar.command(cmdtable)
101 command = registrar.command(cmdtable)
102 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
102 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
103 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
103 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
104 # be specifying the version(s) of Mercurial they are tested with, or
104 # be specifying the version(s) of Mercurial they are tested with, or
105 # leave the attribute unspecified.
105 # leave the attribute unspecified.
106 testedwith = 'ships-with-hg-core'
106 testedwith = 'ships-with-hg-core'
107
107
108 def _addpullheader(seq, ctx):
108 def _addpullheader(seq, ctx):
109 """Add a header pointing to a public URL where the changeset is available
109 """Add a header pointing to a public URL where the changeset is available
110 """
110 """
111 repo = ctx.repo()
111 repo = ctx.repo()
112 # experimental config: patchbomb.publicurl
112 # experimental config: patchbomb.publicurl
113 # waiting for some logic that check that the changeset are available on the
113 # waiting for some logic that check that the changeset are available on the
114 # destination before patchbombing anything.
114 # destination before patchbombing anything.
115 publicurl = repo.ui.config('patchbomb', 'publicurl')
115 publicurl = repo.ui.config('patchbomb', 'publicurl')
116 if publicurl:
116 if publicurl:
117 return ('Available At %s\n'
117 return ('Available At %s\n'
118 '# hg pull %s -r %s' % (publicurl, publicurl, ctx))
118 '# hg pull %s -r %s' % (publicurl, publicurl, ctx))
119 return None
119 return None
120
120
121 def uisetup(ui):
121 def uisetup(ui):
122 cmdutil.extraexport.append('pullurl')
122 cmdutil.extraexport.append('pullurl')
123 cmdutil.extraexportmap['pullurl'] = _addpullheader
123 cmdutil.extraexportmap['pullurl'] = _addpullheader
124
124
125 def reposetup(ui, repo):
126 if not repo.local():
127 return
128 repo._wlockfreeprefix.add('last-email.txt')
125
129
126 def prompt(ui, prompt, default=None, rest=':'):
130 def prompt(ui, prompt, default=None, rest=':'):
127 if default:
131 if default:
128 prompt += ' [%s]' % default
132 prompt += ' [%s]' % default
129 return ui.prompt(prompt + rest, default)
133 return ui.prompt(prompt + rest, default)
130
134
131 def introwanted(ui, opts, number):
135 def introwanted(ui, opts, number):
132 '''is an introductory message apparently wanted?'''
136 '''is an introductory message apparently wanted?'''
133 introconfig = ui.config('patchbomb', 'intro', 'auto')
137 introconfig = ui.config('patchbomb', 'intro', 'auto')
134 if opts.get('intro') or opts.get('desc'):
138 if opts.get('intro') or opts.get('desc'):
135 intro = True
139 intro = True
136 elif introconfig == 'always':
140 elif introconfig == 'always':
137 intro = True
141 intro = True
138 elif introconfig == 'never':
142 elif introconfig == 'never':
139 intro = False
143 intro = False
140 elif introconfig == 'auto':
144 elif introconfig == 'auto':
141 intro = 1 < number
145 intro = 1 < number
142 else:
146 else:
143 ui.write_err(_('warning: invalid patchbomb.intro value "%s"\n')
147 ui.write_err(_('warning: invalid patchbomb.intro value "%s"\n')
144 % introconfig)
148 % introconfig)
145 ui.write_err(_('(should be one of always, never, auto)\n'))
149 ui.write_err(_('(should be one of always, never, auto)\n'))
146 intro = 1 < number
150 intro = 1 < number
147 return intro
151 return intro
148
152
149 def _formatflags(ui, repo, rev, flags):
153 def _formatflags(ui, repo, rev, flags):
150 """build flag string optionally by template"""
154 """build flag string optionally by template"""
151 tmpl = ui.config('patchbomb', 'flagtemplate')
155 tmpl = ui.config('patchbomb', 'flagtemplate')
152 if not tmpl:
156 if not tmpl:
153 return ' '.join(flags)
157 return ' '.join(flags)
154 out = util.stringio()
158 out = util.stringio()
155 opts = {'template': templater.unquotestring(tmpl)}
159 opts = {'template': templater.unquotestring(tmpl)}
156 with formatter.templateformatter(ui, out, 'patchbombflag', opts) as fm:
160 with formatter.templateformatter(ui, out, 'patchbombflag', opts) as fm:
157 fm.startitem()
161 fm.startitem()
158 fm.context(ctx=repo[rev])
162 fm.context(ctx=repo[rev])
159 fm.write('flags', '%s', fm.formatlist(flags, name='flag'))
163 fm.write('flags', '%s', fm.formatlist(flags, name='flag'))
160 return out.getvalue()
164 return out.getvalue()
161
165
162 def _formatprefix(ui, repo, rev, flags, idx, total, numbered):
166 def _formatprefix(ui, repo, rev, flags, idx, total, numbered):
163 """build prefix to patch subject"""
167 """build prefix to patch subject"""
164 flag = _formatflags(ui, repo, rev, flags)
168 flag = _formatflags(ui, repo, rev, flags)
165 if flag:
169 if flag:
166 flag = ' ' + flag
170 flag = ' ' + flag
167
171
168 if not numbered:
172 if not numbered:
169 return '[PATCH%s]' % flag
173 return '[PATCH%s]' % flag
170 else:
174 else:
171 tlen = len(str(total))
175 tlen = len(str(total))
172 return '[PATCH %0*d of %d%s]' % (tlen, idx, total, flag)
176 return '[PATCH %0*d of %d%s]' % (tlen, idx, total, flag)
173
177
174 def makepatch(ui, repo, rev, patchlines, opts, _charsets, idx, total, numbered,
178 def makepatch(ui, repo, rev, patchlines, opts, _charsets, idx, total, numbered,
175 patchname=None):
179 patchname=None):
176
180
177 desc = []
181 desc = []
178 node = None
182 node = None
179 body = ''
183 body = ''
180
184
181 for line in patchlines:
185 for line in patchlines:
182 if line.startswith('#'):
186 if line.startswith('#'):
183 if line.startswith('# Node ID'):
187 if line.startswith('# Node ID'):
184 node = line.split()[-1]
188 node = line.split()[-1]
185 continue
189 continue
186 if line.startswith('diff -r') or line.startswith('diff --git'):
190 if line.startswith('diff -r') or line.startswith('diff --git'):
187 break
191 break
188 desc.append(line)
192 desc.append(line)
189
193
190 if not patchname and not node:
194 if not patchname and not node:
191 raise ValueError
195 raise ValueError
192
196
193 if opts.get('attach') and not opts.get('body'):
197 if opts.get('attach') and not opts.get('body'):
194 body = ('\n'.join(desc[1:]).strip() or
198 body = ('\n'.join(desc[1:]).strip() or
195 'Patch subject is complete summary.')
199 'Patch subject is complete summary.')
196 body += '\n\n\n'
200 body += '\n\n\n'
197
201
198 if opts.get('plain'):
202 if opts.get('plain'):
199 while patchlines and patchlines[0].startswith('# '):
203 while patchlines and patchlines[0].startswith('# '):
200 patchlines.pop(0)
204 patchlines.pop(0)
201 if patchlines:
205 if patchlines:
202 patchlines.pop(0)
206 patchlines.pop(0)
203 while patchlines and not patchlines[0].strip():
207 while patchlines and not patchlines[0].strip():
204 patchlines.pop(0)
208 patchlines.pop(0)
205
209
206 ds = patch.diffstat(patchlines)
210 ds = patch.diffstat(patchlines)
207 if opts.get('diffstat'):
211 if opts.get('diffstat'):
208 body += ds + '\n\n'
212 body += ds + '\n\n'
209
213
210 addattachment = opts.get('attach') or opts.get('inline')
214 addattachment = opts.get('attach') or opts.get('inline')
211 if not addattachment or opts.get('body'):
215 if not addattachment or opts.get('body'):
212 body += '\n'.join(patchlines)
216 body += '\n'.join(patchlines)
213
217
214 if addattachment:
218 if addattachment:
215 msg = emailmod.MIMEMultipart.MIMEMultipart()
219 msg = emailmod.MIMEMultipart.MIMEMultipart()
216 if body:
220 if body:
217 msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
221 msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
218 p = mail.mimetextpatch('\n'.join(patchlines), 'x-patch',
222 p = mail.mimetextpatch('\n'.join(patchlines), 'x-patch',
219 opts.get('test'))
223 opts.get('test'))
220 binnode = nodemod.bin(node)
224 binnode = nodemod.bin(node)
221 # if node is mq patch, it will have the patch file's name as a tag
225 # if node is mq patch, it will have the patch file's name as a tag
222 if not patchname:
226 if not patchname:
223 patchtags = [t for t in repo.nodetags(binnode)
227 patchtags = [t for t in repo.nodetags(binnode)
224 if t.endswith('.patch') or t.endswith('.diff')]
228 if t.endswith('.patch') or t.endswith('.diff')]
225 if patchtags:
229 if patchtags:
226 patchname = patchtags[0]
230 patchname = patchtags[0]
227 elif total > 1:
231 elif total > 1:
228 patchname = cmdutil.makefilename(repo, '%b-%n.patch',
232 patchname = cmdutil.makefilename(repo, '%b-%n.patch',
229 binnode, seqno=idx,
233 binnode, seqno=idx,
230 total=total)
234 total=total)
231 else:
235 else:
232 patchname = cmdutil.makefilename(repo, '%b.patch', binnode)
236 patchname = cmdutil.makefilename(repo, '%b.patch', binnode)
233 disposition = 'inline'
237 disposition = 'inline'
234 if opts.get('attach'):
238 if opts.get('attach'):
235 disposition = 'attachment'
239 disposition = 'attachment'
236 p['Content-Disposition'] = disposition + '; filename=' + patchname
240 p['Content-Disposition'] = disposition + '; filename=' + patchname
237 msg.attach(p)
241 msg.attach(p)
238 else:
242 else:
239 msg = mail.mimetextpatch(body, display=opts.get('test'))
243 msg = mail.mimetextpatch(body, display=opts.get('test'))
240
244
241 prefix = _formatprefix(ui, repo, rev, opts.get('flag'), idx, total,
245 prefix = _formatprefix(ui, repo, rev, opts.get('flag'), idx, total,
242 numbered)
246 numbered)
243 subj = desc[0].strip().rstrip('. ')
247 subj = desc[0].strip().rstrip('. ')
244 if not numbered:
248 if not numbered:
245 subj = ' '.join([prefix, opts.get('subject') or subj])
249 subj = ' '.join([prefix, opts.get('subject') or subj])
246 else:
250 else:
247 subj = ' '.join([prefix, subj])
251 subj = ' '.join([prefix, subj])
248 msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
252 msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
249 msg['X-Mercurial-Node'] = node
253 msg['X-Mercurial-Node'] = node
250 msg['X-Mercurial-Series-Index'] = '%i' % idx
254 msg['X-Mercurial-Series-Index'] = '%i' % idx
251 msg['X-Mercurial-Series-Total'] = '%i' % total
255 msg['X-Mercurial-Series-Total'] = '%i' % total
252 return msg, subj, ds
256 return msg, subj, ds
253
257
254 def _getpatches(repo, revs, **opts):
258 def _getpatches(repo, revs, **opts):
255 """return a list of patches for a list of revisions
259 """return a list of patches for a list of revisions
256
260
257 Each patch in the list is itself a list of lines.
261 Each patch in the list is itself a list of lines.
258 """
262 """
259 ui = repo.ui
263 ui = repo.ui
260 prev = repo['.'].rev()
264 prev = repo['.'].rev()
261 for r in revs:
265 for r in revs:
262 if r == prev and (repo[None].files() or repo[None].deleted()):
266 if r == prev and (repo[None].files() or repo[None].deleted()):
263 ui.warn(_('warning: working directory has '
267 ui.warn(_('warning: working directory has '
264 'uncommitted changes\n'))
268 'uncommitted changes\n'))
265 output = stringio()
269 output = stringio()
266 cmdutil.export(repo, [r], fp=output,
270 cmdutil.export(repo, [r], fp=output,
267 opts=patch.difffeatureopts(ui, opts, git=True))
271 opts=patch.difffeatureopts(ui, opts, git=True))
268 yield output.getvalue().split('\n')
272 yield output.getvalue().split('\n')
269 def _getbundle(repo, dest, **opts):
273 def _getbundle(repo, dest, **opts):
270 """return a bundle containing changesets missing in "dest"
274 """return a bundle containing changesets missing in "dest"
271
275
272 The `opts` keyword-arguments are the same as the one accepted by the
276 The `opts` keyword-arguments are the same as the one accepted by the
273 `bundle` command.
277 `bundle` command.
274
278
275 The bundle is a returned as a single in-memory binary blob.
279 The bundle is a returned as a single in-memory binary blob.
276 """
280 """
277 ui = repo.ui
281 ui = repo.ui
278 tmpdir = tempfile.mkdtemp(prefix='hg-email-bundle-')
282 tmpdir = tempfile.mkdtemp(prefix='hg-email-bundle-')
279 tmpfn = os.path.join(tmpdir, 'bundle')
283 tmpfn = os.path.join(tmpdir, 'bundle')
280 btype = ui.config('patchbomb', 'bundletype')
284 btype = ui.config('patchbomb', 'bundletype')
281 if btype:
285 if btype:
282 opts['type'] = btype
286 opts['type'] = btype
283 try:
287 try:
284 commands.bundle(ui, repo, tmpfn, dest, **opts)
288 commands.bundle(ui, repo, tmpfn, dest, **opts)
285 return util.readfile(tmpfn)
289 return util.readfile(tmpfn)
286 finally:
290 finally:
287 try:
291 try:
288 os.unlink(tmpfn)
292 os.unlink(tmpfn)
289 except OSError:
293 except OSError:
290 pass
294 pass
291 os.rmdir(tmpdir)
295 os.rmdir(tmpdir)
292
296
293 def _getdescription(repo, defaultbody, sender, **opts):
297 def _getdescription(repo, defaultbody, sender, **opts):
294 """obtain the body of the introduction message and return it
298 """obtain the body of the introduction message and return it
295
299
296 This is also used for the body of email with an attached bundle.
300 This is also used for the body of email with an attached bundle.
297
301
298 The body can be obtained either from the command line option or entered by
302 The body can be obtained either from the command line option or entered by
299 the user through the editor.
303 the user through the editor.
300 """
304 """
301 ui = repo.ui
305 ui = repo.ui
302 if opts.get('desc'):
306 if opts.get('desc'):
303 body = open(opts.get('desc')).read()
307 body = open(opts.get('desc')).read()
304 else:
308 else:
305 ui.write(_('\nWrite the introductory message for the '
309 ui.write(_('\nWrite the introductory message for the '
306 'patch series.\n\n'))
310 'patch series.\n\n'))
307 body = ui.edit(defaultbody, sender, repopath=repo.path)
311 body = ui.edit(defaultbody, sender, repopath=repo.path)
308 # Save series description in case sendmail fails
312 # Save series description in case sendmail fails
309 msgfile = repo.vfs('last-email.txt', 'wb')
313 msgfile = repo.vfs('last-email.txt', 'wb')
310 msgfile.write(body)
314 msgfile.write(body)
311 msgfile.close()
315 msgfile.close()
312 return body
316 return body
313
317
314 def _getbundlemsgs(repo, sender, bundle, **opts):
318 def _getbundlemsgs(repo, sender, bundle, **opts):
315 """Get the full email for sending a given bundle
319 """Get the full email for sending a given bundle
316
320
317 This function returns a list of "email" tuples (subject, content, None).
321 This function returns a list of "email" tuples (subject, content, None).
318 The list is always one message long in that case.
322 The list is always one message long in that case.
319 """
323 """
320 ui = repo.ui
324 ui = repo.ui
321 _charsets = mail._charsets(ui)
325 _charsets = mail._charsets(ui)
322 subj = (opts.get('subject')
326 subj = (opts.get('subject')
323 or prompt(ui, 'Subject:', 'A bundle for your repository'))
327 or prompt(ui, 'Subject:', 'A bundle for your repository'))
324
328
325 body = _getdescription(repo, '', sender, **opts)
329 body = _getdescription(repo, '', sender, **opts)
326 msg = emailmod.MIMEMultipart.MIMEMultipart()
330 msg = emailmod.MIMEMultipart.MIMEMultipart()
327 if body:
331 if body:
328 msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
332 msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
329 datapart = emailmod.MIMEBase.MIMEBase('application', 'x-mercurial-bundle')
333 datapart = emailmod.MIMEBase.MIMEBase('application', 'x-mercurial-bundle')
330 datapart.set_payload(bundle)
334 datapart.set_payload(bundle)
331 bundlename = '%s.hg' % opts.get('bundlename', 'bundle')
335 bundlename = '%s.hg' % opts.get('bundlename', 'bundle')
332 datapart.add_header('Content-Disposition', 'attachment',
336 datapart.add_header('Content-Disposition', 'attachment',
333 filename=bundlename)
337 filename=bundlename)
334 emailmod.Encoders.encode_base64(datapart)
338 emailmod.Encoders.encode_base64(datapart)
335 msg.attach(datapart)
339 msg.attach(datapart)
336 msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
340 msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
337 return [(msg, subj, None)]
341 return [(msg, subj, None)]
338
342
339 def _makeintro(repo, sender, revs, patches, **opts):
343 def _makeintro(repo, sender, revs, patches, **opts):
340 """make an introduction email, asking the user for content if needed
344 """make an introduction email, asking the user for content if needed
341
345
342 email is returned as (subject, body, cumulative-diffstat)"""
346 email is returned as (subject, body, cumulative-diffstat)"""
343 ui = repo.ui
347 ui = repo.ui
344 _charsets = mail._charsets(ui)
348 _charsets = mail._charsets(ui)
345
349
346 # use the last revision which is likely to be a bookmarked head
350 # use the last revision which is likely to be a bookmarked head
347 prefix = _formatprefix(ui, repo, revs.last(), opts.get('flag'),
351 prefix = _formatprefix(ui, repo, revs.last(), opts.get('flag'),
348 0, len(patches), numbered=True)
352 0, len(patches), numbered=True)
349 subj = (opts.get('subject') or
353 subj = (opts.get('subject') or
350 prompt(ui, '(optional) Subject: ', rest=prefix, default=''))
354 prompt(ui, '(optional) Subject: ', rest=prefix, default=''))
351 if not subj:
355 if not subj:
352 return None # skip intro if the user doesn't bother
356 return None # skip intro if the user doesn't bother
353
357
354 subj = prefix + ' ' + subj
358 subj = prefix + ' ' + subj
355
359
356 body = ''
360 body = ''
357 if opts.get('diffstat'):
361 if opts.get('diffstat'):
358 # generate a cumulative diffstat of the whole patch series
362 # generate a cumulative diffstat of the whole patch series
359 diffstat = patch.diffstat(sum(patches, []))
363 diffstat = patch.diffstat(sum(patches, []))
360 body = '\n' + diffstat
364 body = '\n' + diffstat
361 else:
365 else:
362 diffstat = None
366 diffstat = None
363
367
364 body = _getdescription(repo, body, sender, **opts)
368 body = _getdescription(repo, body, sender, **opts)
365 msg = mail.mimeencode(ui, body, _charsets, opts.get('test'))
369 msg = mail.mimeencode(ui, body, _charsets, opts.get('test'))
366 msg['Subject'] = mail.headencode(ui, subj, _charsets,
370 msg['Subject'] = mail.headencode(ui, subj, _charsets,
367 opts.get('test'))
371 opts.get('test'))
368 return (msg, subj, diffstat)
372 return (msg, subj, diffstat)
369
373
370 def _getpatchmsgs(repo, sender, revs, patchnames=None, **opts):
374 def _getpatchmsgs(repo, sender, revs, patchnames=None, **opts):
371 """return a list of emails from a list of patches
375 """return a list of emails from a list of patches
372
376
373 This involves introduction message creation if necessary.
377 This involves introduction message creation if necessary.
374
378
375 This function returns a list of "email" tuples (subject, content, None).
379 This function returns a list of "email" tuples (subject, content, None).
376 """
380 """
377 ui = repo.ui
381 ui = repo.ui
378 _charsets = mail._charsets(ui)
382 _charsets = mail._charsets(ui)
379 patches = list(_getpatches(repo, revs, **opts))
383 patches = list(_getpatches(repo, revs, **opts))
380 msgs = []
384 msgs = []
381
385
382 ui.write(_('this patch series consists of %d patches.\n\n')
386 ui.write(_('this patch series consists of %d patches.\n\n')
383 % len(patches))
387 % len(patches))
384
388
385 # build the intro message, or skip it if the user declines
389 # build the intro message, or skip it if the user declines
386 if introwanted(ui, opts, len(patches)):
390 if introwanted(ui, opts, len(patches)):
387 msg = _makeintro(repo, sender, revs, patches, **opts)
391 msg = _makeintro(repo, sender, revs, patches, **opts)
388 if msg:
392 if msg:
389 msgs.append(msg)
393 msgs.append(msg)
390
394
391 # are we going to send more than one message?
395 # are we going to send more than one message?
392 numbered = len(msgs) + len(patches) > 1
396 numbered = len(msgs) + len(patches) > 1
393
397
394 # now generate the actual patch messages
398 # now generate the actual patch messages
395 name = None
399 name = None
396 assert len(revs) == len(patches)
400 assert len(revs) == len(patches)
397 for i, (r, p) in enumerate(zip(revs, patches)):
401 for i, (r, p) in enumerate(zip(revs, patches)):
398 if patchnames:
402 if patchnames:
399 name = patchnames[i]
403 name = patchnames[i]
400 msg = makepatch(ui, repo, r, p, opts, _charsets, i + 1,
404 msg = makepatch(ui, repo, r, p, opts, _charsets, i + 1,
401 len(patches), numbered, name)
405 len(patches), numbered, name)
402 msgs.append(msg)
406 msgs.append(msg)
403
407
404 return msgs
408 return msgs
405
409
406 def _getoutgoing(repo, dest, revs):
410 def _getoutgoing(repo, dest, revs):
407 '''Return the revisions present locally but not in dest'''
411 '''Return the revisions present locally but not in dest'''
408 ui = repo.ui
412 ui = repo.ui
409 url = ui.expandpath(dest or 'default-push', dest or 'default')
413 url = ui.expandpath(dest or 'default-push', dest or 'default')
410 url = hg.parseurl(url)[0]
414 url = hg.parseurl(url)[0]
411 ui.status(_('comparing with %s\n') % util.hidepassword(url))
415 ui.status(_('comparing with %s\n') % util.hidepassword(url))
412
416
413 revs = [r for r in revs if r >= 0]
417 revs = [r for r in revs if r >= 0]
414 if not revs:
418 if not revs:
415 revs = [len(repo) - 1]
419 revs = [len(repo) - 1]
416 revs = repo.revs('outgoing(%s) and ::%ld', dest or '', revs)
420 revs = repo.revs('outgoing(%s) and ::%ld', dest or '', revs)
417 if not revs:
421 if not revs:
418 ui.status(_("no changes found\n"))
422 ui.status(_("no changes found\n"))
419 return revs
423 return revs
420
424
421 emailopts = [
425 emailopts = [
422 ('', 'body', None, _('send patches as inline message text (default)')),
426 ('', 'body', None, _('send patches as inline message text (default)')),
423 ('a', 'attach', None, _('send patches as attachments')),
427 ('a', 'attach', None, _('send patches as attachments')),
424 ('i', 'inline', None, _('send patches as inline attachments')),
428 ('i', 'inline', None, _('send patches as inline attachments')),
425 ('', 'bcc', [], _('email addresses of blind carbon copy recipients')),
429 ('', 'bcc', [], _('email addresses of blind carbon copy recipients')),
426 ('c', 'cc', [], _('email addresses of copy recipients')),
430 ('c', 'cc', [], _('email addresses of copy recipients')),
427 ('', 'confirm', None, _('ask for confirmation before sending')),
431 ('', 'confirm', None, _('ask for confirmation before sending')),
428 ('d', 'diffstat', None, _('add diffstat output to messages')),
432 ('d', 'diffstat', None, _('add diffstat output to messages')),
429 ('', 'date', '', _('use the given date as the sending date')),
433 ('', 'date', '', _('use the given date as the sending date')),
430 ('', 'desc', '', _('use the given file as the series description')),
434 ('', 'desc', '', _('use the given file as the series description')),
431 ('f', 'from', '', _('email address of sender')),
435 ('f', 'from', '', _('email address of sender')),
432 ('n', 'test', None, _('print messages that would be sent')),
436 ('n', 'test', None, _('print messages that would be sent')),
433 ('m', 'mbox', '', _('write messages to mbox file instead of sending them')),
437 ('m', 'mbox', '', _('write messages to mbox file instead of sending them')),
434 ('', 'reply-to', [], _('email addresses replies should be sent to')),
438 ('', 'reply-to', [], _('email addresses replies should be sent to')),
435 ('s', 'subject', '', _('subject of first message (intro or single patch)')),
439 ('s', 'subject', '', _('subject of first message (intro or single patch)')),
436 ('', 'in-reply-to', '', _('message identifier to reply to')),
440 ('', 'in-reply-to', '', _('message identifier to reply to')),
437 ('', 'flag', [], _('flags to add in subject prefixes')),
441 ('', 'flag', [], _('flags to add in subject prefixes')),
438 ('t', 'to', [], _('email addresses of recipients'))]
442 ('t', 'to', [], _('email addresses of recipients'))]
439
443
440 @command('email',
444 @command('email',
441 [('g', 'git', None, _('use git extended diff format')),
445 [('g', 'git', None, _('use git extended diff format')),
442 ('', 'plain', None, _('omit hg patch header')),
446 ('', 'plain', None, _('omit hg patch header')),
443 ('o', 'outgoing', None,
447 ('o', 'outgoing', None,
444 _('send changes not found in the target repository')),
448 _('send changes not found in the target repository')),
445 ('b', 'bundle', None, _('send changes not in target as a binary bundle')),
449 ('b', 'bundle', None, _('send changes not in target as a binary bundle')),
446 ('B', 'bookmark', '', _('send changes only reachable by given bookmark')),
450 ('B', 'bookmark', '', _('send changes only reachable by given bookmark')),
447 ('', 'bundlename', 'bundle',
451 ('', 'bundlename', 'bundle',
448 _('name of the bundle attachment file'), _('NAME')),
452 _('name of the bundle attachment file'), _('NAME')),
449 ('r', 'rev', [], _('a revision to send'), _('REV')),
453 ('r', 'rev', [], _('a revision to send'), _('REV')),
450 ('', 'force', None, _('run even when remote repository is unrelated '
454 ('', 'force', None, _('run even when remote repository is unrelated '
451 '(with -b/--bundle)')),
455 '(with -b/--bundle)')),
452 ('', 'base', [], _('a base changeset to specify instead of a destination '
456 ('', 'base', [], _('a base changeset to specify instead of a destination '
453 '(with -b/--bundle)'), _('REV')),
457 '(with -b/--bundle)'), _('REV')),
454 ('', 'intro', None, _('send an introduction email for a single patch')),
458 ('', 'intro', None, _('send an introduction email for a single patch')),
455 ] + emailopts + cmdutil.remoteopts,
459 ] + emailopts + cmdutil.remoteopts,
456 _('hg email [OPTION]... [DEST]...'))
460 _('hg email [OPTION]... [DEST]...'))
457 def email(ui, repo, *revs, **opts):
461 def email(ui, repo, *revs, **opts):
458 '''send changesets by email
462 '''send changesets by email
459
463
460 By default, diffs are sent in the format generated by
464 By default, diffs are sent in the format generated by
461 :hg:`export`, one per message. The series starts with a "[PATCH 0
465 :hg:`export`, one per message. The series starts with a "[PATCH 0
462 of N]" introduction, which describes the series as a whole.
466 of N]" introduction, which describes the series as a whole.
463
467
464 Each patch email has a Subject line of "[PATCH M of N] ...", using
468 Each patch email has a Subject line of "[PATCH M of N] ...", using
465 the first line of the changeset description as the subject text.
469 the first line of the changeset description as the subject text.
466 The message contains two or three parts. First, the changeset
470 The message contains two or three parts. First, the changeset
467 description.
471 description.
468
472
469 With the -d/--diffstat option, if the diffstat program is
473 With the -d/--diffstat option, if the diffstat program is
470 installed, the result of running diffstat on the patch is inserted.
474 installed, the result of running diffstat on the patch is inserted.
471
475
472 Finally, the patch itself, as generated by :hg:`export`.
476 Finally, the patch itself, as generated by :hg:`export`.
473
477
474 With the -d/--diffstat or --confirm options, you will be presented
478 With the -d/--diffstat or --confirm options, you will be presented
475 with a final summary of all messages and asked for confirmation before
479 with a final summary of all messages and asked for confirmation before
476 the messages are sent.
480 the messages are sent.
477
481
478 By default the patch is included as text in the email body for
482 By default the patch is included as text in the email body for
479 easy reviewing. Using the -a/--attach option will instead create
483 easy reviewing. Using the -a/--attach option will instead create
480 an attachment for the patch. With -i/--inline an inline attachment
484 an attachment for the patch. With -i/--inline an inline attachment
481 will be created. You can include a patch both as text in the email
485 will be created. You can include a patch both as text in the email
482 body and as a regular or an inline attachment by combining the
486 body and as a regular or an inline attachment by combining the
483 -a/--attach or -i/--inline with the --body option.
487 -a/--attach or -i/--inline with the --body option.
484
488
485 With -B/--bookmark changesets reachable by the given bookmark are
489 With -B/--bookmark changesets reachable by the given bookmark are
486 selected.
490 selected.
487
491
488 With -o/--outgoing, emails will be generated for patches not found
492 With -o/--outgoing, emails will be generated for patches not found
489 in the destination repository (or only those which are ancestors
493 in the destination repository (or only those which are ancestors
490 of the specified revisions if any are provided)
494 of the specified revisions if any are provided)
491
495
492 With -b/--bundle, changesets are selected as for --outgoing, but a
496 With -b/--bundle, changesets are selected as for --outgoing, but a
493 single email containing a binary Mercurial bundle as an attachment
497 single email containing a binary Mercurial bundle as an attachment
494 will be sent. Use the ``patchbomb.bundletype`` config option to
498 will be sent. Use the ``patchbomb.bundletype`` config option to
495 control the bundle type as with :hg:`bundle --type`.
499 control the bundle type as with :hg:`bundle --type`.
496
500
497 With -m/--mbox, instead of previewing each patchbomb message in a
501 With -m/--mbox, instead of previewing each patchbomb message in a
498 pager or sending the messages directly, it will create a UNIX
502 pager or sending the messages directly, it will create a UNIX
499 mailbox file with the patch emails. This mailbox file can be
503 mailbox file with the patch emails. This mailbox file can be
500 previewed with any mail user agent which supports UNIX mbox
504 previewed with any mail user agent which supports UNIX mbox
501 files.
505 files.
502
506
503 With -n/--test, all steps will run, but mail will not be sent.
507 With -n/--test, all steps will run, but mail will not be sent.
504 You will be prompted for an email recipient address, a subject and
508 You will be prompted for an email recipient address, a subject and
505 an introductory message describing the patches of your patchbomb.
509 an introductory message describing the patches of your patchbomb.
506 Then when all is done, patchbomb messages are displayed.
510 Then when all is done, patchbomb messages are displayed.
507
511
508 In case email sending fails, you will find a backup of your series
512 In case email sending fails, you will find a backup of your series
509 introductory message in ``.hg/last-email.txt``.
513 introductory message in ``.hg/last-email.txt``.
510
514
511 The default behavior of this command can be customized through
515 The default behavior of this command can be customized through
512 configuration. (See :hg:`help patchbomb` for details)
516 configuration. (See :hg:`help patchbomb` for details)
513
517
514 Examples::
518 Examples::
515
519
516 hg email -r 3000 # send patch 3000 only
520 hg email -r 3000 # send patch 3000 only
517 hg email -r 3000 -r 3001 # send patches 3000 and 3001
521 hg email -r 3000 -r 3001 # send patches 3000 and 3001
518 hg email -r 3000:3005 # send patches 3000 through 3005
522 hg email -r 3000:3005 # send patches 3000 through 3005
519 hg email 3000 # send patch 3000 (deprecated)
523 hg email 3000 # send patch 3000 (deprecated)
520
524
521 hg email -o # send all patches not in default
525 hg email -o # send all patches not in default
522 hg email -o DEST # send all patches not in DEST
526 hg email -o DEST # send all patches not in DEST
523 hg email -o -r 3000 # send all ancestors of 3000 not in default
527 hg email -o -r 3000 # send all ancestors of 3000 not in default
524 hg email -o -r 3000 DEST # send all ancestors of 3000 not in DEST
528 hg email -o -r 3000 DEST # send all ancestors of 3000 not in DEST
525
529
526 hg email -B feature # send all ancestors of feature bookmark
530 hg email -B feature # send all ancestors of feature bookmark
527
531
528 hg email -b # send bundle of all patches not in default
532 hg email -b # send bundle of all patches not in default
529 hg email -b DEST # send bundle of all patches not in DEST
533 hg email -b DEST # send bundle of all patches not in DEST
530 hg email -b -r 3000 # bundle of all ancestors of 3000 not in default
534 hg email -b -r 3000 # bundle of all ancestors of 3000 not in default
531 hg email -b -r 3000 DEST # bundle of all ancestors of 3000 not in DEST
535 hg email -b -r 3000 DEST # bundle of all ancestors of 3000 not in DEST
532
536
533 hg email -o -m mbox && # generate an mbox file...
537 hg email -o -m mbox && # generate an mbox file...
534 mutt -R -f mbox # ... and view it with mutt
538 mutt -R -f mbox # ... and view it with mutt
535 hg email -o -m mbox && # generate an mbox file ...
539 hg email -o -m mbox && # generate an mbox file ...
536 formail -s sendmail \\ # ... and use formail to send from the mbox
540 formail -s sendmail \\ # ... and use formail to send from the mbox
537 -bm -t < mbox # ... using sendmail
541 -bm -t < mbox # ... using sendmail
538
542
539 Before using this command, you will need to enable email in your
543 Before using this command, you will need to enable email in your
540 hgrc. See the [email] section in hgrc(5) for details.
544 hgrc. See the [email] section in hgrc(5) for details.
541 '''
545 '''
542
546
543 _charsets = mail._charsets(ui)
547 _charsets = mail._charsets(ui)
544
548
545 bundle = opts.get('bundle')
549 bundle = opts.get('bundle')
546 date = opts.get('date')
550 date = opts.get('date')
547 mbox = opts.get('mbox')
551 mbox = opts.get('mbox')
548 outgoing = opts.get('outgoing')
552 outgoing = opts.get('outgoing')
549 rev = opts.get('rev')
553 rev = opts.get('rev')
550 bookmark = opts.get('bookmark')
554 bookmark = opts.get('bookmark')
551
555
552 if not (opts.get('test') or mbox):
556 if not (opts.get('test') or mbox):
553 # really sending
557 # really sending
554 mail.validateconfig(ui)
558 mail.validateconfig(ui)
555
559
556 if not (revs or rev or outgoing or bundle or bookmark):
560 if not (revs or rev or outgoing or bundle or bookmark):
557 raise error.Abort(_('specify at least one changeset with -B, -r or -o'))
561 raise error.Abort(_('specify at least one changeset with -B, -r or -o'))
558
562
559 if outgoing and bundle:
563 if outgoing and bundle:
560 raise error.Abort(_("--outgoing mode always on with --bundle;"
564 raise error.Abort(_("--outgoing mode always on with --bundle;"
561 " do not re-specify --outgoing"))
565 " do not re-specify --outgoing"))
562 if rev and bookmark:
566 if rev and bookmark:
563 raise error.Abort(_("-r and -B are mutually exclusive"))
567 raise error.Abort(_("-r and -B are mutually exclusive"))
564
568
565 if outgoing or bundle:
569 if outgoing or bundle:
566 if len(revs) > 1:
570 if len(revs) > 1:
567 raise error.Abort(_("too many destinations"))
571 raise error.Abort(_("too many destinations"))
568 if revs:
572 if revs:
569 dest = revs[0]
573 dest = revs[0]
570 else:
574 else:
571 dest = None
575 dest = None
572 revs = []
576 revs = []
573
577
574 if rev:
578 if rev:
575 if revs:
579 if revs:
576 raise error.Abort(_('use only one form to specify the revision'))
580 raise error.Abort(_('use only one form to specify the revision'))
577 revs = rev
581 revs = rev
578 elif bookmark:
582 elif bookmark:
579 if bookmark not in repo._bookmarks:
583 if bookmark not in repo._bookmarks:
580 raise error.Abort(_("bookmark '%s' not found") % bookmark)
584 raise error.Abort(_("bookmark '%s' not found") % bookmark)
581 revs = repair.stripbmrevset(repo, bookmark)
585 revs = repair.stripbmrevset(repo, bookmark)
582
586
583 revs = scmutil.revrange(repo, revs)
587 revs = scmutil.revrange(repo, revs)
584 if outgoing:
588 if outgoing:
585 revs = _getoutgoing(repo, dest, revs)
589 revs = _getoutgoing(repo, dest, revs)
586 if bundle:
590 if bundle:
587 opts['revs'] = [str(r) for r in revs]
591 opts['revs'] = [str(r) for r in revs]
588
592
589 # check if revision exist on the public destination
593 # check if revision exist on the public destination
590 publicurl = repo.ui.config('patchbomb', 'publicurl')
594 publicurl = repo.ui.config('patchbomb', 'publicurl')
591 if publicurl:
595 if publicurl:
592 repo.ui.debug('checking that revision exist in the public repo')
596 repo.ui.debug('checking that revision exist in the public repo')
593 try:
597 try:
594 publicpeer = hg.peer(repo, {}, publicurl)
598 publicpeer = hg.peer(repo, {}, publicurl)
595 except error.RepoError:
599 except error.RepoError:
596 repo.ui.write_err(_('unable to access public repo: %s\n')
600 repo.ui.write_err(_('unable to access public repo: %s\n')
597 % publicurl)
601 % publicurl)
598 raise
602 raise
599 if not publicpeer.capable('known'):
603 if not publicpeer.capable('known'):
600 repo.ui.debug('skipping existence checks: public repo too old')
604 repo.ui.debug('skipping existence checks: public repo too old')
601 else:
605 else:
602 out = [repo[r] for r in revs]
606 out = [repo[r] for r in revs]
603 known = publicpeer.known(h.node() for h in out)
607 known = publicpeer.known(h.node() for h in out)
604 missing = []
608 missing = []
605 for idx, h in enumerate(out):
609 for idx, h in enumerate(out):
606 if not known[idx]:
610 if not known[idx]:
607 missing.append(h)
611 missing.append(h)
608 if missing:
612 if missing:
609 if 1 < len(missing):
613 if 1 < len(missing):
610 msg = _('public "%s" is missing %s and %i others')
614 msg = _('public "%s" is missing %s and %i others')
611 msg %= (publicurl, missing[0], len(missing) - 1)
615 msg %= (publicurl, missing[0], len(missing) - 1)
612 else:
616 else:
613 msg = _('public url %s is missing %s')
617 msg = _('public url %s is missing %s')
614 msg %= (publicurl, missing[0])
618 msg %= (publicurl, missing[0])
615 revhint = ' '.join('-r %s' % h
619 revhint = ' '.join('-r %s' % h
616 for h in repo.set('heads(%ld)', missing))
620 for h in repo.set('heads(%ld)', missing))
617 hint = _("use 'hg push %s %s'") % (publicurl, revhint)
621 hint = _("use 'hg push %s %s'") % (publicurl, revhint)
618 raise error.Abort(msg, hint=hint)
622 raise error.Abort(msg, hint=hint)
619
623
620 # start
624 # start
621 if date:
625 if date:
622 start_time = util.parsedate(date)
626 start_time = util.parsedate(date)
623 else:
627 else:
624 start_time = util.makedate()
628 start_time = util.makedate()
625
629
626 def genmsgid(id):
630 def genmsgid(id):
627 return '<%s.%s@%s>' % (id[:20], int(start_time[0]), socket.getfqdn())
631 return '<%s.%s@%s>' % (id[:20], int(start_time[0]), socket.getfqdn())
628
632
629 # deprecated config: patchbomb.from
633 # deprecated config: patchbomb.from
630 sender = (opts.get('from') or ui.config('email', 'from') or
634 sender = (opts.get('from') or ui.config('email', 'from') or
631 ui.config('patchbomb', 'from') or
635 ui.config('patchbomb', 'from') or
632 prompt(ui, 'From', ui.username()))
636 prompt(ui, 'From', ui.username()))
633
637
634 if bundle:
638 if bundle:
635 bundledata = _getbundle(repo, dest, **opts)
639 bundledata = _getbundle(repo, dest, **opts)
636 bundleopts = opts.copy()
640 bundleopts = opts.copy()
637 bundleopts.pop('bundle', None) # already processed
641 bundleopts.pop('bundle', None) # already processed
638 msgs = _getbundlemsgs(repo, sender, bundledata, **bundleopts)
642 msgs = _getbundlemsgs(repo, sender, bundledata, **bundleopts)
639 else:
643 else:
640 msgs = _getpatchmsgs(repo, sender, revs, **opts)
644 msgs = _getpatchmsgs(repo, sender, revs, **opts)
641
645
642 showaddrs = []
646 showaddrs = []
643
647
644 def getaddrs(header, ask=False, default=None):
648 def getaddrs(header, ask=False, default=None):
645 configkey = header.lower()
649 configkey = header.lower()
646 opt = header.replace('-', '_').lower()
650 opt = header.replace('-', '_').lower()
647 addrs = opts.get(opt)
651 addrs = opts.get(opt)
648 if addrs:
652 if addrs:
649 showaddrs.append('%s: %s' % (header, ', '.join(addrs)))
653 showaddrs.append('%s: %s' % (header, ', '.join(addrs)))
650 return mail.addrlistencode(ui, addrs, _charsets, opts.get('test'))
654 return mail.addrlistencode(ui, addrs, _charsets, opts.get('test'))
651
655
652 # not on the command line: fallback to config and then maybe ask
656 # not on the command line: fallback to config and then maybe ask
653 addr = (ui.config('email', configkey) or
657 addr = (ui.config('email', configkey) or
654 ui.config('patchbomb', configkey))
658 ui.config('patchbomb', configkey))
655 if not addr:
659 if not addr:
656 specified = (ui.hasconfig('email', configkey) or
660 specified = (ui.hasconfig('email', configkey) or
657 ui.hasconfig('patchbomb', configkey))
661 ui.hasconfig('patchbomb', configkey))
658 if not specified and ask:
662 if not specified and ask:
659 addr = prompt(ui, header, default=default)
663 addr = prompt(ui, header, default=default)
660 if addr:
664 if addr:
661 showaddrs.append('%s: %s' % (header, addr))
665 showaddrs.append('%s: %s' % (header, addr))
662 return mail.addrlistencode(ui, [addr], _charsets, opts.get('test'))
666 return mail.addrlistencode(ui, [addr], _charsets, opts.get('test'))
663 elif default:
667 elif default:
664 return mail.addrlistencode(
668 return mail.addrlistencode(
665 ui, [default], _charsets, opts.get('test'))
669 ui, [default], _charsets, opts.get('test'))
666 return []
670 return []
667
671
668 to = getaddrs('To', ask=True)
672 to = getaddrs('To', ask=True)
669 if not to:
673 if not to:
670 # we can get here in non-interactive mode
674 # we can get here in non-interactive mode
671 raise error.Abort(_('no recipient addresses provided'))
675 raise error.Abort(_('no recipient addresses provided'))
672 cc = getaddrs('Cc', ask=True, default='')
676 cc = getaddrs('Cc', ask=True, default='')
673 bcc = getaddrs('Bcc')
677 bcc = getaddrs('Bcc')
674 replyto = getaddrs('Reply-To')
678 replyto = getaddrs('Reply-To')
675
679
676 confirm = ui.configbool('patchbomb', 'confirm')
680 confirm = ui.configbool('patchbomb', 'confirm')
677 confirm |= bool(opts.get('diffstat') or opts.get('confirm'))
681 confirm |= bool(opts.get('diffstat') or opts.get('confirm'))
678
682
679 if confirm:
683 if confirm:
680 ui.write(_('\nFinal summary:\n\n'), label='patchbomb.finalsummary')
684 ui.write(_('\nFinal summary:\n\n'), label='patchbomb.finalsummary')
681 ui.write(('From: %s\n' % sender), label='patchbomb.from')
685 ui.write(('From: %s\n' % sender), label='patchbomb.from')
682 for addr in showaddrs:
686 for addr in showaddrs:
683 ui.write('%s\n' % addr, label='patchbomb.to')
687 ui.write('%s\n' % addr, label='patchbomb.to')
684 for m, subj, ds in msgs:
688 for m, subj, ds in msgs:
685 ui.write(('Subject: %s\n' % subj), label='patchbomb.subject')
689 ui.write(('Subject: %s\n' % subj), label='patchbomb.subject')
686 if ds:
690 if ds:
687 ui.write(ds, label='patchbomb.diffstats')
691 ui.write(ds, label='patchbomb.diffstats')
688 ui.write('\n')
692 ui.write('\n')
689 if ui.promptchoice(_('are you sure you want to send (yn)?'
693 if ui.promptchoice(_('are you sure you want to send (yn)?'
690 '$$ &Yes $$ &No')):
694 '$$ &Yes $$ &No')):
691 raise error.Abort(_('patchbomb canceled'))
695 raise error.Abort(_('patchbomb canceled'))
692
696
693 ui.write('\n')
697 ui.write('\n')
694
698
695 parent = opts.get('in_reply_to') or None
699 parent = opts.get('in_reply_to') or None
696 # angle brackets may be omitted, they're not semantically part of the msg-id
700 # angle brackets may be omitted, they're not semantically part of the msg-id
697 if parent is not None:
701 if parent is not None:
698 if not parent.startswith('<'):
702 if not parent.startswith('<'):
699 parent = '<' + parent
703 parent = '<' + parent
700 if not parent.endswith('>'):
704 if not parent.endswith('>'):
701 parent += '>'
705 parent += '>'
702
706
703 sender_addr = emailmod.Utils.parseaddr(sender)[1]
707 sender_addr = emailmod.Utils.parseaddr(sender)[1]
704 sender = mail.addressencode(ui, sender, _charsets, opts.get('test'))
708 sender = mail.addressencode(ui, sender, _charsets, opts.get('test'))
705 sendmail = None
709 sendmail = None
706 firstpatch = None
710 firstpatch = None
707 for i, (m, subj, ds) in enumerate(msgs):
711 for i, (m, subj, ds) in enumerate(msgs):
708 try:
712 try:
709 m['Message-Id'] = genmsgid(m['X-Mercurial-Node'])
713 m['Message-Id'] = genmsgid(m['X-Mercurial-Node'])
710 if not firstpatch:
714 if not firstpatch:
711 firstpatch = m['Message-Id']
715 firstpatch = m['Message-Id']
712 m['X-Mercurial-Series-Id'] = firstpatch
716 m['X-Mercurial-Series-Id'] = firstpatch
713 except TypeError:
717 except TypeError:
714 m['Message-Id'] = genmsgid('patchbomb')
718 m['Message-Id'] = genmsgid('patchbomb')
715 if parent:
719 if parent:
716 m['In-Reply-To'] = parent
720 m['In-Reply-To'] = parent
717 m['References'] = parent
721 m['References'] = parent
718 if not parent or 'X-Mercurial-Node' not in m:
722 if not parent or 'X-Mercurial-Node' not in m:
719 parent = m['Message-Id']
723 parent = m['Message-Id']
720
724
721 m['User-Agent'] = 'Mercurial-patchbomb/%s' % util.version()
725 m['User-Agent'] = 'Mercurial-patchbomb/%s' % util.version()
722 m['Date'] = emailmod.Utils.formatdate(start_time[0], localtime=True)
726 m['Date'] = emailmod.Utils.formatdate(start_time[0], localtime=True)
723
727
724 start_time = (start_time[0] + 1, start_time[1])
728 start_time = (start_time[0] + 1, start_time[1])
725 m['From'] = sender
729 m['From'] = sender
726 m['To'] = ', '.join(to)
730 m['To'] = ', '.join(to)
727 if cc:
731 if cc:
728 m['Cc'] = ', '.join(cc)
732 m['Cc'] = ', '.join(cc)
729 if bcc:
733 if bcc:
730 m['Bcc'] = ', '.join(bcc)
734 m['Bcc'] = ', '.join(bcc)
731 if replyto:
735 if replyto:
732 m['Reply-To'] = ', '.join(replyto)
736 m['Reply-To'] = ', '.join(replyto)
733 if opts.get('test'):
737 if opts.get('test'):
734 ui.status(_('displaying '), subj, ' ...\n')
738 ui.status(_('displaying '), subj, ' ...\n')
735 ui.pager('email')
739 ui.pager('email')
736 generator = emailmod.Generator.Generator(ui, mangle_from_=False)
740 generator = emailmod.Generator.Generator(ui, mangle_from_=False)
737 try:
741 try:
738 generator.flatten(m, 0)
742 generator.flatten(m, 0)
739 ui.write('\n')
743 ui.write('\n')
740 except IOError as inst:
744 except IOError as inst:
741 if inst.errno != errno.EPIPE:
745 if inst.errno != errno.EPIPE:
742 raise
746 raise
743 else:
747 else:
744 if not sendmail:
748 if not sendmail:
745 sendmail = mail.connect(ui, mbox=mbox)
749 sendmail = mail.connect(ui, mbox=mbox)
746 ui.status(_('sending '), subj, ' ...\n')
750 ui.status(_('sending '), subj, ' ...\n')
747 ui.progress(_('sending'), i, item=subj, total=len(msgs),
751 ui.progress(_('sending'), i, item=subj, total=len(msgs),
748 unit=_('emails'))
752 unit=_('emails'))
749 if not mbox:
753 if not mbox:
750 # Exim does not remove the Bcc field
754 # Exim does not remove the Bcc field
751 del m['Bcc']
755 del m['Bcc']
752 fp = stringio()
756 fp = stringio()
753 generator = emailmod.Generator.Generator(fp, mangle_from_=False)
757 generator = emailmod.Generator.Generator(fp, mangle_from_=False)
754 generator.flatten(m, 0)
758 generator.flatten(m, 0)
755 sendmail(sender_addr, to + bcc + cc, fp.getvalue())
759 sendmail(sender_addr, to + bcc + cc, fp.getvalue())
756
760
757 ui.progress(_('writing'), None)
761 ui.progress(_('writing'), None)
758 ui.progress(_('sending'), None)
762 ui.progress(_('sending'), None)
@@ -1,2166 +1,2221 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 )
23 )
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 branchmap,
26 branchmap,
27 bundle2,
27 bundle2,
28 changegroup,
28 changegroup,
29 changelog,
29 changelog,
30 color,
30 color,
31 context,
31 context,
32 dirstate,
32 dirstate,
33 dirstateguard,
33 dirstateguard,
34 encoding,
34 encoding,
35 error,
35 error,
36 exchange,
36 exchange,
37 extensions,
37 extensions,
38 filelog,
38 filelog,
39 hook,
39 hook,
40 lock as lockmod,
40 lock as lockmod,
41 manifest,
41 manifest,
42 match as matchmod,
42 match as matchmod,
43 merge as mergemod,
43 merge as mergemod,
44 mergeutil,
44 mergeutil,
45 namespaces,
45 namespaces,
46 obsolete,
46 obsolete,
47 pathutil,
47 pathutil,
48 peer,
48 peer,
49 phases,
49 phases,
50 pushkey,
50 pushkey,
51 pycompat,
51 pycompat,
52 repoview,
52 repoview,
53 revset,
53 revset,
54 revsetlang,
54 revsetlang,
55 scmutil,
55 scmutil,
56 sparse,
56 sparse,
57 store,
57 store,
58 subrepo,
58 subrepo,
59 tags as tagsmod,
59 tags as tagsmod,
60 transaction,
60 transaction,
61 txnutil,
61 txnutil,
62 util,
62 util,
63 vfs as vfsmod,
63 vfs as vfsmod,
64 )
64 )
65
65
66 release = lockmod.release
66 release = lockmod.release
67 urlerr = util.urlerr
67 urlerr = util.urlerr
68 urlreq = util.urlreq
68 urlreq = util.urlreq
69
69
70 # set of (path, vfs-location) tuples. vfs-location is:
70 # set of (path, vfs-location) tuples. vfs-location is:
71 # - 'plain for vfs relative paths
71 # - 'plain for vfs relative paths
72 # - '' for svfs relative paths
72 # - '' for svfs relative paths
73 _cachedfiles = set()
73 _cachedfiles = set()
74
74
75 class _basefilecache(scmutil.filecache):
75 class _basefilecache(scmutil.filecache):
76 """All filecache usage on repo are done for logic that should be unfiltered
76 """All filecache usage on repo are done for logic that should be unfiltered
77 """
77 """
78 def __get__(self, repo, type=None):
78 def __get__(self, repo, type=None):
79 if repo is None:
79 if repo is None:
80 return self
80 return self
81 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
81 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
82 def __set__(self, repo, value):
82 def __set__(self, repo, value):
83 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
83 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
84 def __delete__(self, repo):
84 def __delete__(self, repo):
85 return super(_basefilecache, self).__delete__(repo.unfiltered())
85 return super(_basefilecache, self).__delete__(repo.unfiltered())
86
86
87 class repofilecache(_basefilecache):
87 class repofilecache(_basefilecache):
88 """filecache for files in .hg but outside of .hg/store"""
88 """filecache for files in .hg but outside of .hg/store"""
89 def __init__(self, *paths):
89 def __init__(self, *paths):
90 super(repofilecache, self).__init__(*paths)
90 super(repofilecache, self).__init__(*paths)
91 for path in paths:
91 for path in paths:
92 _cachedfiles.add((path, 'plain'))
92 _cachedfiles.add((path, 'plain'))
93
93
94 def join(self, obj, fname):
94 def join(self, obj, fname):
95 return obj.vfs.join(fname)
95 return obj.vfs.join(fname)
96
96
97 class storecache(_basefilecache):
97 class storecache(_basefilecache):
98 """filecache for files in the store"""
98 """filecache for files in the store"""
99 def __init__(self, *paths):
99 def __init__(self, *paths):
100 super(storecache, self).__init__(*paths)
100 super(storecache, self).__init__(*paths)
101 for path in paths:
101 for path in paths:
102 _cachedfiles.add((path, ''))
102 _cachedfiles.add((path, ''))
103
103
104 def join(self, obj, fname):
104 def join(self, obj, fname):
105 return obj.sjoin(fname)
105 return obj.sjoin(fname)
106
106
107 def isfilecached(repo, name):
107 def isfilecached(repo, name):
108 """check if a repo has already cached "name" filecache-ed property
108 """check if a repo has already cached "name" filecache-ed property
109
109
110 This returns (cachedobj-or-None, iscached) tuple.
110 This returns (cachedobj-or-None, iscached) tuple.
111 """
111 """
112 cacheentry = repo.unfiltered()._filecache.get(name, None)
112 cacheentry = repo.unfiltered()._filecache.get(name, None)
113 if not cacheentry:
113 if not cacheentry:
114 return None, False
114 return None, False
115 return cacheentry.obj, True
115 return cacheentry.obj, True
116
116
117 class unfilteredpropertycache(util.propertycache):
117 class unfilteredpropertycache(util.propertycache):
118 """propertycache that apply to unfiltered repo only"""
118 """propertycache that apply to unfiltered repo only"""
119
119
120 def __get__(self, repo, type=None):
120 def __get__(self, repo, type=None):
121 unfi = repo.unfiltered()
121 unfi = repo.unfiltered()
122 if unfi is repo:
122 if unfi is repo:
123 return super(unfilteredpropertycache, self).__get__(unfi)
123 return super(unfilteredpropertycache, self).__get__(unfi)
124 return getattr(unfi, self.name)
124 return getattr(unfi, self.name)
125
125
126 class filteredpropertycache(util.propertycache):
126 class filteredpropertycache(util.propertycache):
127 """propertycache that must take filtering in account"""
127 """propertycache that must take filtering in account"""
128
128
129 def cachevalue(self, obj, value):
129 def cachevalue(self, obj, value):
130 object.__setattr__(obj, self.name, value)
130 object.__setattr__(obj, self.name, value)
131
131
132
132
133 def hasunfilteredcache(repo, name):
133 def hasunfilteredcache(repo, name):
134 """check if a repo has an unfilteredpropertycache value for <name>"""
134 """check if a repo has an unfilteredpropertycache value for <name>"""
135 return name in vars(repo.unfiltered())
135 return name in vars(repo.unfiltered())
136
136
137 def unfilteredmethod(orig):
137 def unfilteredmethod(orig):
138 """decorate method that always need to be run on unfiltered version"""
138 """decorate method that always need to be run on unfiltered version"""
139 def wrapper(repo, *args, **kwargs):
139 def wrapper(repo, *args, **kwargs):
140 return orig(repo.unfiltered(), *args, **kwargs)
140 return orig(repo.unfiltered(), *args, **kwargs)
141 return wrapper
141 return wrapper
142
142
143 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
143 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
144 'unbundle'}
144 'unbundle'}
145 legacycaps = moderncaps.union({'changegroupsubset'})
145 legacycaps = moderncaps.union({'changegroupsubset'})
146
146
147 class localpeer(peer.peerrepository):
147 class localpeer(peer.peerrepository):
148 '''peer for a local repo; reflects only the most recent API'''
148 '''peer for a local repo; reflects only the most recent API'''
149
149
150 def __init__(self, repo, caps=None):
150 def __init__(self, repo, caps=None):
151 if caps is None:
151 if caps is None:
152 caps = moderncaps.copy()
152 caps = moderncaps.copy()
153 peer.peerrepository.__init__(self)
153 peer.peerrepository.__init__(self)
154 self._repo = repo.filtered('served')
154 self._repo = repo.filtered('served')
155 self.ui = repo.ui
155 self.ui = repo.ui
156 self._caps = repo._restrictcapabilities(caps)
156 self._caps = repo._restrictcapabilities(caps)
157 self.requirements = repo.requirements
157 self.requirements = repo.requirements
158 self.supportedformats = repo.supportedformats
158 self.supportedformats = repo.supportedformats
159
159
160 def close(self):
160 def close(self):
161 self._repo.close()
161 self._repo.close()
162
162
163 def _capabilities(self):
163 def _capabilities(self):
164 return self._caps
164 return self._caps
165
165
166 def local(self):
166 def local(self):
167 return self._repo
167 return self._repo
168
168
169 def canpush(self):
169 def canpush(self):
170 return True
170 return True
171
171
172 def url(self):
172 def url(self):
173 return self._repo.url()
173 return self._repo.url()
174
174
175 def lookup(self, key):
175 def lookup(self, key):
176 return self._repo.lookup(key)
176 return self._repo.lookup(key)
177
177
178 def branchmap(self):
178 def branchmap(self):
179 return self._repo.branchmap()
179 return self._repo.branchmap()
180
180
181 def heads(self):
181 def heads(self):
182 return self._repo.heads()
182 return self._repo.heads()
183
183
184 def known(self, nodes):
184 def known(self, nodes):
185 return self._repo.known(nodes)
185 return self._repo.known(nodes)
186
186
187 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
187 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
188 **kwargs):
188 **kwargs):
189 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
189 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
190 common=common, bundlecaps=bundlecaps,
190 common=common, bundlecaps=bundlecaps,
191 **kwargs)
191 **kwargs)
192 cb = util.chunkbuffer(chunks)
192 cb = util.chunkbuffer(chunks)
193
193
194 if exchange.bundle2requested(bundlecaps):
194 if exchange.bundle2requested(bundlecaps):
195 # When requesting a bundle2, getbundle returns a stream to make the
195 # When requesting a bundle2, getbundle returns a stream to make the
196 # wire level function happier. We need to build a proper object
196 # wire level function happier. We need to build a proper object
197 # from it in local peer.
197 # from it in local peer.
198 return bundle2.getunbundler(self.ui, cb)
198 return bundle2.getunbundler(self.ui, cb)
199 else:
199 else:
200 return changegroup.getunbundler('01', cb, None)
200 return changegroup.getunbundler('01', cb, None)
201
201
202 # TODO We might want to move the next two calls into legacypeer and add
202 # TODO We might want to move the next two calls into legacypeer and add
203 # unbundle instead.
203 # unbundle instead.
204
204
205 def unbundle(self, cg, heads, url):
205 def unbundle(self, cg, heads, url):
206 """apply a bundle on a repo
206 """apply a bundle on a repo
207
207
208 This function handles the repo locking itself."""
208 This function handles the repo locking itself."""
209 try:
209 try:
210 try:
210 try:
211 cg = exchange.readbundle(self.ui, cg, None)
211 cg = exchange.readbundle(self.ui, cg, None)
212 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
212 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
213 if util.safehasattr(ret, 'getchunks'):
213 if util.safehasattr(ret, 'getchunks'):
214 # This is a bundle20 object, turn it into an unbundler.
214 # This is a bundle20 object, turn it into an unbundler.
215 # This little dance should be dropped eventually when the
215 # This little dance should be dropped eventually when the
216 # API is finally improved.
216 # API is finally improved.
217 stream = util.chunkbuffer(ret.getchunks())
217 stream = util.chunkbuffer(ret.getchunks())
218 ret = bundle2.getunbundler(self.ui, stream)
218 ret = bundle2.getunbundler(self.ui, stream)
219 return ret
219 return ret
220 except Exception as exc:
220 except Exception as exc:
221 # If the exception contains output salvaged from a bundle2
221 # If the exception contains output salvaged from a bundle2
222 # reply, we need to make sure it is printed before continuing
222 # reply, we need to make sure it is printed before continuing
223 # to fail. So we build a bundle2 with such output and consume
223 # to fail. So we build a bundle2 with such output and consume
224 # it directly.
224 # it directly.
225 #
225 #
226 # This is not very elegant but allows a "simple" solution for
226 # This is not very elegant but allows a "simple" solution for
227 # issue4594
227 # issue4594
228 output = getattr(exc, '_bundle2salvagedoutput', ())
228 output = getattr(exc, '_bundle2salvagedoutput', ())
229 if output:
229 if output:
230 bundler = bundle2.bundle20(self._repo.ui)
230 bundler = bundle2.bundle20(self._repo.ui)
231 for out in output:
231 for out in output:
232 bundler.addpart(out)
232 bundler.addpart(out)
233 stream = util.chunkbuffer(bundler.getchunks())
233 stream = util.chunkbuffer(bundler.getchunks())
234 b = bundle2.getunbundler(self.ui, stream)
234 b = bundle2.getunbundler(self.ui, stream)
235 bundle2.processbundle(self._repo, b)
235 bundle2.processbundle(self._repo, b)
236 raise
236 raise
237 except error.PushRaced as exc:
237 except error.PushRaced as exc:
238 raise error.ResponseError(_('push failed:'), str(exc))
238 raise error.ResponseError(_('push failed:'), str(exc))
239
239
240 def lock(self):
240 def lock(self):
241 return self._repo.lock()
241 return self._repo.lock()
242
242
243 def pushkey(self, namespace, key, old, new):
243 def pushkey(self, namespace, key, old, new):
244 return self._repo.pushkey(namespace, key, old, new)
244 return self._repo.pushkey(namespace, key, old, new)
245
245
246 def listkeys(self, namespace):
246 def listkeys(self, namespace):
247 return self._repo.listkeys(namespace)
247 return self._repo.listkeys(namespace)
248
248
249 def debugwireargs(self, one, two, three=None, four=None, five=None):
249 def debugwireargs(self, one, two, three=None, four=None, five=None):
250 '''used to test argument passing over the wire'''
250 '''used to test argument passing over the wire'''
251 return "%s %s %s %s %s" % (one, two, three, four, five)
251 return "%s %s %s %s %s" % (one, two, three, four, five)
252
252
253 class locallegacypeer(localpeer):
253 class locallegacypeer(localpeer):
254 '''peer extension which implements legacy methods too; used for tests with
254 '''peer extension which implements legacy methods too; used for tests with
255 restricted capabilities'''
255 restricted capabilities'''
256
256
257 def __init__(self, repo):
257 def __init__(self, repo):
258 localpeer.__init__(self, repo, caps=legacycaps)
258 localpeer.__init__(self, repo, caps=legacycaps)
259
259
260 def branches(self, nodes):
260 def branches(self, nodes):
261 return self._repo.branches(nodes)
261 return self._repo.branches(nodes)
262
262
263 def between(self, pairs):
263 def between(self, pairs):
264 return self._repo.between(pairs)
264 return self._repo.between(pairs)
265
265
266 def changegroup(self, basenodes, source):
266 def changegroup(self, basenodes, source):
267 return changegroup.changegroup(self._repo, basenodes, source)
267 return changegroup.changegroup(self._repo, basenodes, source)
268
268
269 def changegroupsubset(self, bases, heads, source):
269 def changegroupsubset(self, bases, heads, source):
270 return changegroup.changegroupsubset(self._repo, bases, heads, source)
270 return changegroup.changegroupsubset(self._repo, bases, heads, source)
271
271
272 # Increment the sub-version when the revlog v2 format changes to lock out old
272 # Increment the sub-version when the revlog v2 format changes to lock out old
273 # clients.
273 # clients.
274 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
274 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
275
275
276 class localrepository(object):
276 class localrepository(object):
277
277
278 supportedformats = {
278 supportedformats = {
279 'revlogv1',
279 'revlogv1',
280 'generaldelta',
280 'generaldelta',
281 'treemanifest',
281 'treemanifest',
282 'manifestv2',
282 'manifestv2',
283 REVLOGV2_REQUIREMENT,
283 REVLOGV2_REQUIREMENT,
284 }
284 }
285 _basesupported = supportedformats | {
285 _basesupported = supportedformats | {
286 'store',
286 'store',
287 'fncache',
287 'fncache',
288 'shared',
288 'shared',
289 'relshared',
289 'relshared',
290 'dotencode',
290 'dotencode',
291 }
291 }
292 openerreqs = {
292 openerreqs = {
293 'revlogv1',
293 'revlogv1',
294 'generaldelta',
294 'generaldelta',
295 'treemanifest',
295 'treemanifest',
296 'manifestv2',
296 'manifestv2',
297 }
297 }
298
298
299 # a list of (ui, featureset) functions.
299 # a list of (ui, featureset) functions.
300 # only functions defined in module of enabled extensions are invoked
300 # only functions defined in module of enabled extensions are invoked
301 featuresetupfuncs = set()
301 featuresetupfuncs = set()
302
302
303 # list of prefix for file which can be written without 'wlock'
304 # Extensions should extend this list when needed
305 _wlockfreeprefix = {
306 # We migh consider requiring 'wlock' for the next
307 # two, but pretty much all the existing code assume
308 # wlock is not needed so we keep them excluded for
309 # now.
310 'hgrc',
311 'requires',
312 # XXX cache is a complicatged business someone
313 # should investigate this in depth at some point
314 'cache/',
315 # XXX shouldn't be dirstate covered by the wlock?
316 'dirstate',
317 # XXX bisect was still a bit too messy at the time
318 # this changeset was introduced. Someone should fix
319 # the remainig bit and drop this line
320 'bisect.state',
321 }
322
303 def __init__(self, baseui, path, create=False):
323 def __init__(self, baseui, path, create=False):
304 self.requirements = set()
324 self.requirements = set()
305 self.filtername = None
325 self.filtername = None
306 # wvfs: rooted at the repository root, used to access the working copy
326 # wvfs: rooted at the repository root, used to access the working copy
307 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
327 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
308 # vfs: rooted at .hg, used to access repo files outside of .hg/store
328 # vfs: rooted at .hg, used to access repo files outside of .hg/store
309 self.vfs = None
329 self.vfs = None
310 # svfs: usually rooted at .hg/store, used to access repository history
330 # svfs: usually rooted at .hg/store, used to access repository history
311 # If this is a shared repository, this vfs may point to another
331 # If this is a shared repository, this vfs may point to another
312 # repository's .hg/store directory.
332 # repository's .hg/store directory.
313 self.svfs = None
333 self.svfs = None
314 self.root = self.wvfs.base
334 self.root = self.wvfs.base
315 self.path = self.wvfs.join(".hg")
335 self.path = self.wvfs.join(".hg")
316 self.origroot = path
336 self.origroot = path
317 # These auditor are not used by the vfs,
337 # These auditor are not used by the vfs,
318 # only used when writing this comment: basectx.match
338 # only used when writing this comment: basectx.match
319 self.auditor = pathutil.pathauditor(self.root, self._checknested)
339 self.auditor = pathutil.pathauditor(self.root, self._checknested)
320 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
340 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
321 realfs=False)
341 realfs=False)
322 self.vfs = vfsmod.vfs(self.path)
323 self.baseui = baseui
342 self.baseui = baseui
324 self.ui = baseui.copy()
343 self.ui = baseui.copy()
325 self.ui.copy = baseui.copy # prevent copying repo configuration
344 self.ui.copy = baseui.copy # prevent copying repo configuration
345 self.vfs = vfsmod.vfs(self.path)
346 if (self.ui.configbool('devel', 'all-warnings') or
347 self.ui.configbool('devel', 'check-locks')):
348 self.vfs.audit = self._getvfsward(self.vfs.audit)
326 # A list of callback to shape the phase if no data were found.
349 # A list of callback to shape the phase if no data were found.
327 # Callback are in the form: func(repo, roots) --> processed root.
350 # Callback are in the form: func(repo, roots) --> processed root.
328 # This list it to be filled by extension during repo setup
351 # This list it to be filled by extension during repo setup
329 self._phasedefaults = []
352 self._phasedefaults = []
330 try:
353 try:
331 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
354 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
332 self._loadextensions()
355 self._loadextensions()
333 except IOError:
356 except IOError:
334 pass
357 pass
335
358
336 if self.featuresetupfuncs:
359 if self.featuresetupfuncs:
337 self.supported = set(self._basesupported) # use private copy
360 self.supported = set(self._basesupported) # use private copy
338 extmods = set(m.__name__ for n, m
361 extmods = set(m.__name__ for n, m
339 in extensions.extensions(self.ui))
362 in extensions.extensions(self.ui))
340 for setupfunc in self.featuresetupfuncs:
363 for setupfunc in self.featuresetupfuncs:
341 if setupfunc.__module__ in extmods:
364 if setupfunc.__module__ in extmods:
342 setupfunc(self.ui, self.supported)
365 setupfunc(self.ui, self.supported)
343 else:
366 else:
344 self.supported = self._basesupported
367 self.supported = self._basesupported
345 color.setup(self.ui)
368 color.setup(self.ui)
346
369
347 # Add compression engines.
370 # Add compression engines.
348 for name in util.compengines:
371 for name in util.compengines:
349 engine = util.compengines[name]
372 engine = util.compengines[name]
350 if engine.revlogheader():
373 if engine.revlogheader():
351 self.supported.add('exp-compression-%s' % name)
374 self.supported.add('exp-compression-%s' % name)
352
375
353 if not self.vfs.isdir():
376 if not self.vfs.isdir():
354 if create:
377 if create:
355 self.requirements = newreporequirements(self)
378 self.requirements = newreporequirements(self)
356
379
357 if not self.wvfs.exists():
380 if not self.wvfs.exists():
358 self.wvfs.makedirs()
381 self.wvfs.makedirs()
359 self.vfs.makedir(notindexed=True)
382 self.vfs.makedir(notindexed=True)
360
383
361 if 'store' in self.requirements:
384 if 'store' in self.requirements:
362 self.vfs.mkdir("store")
385 self.vfs.mkdir("store")
363
386
364 # create an invalid changelog
387 # create an invalid changelog
365 self.vfs.append(
388 self.vfs.append(
366 "00changelog.i",
389 "00changelog.i",
367 '\0\0\0\2' # represents revlogv2
390 '\0\0\0\2' # represents revlogv2
368 ' dummy changelog to prevent using the old repo layout'
391 ' dummy changelog to prevent using the old repo layout'
369 )
392 )
370 else:
393 else:
371 raise error.RepoError(_("repository %s not found") % path)
394 raise error.RepoError(_("repository %s not found") % path)
372 elif create:
395 elif create:
373 raise error.RepoError(_("repository %s already exists") % path)
396 raise error.RepoError(_("repository %s already exists") % path)
374 else:
397 else:
375 try:
398 try:
376 self.requirements = scmutil.readrequires(
399 self.requirements = scmutil.readrequires(
377 self.vfs, self.supported)
400 self.vfs, self.supported)
378 except IOError as inst:
401 except IOError as inst:
379 if inst.errno != errno.ENOENT:
402 if inst.errno != errno.ENOENT:
380 raise
403 raise
381
404
382 self.sharedpath = self.path
405 self.sharedpath = self.path
383 try:
406 try:
384 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
407 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
385 if 'relshared' in self.requirements:
408 if 'relshared' in self.requirements:
386 sharedpath = self.vfs.join(sharedpath)
409 sharedpath = self.vfs.join(sharedpath)
387 vfs = vfsmod.vfs(sharedpath, realpath=True)
410 vfs = vfsmod.vfs(sharedpath, realpath=True)
388 s = vfs.base
411 s = vfs.base
389 if not vfs.exists():
412 if not vfs.exists():
390 raise error.RepoError(
413 raise error.RepoError(
391 _('.hg/sharedpath points to nonexistent directory %s') % s)
414 _('.hg/sharedpath points to nonexistent directory %s') % s)
392 self.sharedpath = s
415 self.sharedpath = s
393 except IOError as inst:
416 except IOError as inst:
394 if inst.errno != errno.ENOENT:
417 if inst.errno != errno.ENOENT:
395 raise
418 raise
396
419
397 self.store = store.store(
420 self.store = store.store(
398 self.requirements, self.sharedpath, vfsmod.vfs)
421 self.requirements, self.sharedpath, vfsmod.vfs)
399 self.spath = self.store.path
422 self.spath = self.store.path
400 self.svfs = self.store.vfs
423 self.svfs = self.store.vfs
401 self.sjoin = self.store.join
424 self.sjoin = self.store.join
402 self.vfs.createmode = self.store.createmode
425 self.vfs.createmode = self.store.createmode
403 self._applyopenerreqs()
426 self._applyopenerreqs()
404 if create:
427 if create:
405 self._writerequirements()
428 self._writerequirements()
406
429
407 self._dirstatevalidatewarned = False
430 self._dirstatevalidatewarned = False
408
431
409 self._branchcaches = {}
432 self._branchcaches = {}
410 self._revbranchcache = None
433 self._revbranchcache = None
411 self.filterpats = {}
434 self.filterpats = {}
412 self._datafilters = {}
435 self._datafilters = {}
413 self._transref = self._lockref = self._wlockref = None
436 self._transref = self._lockref = self._wlockref = None
414
437
415 # A cache for various files under .hg/ that tracks file changes,
438 # A cache for various files under .hg/ that tracks file changes,
416 # (used by the filecache decorator)
439 # (used by the filecache decorator)
417 #
440 #
418 # Maps a property name to its util.filecacheentry
441 # Maps a property name to its util.filecacheentry
419 self._filecache = {}
442 self._filecache = {}
420
443
421 # hold sets of revision to be filtered
444 # hold sets of revision to be filtered
422 # should be cleared when something might have changed the filter value:
445 # should be cleared when something might have changed the filter value:
423 # - new changesets,
446 # - new changesets,
424 # - phase change,
447 # - phase change,
425 # - new obsolescence marker,
448 # - new obsolescence marker,
426 # - working directory parent change,
449 # - working directory parent change,
427 # - bookmark changes
450 # - bookmark changes
428 self.filteredrevcache = {}
451 self.filteredrevcache = {}
429
452
430 # post-dirstate-status hooks
453 # post-dirstate-status hooks
431 self._postdsstatus = []
454 self._postdsstatus = []
432
455
433 # Cache of types representing filtered repos.
456 # Cache of types representing filtered repos.
434 self._filteredrepotypes = weakref.WeakKeyDictionary()
457 self._filteredrepotypes = weakref.WeakKeyDictionary()
435
458
436 # generic mapping between names and nodes
459 # generic mapping between names and nodes
437 self.names = namespaces.namespaces()
460 self.names = namespaces.namespaces()
438
461
439 # Key to signature value.
462 # Key to signature value.
440 self._sparsesignaturecache = {}
463 self._sparsesignaturecache = {}
441 # Signature to cached matcher instance.
464 # Signature to cached matcher instance.
442 self._sparsematchercache = {}
465 self._sparsematchercache = {}
443
466
467 def _getvfsward(self, origfunc):
468 """build a ward for self.vfs"""
469 rref = weakref.ref(self)
470 def checkvfs(path, mode=None):
471 ret = origfunc(path, mode=mode)
472 repo = rref()
473 if (repo is None
474 or not util.safehasattr(repo, '_wlockref')
475 or not util.safehasattr(repo, '_lockref')):
476 return
477 if mode in (None, 'r', 'rb'):
478 return
479 if path.startswith(repo.path):
480 # truncate name relative to the repository (.hg)
481 path = path[len(repo.path) + 1:]
482 if path.startswith('journal.'):
483 # journal is covered by 'lock'
484 if repo._currentlock(repo._lockref) is None:
485 repo.ui.develwarn('write with no lock: "%s"' % path,
486 stacklevel=2)
487 elif repo._currentlock(repo._wlockref) is None:
488 # rest of vfs files are covered by 'wlock'
489 #
490 # exclude special files
491 for prefix in self._wlockfreeprefix:
492 if path.startswith(prefix):
493 return
494 repo.ui.develwarn('write with no wlock: "%s"' % path,
495 stacklevel=2)
496 return ret
497 return checkvfs
498
444 def close(self):
499 def close(self):
445 self._writecaches()
500 self._writecaches()
446
501
447 def _loadextensions(self):
502 def _loadextensions(self):
448 extensions.loadall(self.ui)
503 extensions.loadall(self.ui)
449
504
450 def _writecaches(self):
505 def _writecaches(self):
451 if self._revbranchcache:
506 if self._revbranchcache:
452 self._revbranchcache.write()
507 self._revbranchcache.write()
453
508
454 def _restrictcapabilities(self, caps):
509 def _restrictcapabilities(self, caps):
455 if self.ui.configbool('experimental', 'bundle2-advertise', True):
510 if self.ui.configbool('experimental', 'bundle2-advertise', True):
456 caps = set(caps)
511 caps = set(caps)
457 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
512 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
458 caps.add('bundle2=' + urlreq.quote(capsblob))
513 caps.add('bundle2=' + urlreq.quote(capsblob))
459 return caps
514 return caps
460
515
461 def _applyopenerreqs(self):
516 def _applyopenerreqs(self):
462 self.svfs.options = dict((r, 1) for r in self.requirements
517 self.svfs.options = dict((r, 1) for r in self.requirements
463 if r in self.openerreqs)
518 if r in self.openerreqs)
464 # experimental config: format.chunkcachesize
519 # experimental config: format.chunkcachesize
465 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
520 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
466 if chunkcachesize is not None:
521 if chunkcachesize is not None:
467 self.svfs.options['chunkcachesize'] = chunkcachesize
522 self.svfs.options['chunkcachesize'] = chunkcachesize
468 # experimental config: format.maxchainlen
523 # experimental config: format.maxchainlen
469 maxchainlen = self.ui.configint('format', 'maxchainlen')
524 maxchainlen = self.ui.configint('format', 'maxchainlen')
470 if maxchainlen is not None:
525 if maxchainlen is not None:
471 self.svfs.options['maxchainlen'] = maxchainlen
526 self.svfs.options['maxchainlen'] = maxchainlen
472 # experimental config: format.manifestcachesize
527 # experimental config: format.manifestcachesize
473 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
528 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
474 if manifestcachesize is not None:
529 if manifestcachesize is not None:
475 self.svfs.options['manifestcachesize'] = manifestcachesize
530 self.svfs.options['manifestcachesize'] = manifestcachesize
476 # experimental config: format.aggressivemergedeltas
531 # experimental config: format.aggressivemergedeltas
477 aggressivemergedeltas = self.ui.configbool('format',
532 aggressivemergedeltas = self.ui.configbool('format',
478 'aggressivemergedeltas')
533 'aggressivemergedeltas')
479 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
534 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
480 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
535 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
481 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
536 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
482 if 0 <= chainspan:
537 if 0 <= chainspan:
483 self.svfs.options['maxdeltachainspan'] = chainspan
538 self.svfs.options['maxdeltachainspan'] = chainspan
484
539
485 for r in self.requirements:
540 for r in self.requirements:
486 if r.startswith('exp-compression-'):
541 if r.startswith('exp-compression-'):
487 self.svfs.options['compengine'] = r[len('exp-compression-'):]
542 self.svfs.options['compengine'] = r[len('exp-compression-'):]
488
543
489 # TODO move "revlogv2" to openerreqs once finalized.
544 # TODO move "revlogv2" to openerreqs once finalized.
490 if REVLOGV2_REQUIREMENT in self.requirements:
545 if REVLOGV2_REQUIREMENT in self.requirements:
491 self.svfs.options['revlogv2'] = True
546 self.svfs.options['revlogv2'] = True
492
547
493 def _writerequirements(self):
548 def _writerequirements(self):
494 scmutil.writerequires(self.vfs, self.requirements)
549 scmutil.writerequires(self.vfs, self.requirements)
495
550
496 def _checknested(self, path):
551 def _checknested(self, path):
497 """Determine if path is a legal nested repository."""
552 """Determine if path is a legal nested repository."""
498 if not path.startswith(self.root):
553 if not path.startswith(self.root):
499 return False
554 return False
500 subpath = path[len(self.root) + 1:]
555 subpath = path[len(self.root) + 1:]
501 normsubpath = util.pconvert(subpath)
556 normsubpath = util.pconvert(subpath)
502
557
503 # XXX: Checking against the current working copy is wrong in
558 # XXX: Checking against the current working copy is wrong in
504 # the sense that it can reject things like
559 # the sense that it can reject things like
505 #
560 #
506 # $ hg cat -r 10 sub/x.txt
561 # $ hg cat -r 10 sub/x.txt
507 #
562 #
508 # if sub/ is no longer a subrepository in the working copy
563 # if sub/ is no longer a subrepository in the working copy
509 # parent revision.
564 # parent revision.
510 #
565 #
511 # However, it can of course also allow things that would have
566 # However, it can of course also allow things that would have
512 # been rejected before, such as the above cat command if sub/
567 # been rejected before, such as the above cat command if sub/
513 # is a subrepository now, but was a normal directory before.
568 # is a subrepository now, but was a normal directory before.
514 # The old path auditor would have rejected by mistake since it
569 # The old path auditor would have rejected by mistake since it
515 # panics when it sees sub/.hg/.
570 # panics when it sees sub/.hg/.
516 #
571 #
517 # All in all, checking against the working copy seems sensible
572 # All in all, checking against the working copy seems sensible
518 # since we want to prevent access to nested repositories on
573 # since we want to prevent access to nested repositories on
519 # the filesystem *now*.
574 # the filesystem *now*.
520 ctx = self[None]
575 ctx = self[None]
521 parts = util.splitpath(subpath)
576 parts = util.splitpath(subpath)
522 while parts:
577 while parts:
523 prefix = '/'.join(parts)
578 prefix = '/'.join(parts)
524 if prefix in ctx.substate:
579 if prefix in ctx.substate:
525 if prefix == normsubpath:
580 if prefix == normsubpath:
526 return True
581 return True
527 else:
582 else:
528 sub = ctx.sub(prefix)
583 sub = ctx.sub(prefix)
529 return sub.checknested(subpath[len(prefix) + 1:])
584 return sub.checknested(subpath[len(prefix) + 1:])
530 else:
585 else:
531 parts.pop()
586 parts.pop()
532 return False
587 return False
533
588
534 def peer(self):
589 def peer(self):
535 return localpeer(self) # not cached to avoid reference cycle
590 return localpeer(self) # not cached to avoid reference cycle
536
591
537 def unfiltered(self):
592 def unfiltered(self):
538 """Return unfiltered version of the repository
593 """Return unfiltered version of the repository
539
594
540 Intended to be overwritten by filtered repo."""
595 Intended to be overwritten by filtered repo."""
541 return self
596 return self
542
597
543 def filtered(self, name):
598 def filtered(self, name):
544 """Return a filtered version of a repository"""
599 """Return a filtered version of a repository"""
545 # Python <3.4 easily leaks types via __mro__. See
600 # Python <3.4 easily leaks types via __mro__. See
546 # https://bugs.python.org/issue17950. We cache dynamically
601 # https://bugs.python.org/issue17950. We cache dynamically
547 # created types so this method doesn't leak on every
602 # created types so this method doesn't leak on every
548 # invocation.
603 # invocation.
549
604
550 key = self.unfiltered().__class__
605 key = self.unfiltered().__class__
551 if key not in self._filteredrepotypes:
606 if key not in self._filteredrepotypes:
552 # Build a new type with the repoview mixin and the base
607 # Build a new type with the repoview mixin and the base
553 # class of this repo. Give it a name containing the
608 # class of this repo. Give it a name containing the
554 # filter name to aid debugging.
609 # filter name to aid debugging.
555 bases = (repoview.repoview, key)
610 bases = (repoview.repoview, key)
556 cls = type(r'%sfilteredrepo' % name, bases, {})
611 cls = type(r'%sfilteredrepo' % name, bases, {})
557 self._filteredrepotypes[key] = cls
612 self._filteredrepotypes[key] = cls
558
613
559 return self._filteredrepotypes[key](self, name)
614 return self._filteredrepotypes[key](self, name)
560
615
561 @repofilecache('bookmarks', 'bookmarks.current')
616 @repofilecache('bookmarks', 'bookmarks.current')
562 def _bookmarks(self):
617 def _bookmarks(self):
563 return bookmarks.bmstore(self)
618 return bookmarks.bmstore(self)
564
619
565 @property
620 @property
566 def _activebookmark(self):
621 def _activebookmark(self):
567 return self._bookmarks.active
622 return self._bookmarks.active
568
623
569 # _phaserevs and _phasesets depend on changelog. what we need is to
624 # _phaserevs and _phasesets depend on changelog. what we need is to
570 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
625 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
571 # can't be easily expressed in filecache mechanism.
626 # can't be easily expressed in filecache mechanism.
572 @storecache('phaseroots', '00changelog.i')
627 @storecache('phaseroots', '00changelog.i')
573 def _phasecache(self):
628 def _phasecache(self):
574 return phases.phasecache(self, self._phasedefaults)
629 return phases.phasecache(self, self._phasedefaults)
575
630
576 @storecache('obsstore')
631 @storecache('obsstore')
577 def obsstore(self):
632 def obsstore(self):
578 return obsolete.makestore(self.ui, self)
633 return obsolete.makestore(self.ui, self)
579
634
580 @storecache('00changelog.i')
635 @storecache('00changelog.i')
581 def changelog(self):
636 def changelog(self):
582 return changelog.changelog(self.svfs,
637 return changelog.changelog(self.svfs,
583 trypending=txnutil.mayhavepending(self.root))
638 trypending=txnutil.mayhavepending(self.root))
584
639
585 def _constructmanifest(self):
640 def _constructmanifest(self):
586 # This is a temporary function while we migrate from manifest to
641 # This is a temporary function while we migrate from manifest to
587 # manifestlog. It allows bundlerepo and unionrepo to intercept the
642 # manifestlog. It allows bundlerepo and unionrepo to intercept the
588 # manifest creation.
643 # manifest creation.
589 return manifest.manifestrevlog(self.svfs)
644 return manifest.manifestrevlog(self.svfs)
590
645
591 @storecache('00manifest.i')
646 @storecache('00manifest.i')
592 def manifestlog(self):
647 def manifestlog(self):
593 return manifest.manifestlog(self.svfs, self)
648 return manifest.manifestlog(self.svfs, self)
594
649
595 @repofilecache('dirstate')
650 @repofilecache('dirstate')
596 def dirstate(self):
651 def dirstate(self):
597 sparsematchfn = lambda: sparse.matcher(self)
652 sparsematchfn = lambda: sparse.matcher(self)
598
653
599 return dirstate.dirstate(self.vfs, self.ui, self.root,
654 return dirstate.dirstate(self.vfs, self.ui, self.root,
600 self._dirstatevalidate, sparsematchfn)
655 self._dirstatevalidate, sparsematchfn)
601
656
602 def _dirstatevalidate(self, node):
657 def _dirstatevalidate(self, node):
603 try:
658 try:
604 self.changelog.rev(node)
659 self.changelog.rev(node)
605 return node
660 return node
606 except error.LookupError:
661 except error.LookupError:
607 if not self._dirstatevalidatewarned:
662 if not self._dirstatevalidatewarned:
608 self._dirstatevalidatewarned = True
663 self._dirstatevalidatewarned = True
609 self.ui.warn(_("warning: ignoring unknown"
664 self.ui.warn(_("warning: ignoring unknown"
610 " working parent %s!\n") % short(node))
665 " working parent %s!\n") % short(node))
611 return nullid
666 return nullid
612
667
613 def __getitem__(self, changeid):
668 def __getitem__(self, changeid):
614 if changeid is None:
669 if changeid is None:
615 return context.workingctx(self)
670 return context.workingctx(self)
616 if isinstance(changeid, slice):
671 if isinstance(changeid, slice):
617 # wdirrev isn't contiguous so the slice shouldn't include it
672 # wdirrev isn't contiguous so the slice shouldn't include it
618 return [context.changectx(self, i)
673 return [context.changectx(self, i)
619 for i in xrange(*changeid.indices(len(self)))
674 for i in xrange(*changeid.indices(len(self)))
620 if i not in self.changelog.filteredrevs]
675 if i not in self.changelog.filteredrevs]
621 try:
676 try:
622 return context.changectx(self, changeid)
677 return context.changectx(self, changeid)
623 except error.WdirUnsupported:
678 except error.WdirUnsupported:
624 return context.workingctx(self)
679 return context.workingctx(self)
625
680
626 def __contains__(self, changeid):
681 def __contains__(self, changeid):
627 """True if the given changeid exists
682 """True if the given changeid exists
628
683
629 error.LookupError is raised if an ambiguous node specified.
684 error.LookupError is raised if an ambiguous node specified.
630 """
685 """
631 try:
686 try:
632 self[changeid]
687 self[changeid]
633 return True
688 return True
634 except error.RepoLookupError:
689 except error.RepoLookupError:
635 return False
690 return False
636
691
637 def __nonzero__(self):
692 def __nonzero__(self):
638 return True
693 return True
639
694
640 __bool__ = __nonzero__
695 __bool__ = __nonzero__
641
696
642 def __len__(self):
697 def __len__(self):
643 return len(self.changelog)
698 return len(self.changelog)
644
699
645 def __iter__(self):
700 def __iter__(self):
646 return iter(self.changelog)
701 return iter(self.changelog)
647
702
648 def revs(self, expr, *args):
703 def revs(self, expr, *args):
649 '''Find revisions matching a revset.
704 '''Find revisions matching a revset.
650
705
651 The revset is specified as a string ``expr`` that may contain
706 The revset is specified as a string ``expr`` that may contain
652 %-formatting to escape certain types. See ``revsetlang.formatspec``.
707 %-formatting to escape certain types. See ``revsetlang.formatspec``.
653
708
654 Revset aliases from the configuration are not expanded. To expand
709 Revset aliases from the configuration are not expanded. To expand
655 user aliases, consider calling ``scmutil.revrange()`` or
710 user aliases, consider calling ``scmutil.revrange()`` or
656 ``repo.anyrevs([expr], user=True)``.
711 ``repo.anyrevs([expr], user=True)``.
657
712
658 Returns a revset.abstractsmartset, which is a list-like interface
713 Returns a revset.abstractsmartset, which is a list-like interface
659 that contains integer revisions.
714 that contains integer revisions.
660 '''
715 '''
661 expr = revsetlang.formatspec(expr, *args)
716 expr = revsetlang.formatspec(expr, *args)
662 m = revset.match(None, expr)
717 m = revset.match(None, expr)
663 return m(self)
718 return m(self)
664
719
665 def set(self, expr, *args):
720 def set(self, expr, *args):
666 '''Find revisions matching a revset and emit changectx instances.
721 '''Find revisions matching a revset and emit changectx instances.
667
722
668 This is a convenience wrapper around ``revs()`` that iterates the
723 This is a convenience wrapper around ``revs()`` that iterates the
669 result and is a generator of changectx instances.
724 result and is a generator of changectx instances.
670
725
671 Revset aliases from the configuration are not expanded. To expand
726 Revset aliases from the configuration are not expanded. To expand
672 user aliases, consider calling ``scmutil.revrange()``.
727 user aliases, consider calling ``scmutil.revrange()``.
673 '''
728 '''
674 for r in self.revs(expr, *args):
729 for r in self.revs(expr, *args):
675 yield self[r]
730 yield self[r]
676
731
677 def anyrevs(self, specs, user=False, localalias=None):
732 def anyrevs(self, specs, user=False, localalias=None):
678 '''Find revisions matching one of the given revsets.
733 '''Find revisions matching one of the given revsets.
679
734
680 Revset aliases from the configuration are not expanded by default. To
735 Revset aliases from the configuration are not expanded by default. To
681 expand user aliases, specify ``user=True``. To provide some local
736 expand user aliases, specify ``user=True``. To provide some local
682 definitions overriding user aliases, set ``localalias`` to
737 definitions overriding user aliases, set ``localalias`` to
683 ``{name: definitionstring}``.
738 ``{name: definitionstring}``.
684 '''
739 '''
685 if user:
740 if user:
686 m = revset.matchany(self.ui, specs, repo=self,
741 m = revset.matchany(self.ui, specs, repo=self,
687 localalias=localalias)
742 localalias=localalias)
688 else:
743 else:
689 m = revset.matchany(None, specs, localalias=localalias)
744 m = revset.matchany(None, specs, localalias=localalias)
690 return m(self)
745 return m(self)
691
746
692 def url(self):
747 def url(self):
693 return 'file:' + self.root
748 return 'file:' + self.root
694
749
695 def hook(self, name, throw=False, **args):
750 def hook(self, name, throw=False, **args):
696 """Call a hook, passing this repo instance.
751 """Call a hook, passing this repo instance.
697
752
698 This a convenience method to aid invoking hooks. Extensions likely
753 This a convenience method to aid invoking hooks. Extensions likely
699 won't call this unless they have registered a custom hook or are
754 won't call this unless they have registered a custom hook or are
700 replacing code that is expected to call a hook.
755 replacing code that is expected to call a hook.
701 """
756 """
702 return hook.hook(self.ui, self, name, throw, **args)
757 return hook.hook(self.ui, self, name, throw, **args)
703
758
704 @filteredpropertycache
759 @filteredpropertycache
705 def _tagscache(self):
760 def _tagscache(self):
706 '''Returns a tagscache object that contains various tags related
761 '''Returns a tagscache object that contains various tags related
707 caches.'''
762 caches.'''
708
763
709 # This simplifies its cache management by having one decorated
764 # This simplifies its cache management by having one decorated
710 # function (this one) and the rest simply fetch things from it.
765 # function (this one) and the rest simply fetch things from it.
711 class tagscache(object):
766 class tagscache(object):
712 def __init__(self):
767 def __init__(self):
713 # These two define the set of tags for this repository. tags
768 # These two define the set of tags for this repository. tags
714 # maps tag name to node; tagtypes maps tag name to 'global' or
769 # maps tag name to node; tagtypes maps tag name to 'global' or
715 # 'local'. (Global tags are defined by .hgtags across all
770 # 'local'. (Global tags are defined by .hgtags across all
716 # heads, and local tags are defined in .hg/localtags.)
771 # heads, and local tags are defined in .hg/localtags.)
717 # They constitute the in-memory cache of tags.
772 # They constitute the in-memory cache of tags.
718 self.tags = self.tagtypes = None
773 self.tags = self.tagtypes = None
719
774
720 self.nodetagscache = self.tagslist = None
775 self.nodetagscache = self.tagslist = None
721
776
722 cache = tagscache()
777 cache = tagscache()
723 cache.tags, cache.tagtypes = self._findtags()
778 cache.tags, cache.tagtypes = self._findtags()
724
779
725 return cache
780 return cache
726
781
727 def tags(self):
782 def tags(self):
728 '''return a mapping of tag to node'''
783 '''return a mapping of tag to node'''
729 t = {}
784 t = {}
730 if self.changelog.filteredrevs:
785 if self.changelog.filteredrevs:
731 tags, tt = self._findtags()
786 tags, tt = self._findtags()
732 else:
787 else:
733 tags = self._tagscache.tags
788 tags = self._tagscache.tags
734 for k, v in tags.iteritems():
789 for k, v in tags.iteritems():
735 try:
790 try:
736 # ignore tags to unknown nodes
791 # ignore tags to unknown nodes
737 self.changelog.rev(v)
792 self.changelog.rev(v)
738 t[k] = v
793 t[k] = v
739 except (error.LookupError, ValueError):
794 except (error.LookupError, ValueError):
740 pass
795 pass
741 return t
796 return t
742
797
743 def _findtags(self):
798 def _findtags(self):
744 '''Do the hard work of finding tags. Return a pair of dicts
799 '''Do the hard work of finding tags. Return a pair of dicts
745 (tags, tagtypes) where tags maps tag name to node, and tagtypes
800 (tags, tagtypes) where tags maps tag name to node, and tagtypes
746 maps tag name to a string like \'global\' or \'local\'.
801 maps tag name to a string like \'global\' or \'local\'.
747 Subclasses or extensions are free to add their own tags, but
802 Subclasses or extensions are free to add their own tags, but
748 should be aware that the returned dicts will be retained for the
803 should be aware that the returned dicts will be retained for the
749 duration of the localrepo object.'''
804 duration of the localrepo object.'''
750
805
751 # XXX what tagtype should subclasses/extensions use? Currently
806 # XXX what tagtype should subclasses/extensions use? Currently
752 # mq and bookmarks add tags, but do not set the tagtype at all.
807 # mq and bookmarks add tags, but do not set the tagtype at all.
753 # Should each extension invent its own tag type? Should there
808 # Should each extension invent its own tag type? Should there
754 # be one tagtype for all such "virtual" tags? Or is the status
809 # be one tagtype for all such "virtual" tags? Or is the status
755 # quo fine?
810 # quo fine?
756
811
757
812
758 # map tag name to (node, hist)
813 # map tag name to (node, hist)
759 alltags = tagsmod.findglobaltags(self.ui, self)
814 alltags = tagsmod.findglobaltags(self.ui, self)
760 # map tag name to tag type
815 # map tag name to tag type
761 tagtypes = dict((tag, 'global') for tag in alltags)
816 tagtypes = dict((tag, 'global') for tag in alltags)
762
817
763 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
818 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
764
819
765 # Build the return dicts. Have to re-encode tag names because
820 # Build the return dicts. Have to re-encode tag names because
766 # the tags module always uses UTF-8 (in order not to lose info
821 # the tags module always uses UTF-8 (in order not to lose info
767 # writing to the cache), but the rest of Mercurial wants them in
822 # writing to the cache), but the rest of Mercurial wants them in
768 # local encoding.
823 # local encoding.
769 tags = {}
824 tags = {}
770 for (name, (node, hist)) in alltags.iteritems():
825 for (name, (node, hist)) in alltags.iteritems():
771 if node != nullid:
826 if node != nullid:
772 tags[encoding.tolocal(name)] = node
827 tags[encoding.tolocal(name)] = node
773 tags['tip'] = self.changelog.tip()
828 tags['tip'] = self.changelog.tip()
774 tagtypes = dict([(encoding.tolocal(name), value)
829 tagtypes = dict([(encoding.tolocal(name), value)
775 for (name, value) in tagtypes.iteritems()])
830 for (name, value) in tagtypes.iteritems()])
776 return (tags, tagtypes)
831 return (tags, tagtypes)
777
832
778 def tagtype(self, tagname):
833 def tagtype(self, tagname):
779 '''
834 '''
780 return the type of the given tag. result can be:
835 return the type of the given tag. result can be:
781
836
782 'local' : a local tag
837 'local' : a local tag
783 'global' : a global tag
838 'global' : a global tag
784 None : tag does not exist
839 None : tag does not exist
785 '''
840 '''
786
841
787 return self._tagscache.tagtypes.get(tagname)
842 return self._tagscache.tagtypes.get(tagname)
788
843
789 def tagslist(self):
844 def tagslist(self):
790 '''return a list of tags ordered by revision'''
845 '''return a list of tags ordered by revision'''
791 if not self._tagscache.tagslist:
846 if not self._tagscache.tagslist:
792 l = []
847 l = []
793 for t, n in self.tags().iteritems():
848 for t, n in self.tags().iteritems():
794 l.append((self.changelog.rev(n), t, n))
849 l.append((self.changelog.rev(n), t, n))
795 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
850 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
796
851
797 return self._tagscache.tagslist
852 return self._tagscache.tagslist
798
853
799 def nodetags(self, node):
854 def nodetags(self, node):
800 '''return the tags associated with a node'''
855 '''return the tags associated with a node'''
801 if not self._tagscache.nodetagscache:
856 if not self._tagscache.nodetagscache:
802 nodetagscache = {}
857 nodetagscache = {}
803 for t, n in self._tagscache.tags.iteritems():
858 for t, n in self._tagscache.tags.iteritems():
804 nodetagscache.setdefault(n, []).append(t)
859 nodetagscache.setdefault(n, []).append(t)
805 for tags in nodetagscache.itervalues():
860 for tags in nodetagscache.itervalues():
806 tags.sort()
861 tags.sort()
807 self._tagscache.nodetagscache = nodetagscache
862 self._tagscache.nodetagscache = nodetagscache
808 return self._tagscache.nodetagscache.get(node, [])
863 return self._tagscache.nodetagscache.get(node, [])
809
864
810 def nodebookmarks(self, node):
865 def nodebookmarks(self, node):
811 """return the list of bookmarks pointing to the specified node"""
866 """return the list of bookmarks pointing to the specified node"""
812 marks = []
867 marks = []
813 for bookmark, n in self._bookmarks.iteritems():
868 for bookmark, n in self._bookmarks.iteritems():
814 if n == node:
869 if n == node:
815 marks.append(bookmark)
870 marks.append(bookmark)
816 return sorted(marks)
871 return sorted(marks)
817
872
818 def branchmap(self):
873 def branchmap(self):
819 '''returns a dictionary {branch: [branchheads]} with branchheads
874 '''returns a dictionary {branch: [branchheads]} with branchheads
820 ordered by increasing revision number'''
875 ordered by increasing revision number'''
821 branchmap.updatecache(self)
876 branchmap.updatecache(self)
822 return self._branchcaches[self.filtername]
877 return self._branchcaches[self.filtername]
823
878
824 @unfilteredmethod
879 @unfilteredmethod
825 def revbranchcache(self):
880 def revbranchcache(self):
826 if not self._revbranchcache:
881 if not self._revbranchcache:
827 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
882 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
828 return self._revbranchcache
883 return self._revbranchcache
829
884
830 def branchtip(self, branch, ignoremissing=False):
885 def branchtip(self, branch, ignoremissing=False):
831 '''return the tip node for a given branch
886 '''return the tip node for a given branch
832
887
833 If ignoremissing is True, then this method will not raise an error.
888 If ignoremissing is True, then this method will not raise an error.
834 This is helpful for callers that only expect None for a missing branch
889 This is helpful for callers that only expect None for a missing branch
835 (e.g. namespace).
890 (e.g. namespace).
836
891
837 '''
892 '''
838 try:
893 try:
839 return self.branchmap().branchtip(branch)
894 return self.branchmap().branchtip(branch)
840 except KeyError:
895 except KeyError:
841 if not ignoremissing:
896 if not ignoremissing:
842 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
897 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
843 else:
898 else:
844 pass
899 pass
845
900
846 def lookup(self, key):
901 def lookup(self, key):
847 return self[key].node()
902 return self[key].node()
848
903
849 def lookupbranch(self, key, remote=None):
904 def lookupbranch(self, key, remote=None):
850 repo = remote or self
905 repo = remote or self
851 if key in repo.branchmap():
906 if key in repo.branchmap():
852 return key
907 return key
853
908
854 repo = (remote and remote.local()) and remote or self
909 repo = (remote and remote.local()) and remote or self
855 return repo[key].branch()
910 return repo[key].branch()
856
911
def known(self, nodes):
    # For each node, report True when it exists in the changelog and is
    # not hidden by the current filter (filteredrevs).
    nodemap = self.changelog.nodemap
    filtered = self.changelog.filteredrevs
    def _visible(node):
        rev = nodemap.get(node)
        return rev is not None and rev not in filtered
    return [_visible(n) for n in nodes]
867
922
def local(self):
    # A local repository answers with itself; non-local peers override
    # this to return a falsy value (see the statichttprepo note in
    # cancopy below).
    return self
870
925
def publishing(self):
    """Return True when this repository publishes changesets."""
    # it's safe (and desirable) to trust the publish flag unconditionally
    # so that we don't finalize changes shared between users via ssh or
    # nfs; hence untrusted=True and a default of True.
    return self.ui.configbool('phases', 'publish', True, untrusted=True)
875
930
def cancopy(self):
    if not self.local():
        # so statichttprepo's override of local() works
        return False
    if self.publishing():
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs
    # non-publishing repositories are always copyable
    return True
884
939
def shared(self):
    '''the type of shared repository (None if not shared)'''
    # A share keeps its store somewhere other than its own .hg path.
    return 'store' if self.sharedpath != self.path else None
890
945
def wjoin(self, f, *insidef):
    # Join ``f`` (and any further components) under the working
    # directory root using the vfs join rules.
    return self.vfs.reljoin(self.root, f, *insidef)
893
948
def file(self, f):
    """Return the filelog for tracked path ``f``.

    A single leading '/' is tolerated and stripped, so 'foo' and
    '/foo' address the same filelog.
    """
    # startswith avoids the IndexError that ``f[0]`` raises for an
    # empty path while preserving behavior for every non-empty input.
    if f.startswith('/'):
        f = f[1:]
    return filelog.filelog(self.svfs, f)
898
953
def changectx(self, changeid):
    # Convenience alias for ``repo[changeid]``.
    return self[changeid]
901
956
def setparents(self, p1, p2=nullid):
    """Set the dirstate parents to (p1, p2) and fix up copy records.

    ``copies`` returned by dirstate.setparents lists entries whose copy
    information was dropped by the parent change and may need to be
    re-established from the new first parent's manifest.
    """
    with self.dirstate.parentchange():
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # dropping the second parent: clear copy records whose
            # source and destination are both absent from p1
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
917
972
def filectx(self, path, changeid=None, fileid=None):
    """Return a filectx for ``path``.

    changeid can be a changeset revision, node, or tag.
    fileid can be a file revision or node.
    """
    return context.filectx(self, path, changeid, fileid)
922
977
def getcwd(self):
    # Delegated to the dirstate, which tracks the working directory.
    return self.dirstate.getcwd()
925
980
def pathto(self, f, cwd=None):
    # Delegated to the dirstate: render repo path ``f`` relative to
    # ``cwd`` (or the current directory when cwd is None).
    return self.dirstate.pathto(f, cwd)
928
983
def _loadfilter(self, filter):
    """Load and cache the list of (matcher, fn, params) triples for the
    config section ``filter`` (e.g. 'encode' or 'decode').

    Each config entry maps a file pattern to a filter command.  The
    command may name a registered data filter (see adddatafilter); any
    text after the name is passed to it as parameters.  Otherwise the
    command is treated as a shell filter run through util.filter.
    """
    if filter not in self.filterpats:
        l = []
        for pat, cmd in self.ui.configitems(filter):
            # '!' explicitly disables a pattern
            if cmd == '!':
                continue
            mf = matchmod.match(self.root, '', [pat])
            fn = None
            params = cmd
            # prefix match against registered python data filters
            for name, filterfn in self._datafilters.iteritems():
                if cmd.startswith(name):
                    fn = filterfn
                    params = cmd[len(name):].lstrip()
                    break
            if not fn:
                # fall back to running the command as a shell filter
                fn = lambda s, c, **kwargs: util.filter(s, c)
            # Wrap old filters not supporting keyword arguments
            if not inspect.getargspec(fn)[2]:
                oldfn = fn
                fn = lambda s, c, **kwargs: oldfn(s, c)
            l.append((mf, fn, params))
        self.filterpats[filter] = l
    return self.filterpats[filter]
952
1007
953 def _filter(self, filterpats, filename, data):
1008 def _filter(self, filterpats, filename, data):
954 for mf, fn, cmd in filterpats:
1009 for mf, fn, cmd in filterpats:
955 if mf(filename):
1010 if mf(filename):
956 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1011 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
957 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1012 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
958 break
1013 break
959
1014
960 return data
1015 return data
961
1016
@unfilteredpropertycache
def _encodefilterpats(self):
    # Lazily-loaded [encode] filter patterns (see _loadfilter).
    return self._loadfilter('encode')
965
1020
@unfilteredpropertycache
def _decodefilterpats(self):
    # Lazily-loaded [decode] filter patterns (see _loadfilter).
    return self._loadfilter('decode')
969
1024
def adddatafilter(self, name, filter):
    # Register a named python data filter; _loadfilter looks these up
    # by prefix match against configured filter commands.
    self._datafilters[name] = filter
972
1027
def wread(self, filename):
    """Read ``filename`` from the working directory and return its
    content with the [encode] filters applied."""
    wvfs = self.wvfs
    if wvfs.islink(filename):
        # a symlink's content is its target path
        data = wvfs.readlink(filename)
    else:
        data = wvfs.read(filename)
    return self._filter(self._encodefilterpats, filename, data)
979
1034
def wwrite(self, filename, data, flags, backgroundclose=False):
    """write ``data`` into ``filename`` in the working directory

    This returns length of written (maybe decoded) data.
    """
    filtered = self._filter(self._decodefilterpats, filename, data)
    if 'l' in flags:
        # symlink: the data is the link target
        self.wvfs.symlink(filtered, filename)
    else:
        self.wvfs.write(filename, filtered,
                        backgroundclose=backgroundclose)
        if 'x' in flags:
            # mark the file executable
            self.wvfs.setflags(filename, False, True)
    return len(filtered)
993
1048
def wwritedata(self, filename, data):
    # Apply the [decode] filters to ``data`` without writing anything.
    return self._filter(self._decodefilterpats, filename, data)
996
1051
def currenttransaction(self):
    """return the current transaction or None if non exists"""
    # _transref is a weakref (or None); dereference it first.
    tr = self._transref() if self._transref else None
    if not tr or not tr.running():
        return None
    return tr
1007
1062
def transaction(self, desc, report=None):
    """Open and return a new transaction named ``desc``.

    If a transaction is already running, return a nested handle on it
    instead.  ``report`` is an optional callable used to print messages
    during rollback; it defaults to ui.warn.  Raises ProgrammingError
    (under devel checks) when no lock is held, and RepoError when an
    abandoned journal is found.
    """
    # developer ward: a transaction without the store lock is a bug
    if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
        if self._currentlock(self._lockref) is None:
            raise error.ProgrammingError('transaction requires locking')
    tr = self.currenttransaction()
    if tr is not None:
        return tr.nest()

    # abort here if the journal already exists
    if self.svfs.exists("journal"):
        raise error.RepoError(
            _("abandoned transaction found"),
            hint=_("run 'hg recover' to clean up transaction"))

    # build a unique transaction id, exposed to hooks as HG_TXNID
    idbase = "%.40f#%f" % (random.random(), time.time())
    ha = hex(hashlib.sha1(idbase).digest())
    txnid = 'TXN:' + ha
    self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

    self._writejournal(desc)
    renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
    if report:
        rp = report
    else:
        rp = self.ui.warn
    vfsmap = {'plain': self.vfs} # root of .hg/
    # we must avoid cyclic reference between repo and transaction.
    reporef = weakref.ref(self)
    # Code to track tag movement
    #
    # Since tags are all handled as file content, it is actually quite hard
    # to track these movement from a code perspective. So we fallback to a
    # tracking at the repository level. One could envision to track changes
    # to the '.hgtags' file through changegroup apply but that fails to
    # cope with case where transaction expose new heads without changegroup
    # being involved (eg: phase movement).
    #
    # For now, We gate the feature behind a flag since this likely comes
    # with performance impacts. The current code run more often than needed
    # and do not use caches as much as it could. The current focus is on
    # the behavior of the feature so we disable it by default. The flag
    # will be removed when we are happy with the performance impact.
    #
    # Once this feature is no longer experimental move the following
    # documentation to the appropriate help section:
    #
    # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
    # tags (new or changed or deleted tags). In addition the details of
    # these changes are made available in a file at:
    #     ``REPOROOT/.hg/changes/tags.changes``.
    # Make sure you check for HG_TAG_MOVED before reading that file as it
    # might exist from a previous transaction even if no tag were touched
    # in this one. Changes are recorded in a line base format::
    #
    #   <action> <hex-node> <tag-name>\n
    #
    # Actions are defined as follow:
    #   "-R": tag is removed,
    #   "+A": tag is added,
    #   "-M": tag is moved (old value),
    #   "+M": tag is moved (new value),
    tracktags = lambda x: None
    # experimental config: experimental.hook-track-tags
    shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
                                         False)
    if desc != 'strip' and shouldtracktags:
        oldheads = self.changelog.headrevs()
        def tracktags(tr2):
            repo = reporef()
            oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
            newheads = repo.changelog.headrevs()
            newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
            # notes: we compare lists here.
            # As we do it only once building set would not be cheaper
            changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
            if changes:
                tr2.hookargs['tag_moved'] = '1'
                with repo.vfs('changes/tags.changes', 'w',
                              atomictemp=True) as changesfile:
                    # note: we do not register the file to the transaction
                    # because we needs it to still exist on the transaction
                    # is close (for txnclose hooks)
                    tagsmod.writediff(changesfile, changes)
    def validate(tr2):
        """will run pre-closing hooks"""
        # XXX the transaction API is a bit lacking here so we take a hacky
        # path for now
        #
        # We cannot add this as a "pending" hooks since the 'tr.hookargs'
        # dict is copied before these run. In addition we needs the data
        # available to in memory hooks too.
        #
        # Moreover, we also need to make sure this runs before txnclose
        # hooks and there is no "pending" mechanism that would execute
        # logic only if hooks are about to run.
        #
        # Fixing this limitation of the transaction is also needed to track
        # other families of changes (bookmarks, phases, obsolescence).
        #
        # This will have to be fixed before we remove the experimental
        # gating.
        tracktags(tr2)
        reporef().hook('pretxnclose', throw=True,
                       txnname=desc, **pycompat.strkwargs(tr.hookargs))
    def releasefn(tr, success):
        repo = reporef()
        if success:
            # this should be explicitly invoked here, because
            # in-memory changes aren't written out at closing
            # transaction, if tr.addfilegenerator (via
            # dirstate.write or so) isn't invoked while
            # transaction running
            repo.dirstate.write(None)
        else:
            # discard all changes (including ones already written
            # out) in this transaction
            repo.dirstate.restorebackup(None, prefix='journal.')

            repo.invalidate(clearfilecache=True)

    tr = transaction.transaction(rp, self.svfs, vfsmap,
                                 "journal",
                                 "undo",
                                 aftertrans(renames),
                                 self.store.createmode,
                                 validator=validate,
                                 releasefn=releasefn,
                                 checkambigfiles=_cachedfiles)
    tr.changes['revs'] = set()
    tr.changes['obsmarkers'] = set()

    tr.hookargs['txnid'] = txnid
    # note: writing the fncache only during finalize mean that the file is
    # outdated when running hooks. As fncache is used for streaming clone,
    # this is not expected to break anything that happen during the hooks.
    tr.addfinalize('flush-fncache', self.store.write)
    def txnclosehook(tr2):
        """To be run if transaction is successful, will schedule a hook run
        """
        # Don't reference tr2 in hook() so we don't hold a reference.
        # This reduces memory consumption when there are multiple
        # transactions per lock. This can likely go away if issue5045
        # fixes the function accumulation.
        hookargs = tr2.hookargs

        def hook():
            reporef().hook('txnclose', throw=False, txnname=desc,
                           **pycompat.strkwargs(hookargs))
        reporef()._afterlock(hook)
    tr.addfinalize('txnclose-hook', txnclosehook)
    tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
    def txnaborthook(tr2):
        """To be run if transaction is aborted
        """
        reporef().hook('txnabort', throw=False, txnname=desc,
                       **tr2.hookargs)
    tr.addabort('txnabort-hook', txnaborthook)
    # avoid eager cache invalidation. in-memory data should be identical
    # to stored data if transaction has no error.
    tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
    self._transref = weakref.ref(tr)
    return tr
1171
1226
1172 def _journalfiles(self):
1227 def _journalfiles(self):
1173 return ((self.svfs, 'journal'),
1228 return ((self.svfs, 'journal'),
1174 (self.vfs, 'journal.dirstate'),
1229 (self.vfs, 'journal.dirstate'),
1175 (self.vfs, 'journal.branch'),
1230 (self.vfs, 'journal.branch'),
1176 (self.vfs, 'journal.desc'),
1231 (self.vfs, 'journal.desc'),
1177 (self.vfs, 'journal.bookmarks'),
1232 (self.vfs, 'journal.bookmarks'),
1178 (self.svfs, 'journal.phaseroots'))
1233 (self.svfs, 'journal.phaseroots'))
1179
1234
def undofiles(self):
    # The undo counterparts of the journal files (journal.* -> undo.*).
    return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1182
1237
@unfilteredmethod
def _writejournal(self, desc):
    # Snapshot dirstate, branch, description, bookmarks and phase roots
    # so an aborted transaction can be rolled back (see _journalfiles).
    self.dirstate.savebackup(None, prefix='journal.')
    self.vfs.write("journal.branch",
                   encoding.fromlocal(self.dirstate.branch()))
    # journal.desc records the pre-transaction changelog length and the
    # transaction description
    self.vfs.write("journal.desc",
                   "%d\n%s\n" % (len(self), desc))
    self.vfs.write("journal.bookmarks",
                   self.vfs.tryread("bookmarks"))
    self.svfs.write("journal.phaseroots",
                    self.svfs.tryread("phaseroots"))
1194
1249
def recover(self):
    """Roll back an interrupted transaction, if one exists.

    Returns True when a journal was found and rolled back, False
    otherwise.
    """
    with self.lock():
        if not self.svfs.exists("journal"):
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
        self.ui.status(_("rolling back interrupted transaction\n"))
        vfsmap = {'': self.svfs,
                  'plain': self.vfs,}
        transaction.rollback(self.svfs, vfsmap, "journal",
                             self.ui.warn,
                             checkambigfiles=_cachedfiles)
        self.invalidate()
        return True
1209
1264
def rollback(self, dryrun=False, force=False):
    """Undo the last transaction; returns 0 on success, 1 when there is
    nothing to roll back."""
    wlock = lock = dsguard = None
    try:
        wlock = self.wlock()
        lock = self.lock()
        if not self.svfs.exists("undo"):
            self.ui.warn(_("no rollback information available\n"))
            return 1
        dsguard = dirstateguard.dirstateguard(self, 'rollback')
        return self._rollback(dryrun, force, dsguard)
    finally:
        # release in reverse acquisition order; any of these may still
        # be None when locking failed part-way
        release(dsguard, lock, wlock)
1224
1279
@unfilteredmethod # Until we get smarter cache management
def _rollback(self, dryrun, force, dsguard):
    """Inner implementation of rollback: restore undo.* snapshots.

    Returns 0 on success.  ``dsguard`` protects the dirstate; it is
    closed when the working directory parents were rolled away so the
    restored dirstate backup is not overwritten.
    """
    ui = self.ui
    try:
        # undo.desc holds "<old changelog length>\n<desc>[\n<detail>]"
        args = self.vfs.read('undo.desc').splitlines()
        (oldlen, desc, detail) = (int(args[0]), args[1], None)
        if len(args) >= 3:
            detail = args[2]
        oldtip = oldlen - 1

        if detail and ui.verbose:
            msg = (_('repository tip rolled back to revision %d'
                     ' (undo %s: %s)\n')
                   % (oldtip, desc, detail))
        else:
            msg = (_('repository tip rolled back to revision %d'
                     ' (undo %s)\n')
                   % (oldtip, desc))
    except IOError:
        msg = _('rolling back unknown transaction\n')
        desc = None

    # refuse to roll back a commit the working directory is not on,
    # unless forced: the commit's data would be lost
    if not force and self['.'] != self['tip'] and desc == 'commit':
        raise error.Abort(
            _('rollback of last commit while not checked out '
              'may lose data'), hint=_('use -f to force'))

    ui.status(msg)
    if dryrun:
        return 0

    parents = self.dirstate.parents()
    self.destroying()
    vfsmap = {'plain': self.vfs, '': self.svfs}
    transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                         checkambigfiles=_cachedfiles)
    if self.vfs.exists('undo.bookmarks'):
        self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
    if self.svfs.exists('undo.phaseroots'):
        self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
    self.invalidate()

    # did the rollback strip the working directory parents?
    parentgone = (parents[0] not in self.changelog.nodemap or
                  parents[1] not in self.changelog.nodemap)
    if parentgone:
        # prevent dirstateguard from overwriting already restored one
        dsguard.close()

        self.dirstate.restorebackup(None, prefix='undo.')
        try:
            branch = self.vfs.read('undo.branch')
            self.dirstate.setbranch(encoding.tolocal(branch))
        except IOError:
            ui.warn(_('named branch could not be reset: '
                      'current branch is still \'%s\'\n')
                    % self.dirstate.branch())

        parents = tuple([p.rev() for p in self[None].parents()])
        if len(parents) > 1:
            ui.status(_('working directory now based on '
                        'revisions %d and %d\n') % parents)
        else:
            ui.status(_('working directory now based on '
                        'revision %d\n') % parents)
        # drop any in-progress merge state: it referred to stripped csets
        mergemod.mergestate.clean(self, self['.'].node())

    # TODO: if we know which new heads may result from this rollback, pass
    # them to destroy(), which will prevent the branchhead cache from being
    # invalidated.
    self.destroyed()
    return 0
1296
1351
1297 def _buildcacheupdater(self, newtransaction):
1352 def _buildcacheupdater(self, newtransaction):
1298 """called during transaction to build the callback updating cache
1353 """called during transaction to build the callback updating cache
1299
1354
1300 Lives on the repository to help extension who might want to augment
1355 Lives on the repository to help extension who might want to augment
1301 this logic. For this purpose, the created transaction is passed to the
1356 this logic. For this purpose, the created transaction is passed to the
1302 method.
1357 method.
1303 """
1358 """
1304 # we must avoid cyclic reference between repo and transaction.
1359 # we must avoid cyclic reference between repo and transaction.
1305 reporef = weakref.ref(self)
1360 reporef = weakref.ref(self)
1306 def updater(tr):
1361 def updater(tr):
1307 repo = reporef()
1362 repo = reporef()
1308 repo.updatecaches(tr)
1363 repo.updatecaches(tr)
1309 return updater
1364 return updater
1310
1365
@unfilteredmethod
def updatecaches(self, tr=None):
    """warm appropriate caches

    If this function is called after a transaction closed, the
    transaction will be available in the 'tr' argument. This can be used
    to selectively update caches relevant to the changes in that
    transaction.
    """
    if tr is not None and tr.hookargs.get('source') == 'strip':
        # During strip, many caches are invalid but
        # later call to `destroyed` will refresh them.
        return

    # refresh when called outside a transaction or when it added revs
    if tr is None or tr.changes['revs']:
        # updating the unfiltered branchmap should refresh all the others,
        self.ui.debug('updating the branch cache\n')
        branchmap.updatecache(self.filtered('served'))
1328
1383
1329 def invalidatecaches(self):
1384 def invalidatecaches(self):
1330
1385
1331 if '_tagscache' in vars(self):
1386 if '_tagscache' in vars(self):
1332 # can't use delattr on proxy
1387 # can't use delattr on proxy
1333 del self.__dict__['_tagscache']
1388 del self.__dict__['_tagscache']
1334
1389
1335 self.unfiltered()._branchcaches.clear()
1390 self.unfiltered()._branchcaches.clear()
1336 self.invalidatevolatilesets()
1391 self.invalidatevolatilesets()
1337 self._sparsesignaturecache.clear()
1392 self._sparsesignaturecache.clear()
1338
1393
1339 def invalidatevolatilesets(self):
1394 def invalidatevolatilesets(self):
1340 self.filteredrevcache.clear()
1395 self.filteredrevcache.clear()
1341 obsolete.clearobscaches(self)
1396 obsolete.clearobscaches(self)
1342
1397
1343 def invalidatedirstate(self):
1398 def invalidatedirstate(self):
1344 '''Invalidates the dirstate, causing the next call to dirstate
1399 '''Invalidates the dirstate, causing the next call to dirstate
1345 to check if it was modified since the last time it was read,
1400 to check if it was modified since the last time it was read,
1346 rereading it if it has.
1401 rereading it if it has.
1347
1402
1348 This is different to dirstate.invalidate() that it doesn't always
1403 This is different to dirstate.invalidate() that it doesn't always
1349 rereads the dirstate. Use dirstate.invalidate() if you want to
1404 rereads the dirstate. Use dirstate.invalidate() if you want to
1350 explicitly read the dirstate again (i.e. restoring it to a previous
1405 explicitly read the dirstate again (i.e. restoring it to a previous
1351 known good state).'''
1406 known good state).'''
1352 if hasunfilteredcache(self, 'dirstate'):
1407 if hasunfilteredcache(self, 'dirstate'):
1353 for k in self.dirstate._filecache:
1408 for k in self.dirstate._filecache:
1354 try:
1409 try:
1355 delattr(self.dirstate, k)
1410 delattr(self.dirstate, k)
1356 except AttributeError:
1411 except AttributeError:
1357 pass
1412 pass
1358 delattr(self.unfiltered(), 'dirstate')
1413 delattr(self.unfiltered(), 'dirstate')
1359
1414
1360 def invalidate(self, clearfilecache=False):
1415 def invalidate(self, clearfilecache=False):
1361 '''Invalidates both store and non-store parts other than dirstate
1416 '''Invalidates both store and non-store parts other than dirstate
1362
1417
1363 If a transaction is running, invalidation of store is omitted,
1418 If a transaction is running, invalidation of store is omitted,
1364 because discarding in-memory changes might cause inconsistency
1419 because discarding in-memory changes might cause inconsistency
1365 (e.g. incomplete fncache causes unintentional failure, but
1420 (e.g. incomplete fncache causes unintentional failure, but
1366 redundant one doesn't).
1421 redundant one doesn't).
1367 '''
1422 '''
1368 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1423 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1369 for k in list(self._filecache.keys()):
1424 for k in list(self._filecache.keys()):
1370 # dirstate is invalidated separately in invalidatedirstate()
1425 # dirstate is invalidated separately in invalidatedirstate()
1371 if k == 'dirstate':
1426 if k == 'dirstate':
1372 continue
1427 continue
1373
1428
1374 if clearfilecache:
1429 if clearfilecache:
1375 del self._filecache[k]
1430 del self._filecache[k]
1376 try:
1431 try:
1377 delattr(unfiltered, k)
1432 delattr(unfiltered, k)
1378 except AttributeError:
1433 except AttributeError:
1379 pass
1434 pass
1380 self.invalidatecaches()
1435 self.invalidatecaches()
1381 if not self.currenttransaction():
1436 if not self.currenttransaction():
1382 # TODO: Changing contents of store outside transaction
1437 # TODO: Changing contents of store outside transaction
1383 # causes inconsistency. We should make in-memory store
1438 # causes inconsistency. We should make in-memory store
1384 # changes detectable, and abort if changed.
1439 # changes detectable, and abort if changed.
1385 self.store.invalidatecaches()
1440 self.store.invalidatecaches()
1386
1441
1387 def invalidateall(self):
1442 def invalidateall(self):
1388 '''Fully invalidates both store and non-store parts, causing the
1443 '''Fully invalidates both store and non-store parts, causing the
1389 subsequent operation to reread any outside changes.'''
1444 subsequent operation to reread any outside changes.'''
1390 # extension should hook this to invalidate its caches
1445 # extension should hook this to invalidate its caches
1391 self.invalidate()
1446 self.invalidate()
1392 self.invalidatedirstate()
1447 self.invalidatedirstate()
1393
1448
1394 @unfilteredmethod
1449 @unfilteredmethod
1395 def _refreshfilecachestats(self, tr):
1450 def _refreshfilecachestats(self, tr):
1396 """Reload stats of cached files so that they are flagged as valid"""
1451 """Reload stats of cached files so that they are flagged as valid"""
1397 for k, ce in self._filecache.items():
1452 for k, ce in self._filecache.items():
1398 if k == 'dirstate' or k not in self.__dict__:
1453 if k == 'dirstate' or k not in self.__dict__:
1399 continue
1454 continue
1400 ce.refresh()
1455 ce.refresh()
1401
1456
1402 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1457 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1403 inheritchecker=None, parentenvvar=None):
1458 inheritchecker=None, parentenvvar=None):
1404 parentlock = None
1459 parentlock = None
1405 # the contents of parentenvvar are used by the underlying lock to
1460 # the contents of parentenvvar are used by the underlying lock to
1406 # determine whether it can be inherited
1461 # determine whether it can be inherited
1407 if parentenvvar is not None:
1462 if parentenvvar is not None:
1408 parentlock = encoding.environ.get(parentenvvar)
1463 parentlock = encoding.environ.get(parentenvvar)
1409 try:
1464 try:
1410 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1465 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1411 acquirefn=acquirefn, desc=desc,
1466 acquirefn=acquirefn, desc=desc,
1412 inheritchecker=inheritchecker,
1467 inheritchecker=inheritchecker,
1413 parentlock=parentlock)
1468 parentlock=parentlock)
1414 except error.LockHeld as inst:
1469 except error.LockHeld as inst:
1415 if not wait:
1470 if not wait:
1416 raise
1471 raise
1417 # show more details for new-style locks
1472 # show more details for new-style locks
1418 if ':' in inst.locker:
1473 if ':' in inst.locker:
1419 host, pid = inst.locker.split(":", 1)
1474 host, pid = inst.locker.split(":", 1)
1420 self.ui.warn(
1475 self.ui.warn(
1421 _("waiting for lock on %s held by process %r "
1476 _("waiting for lock on %s held by process %r "
1422 "on host %r\n") % (desc, pid, host))
1477 "on host %r\n") % (desc, pid, host))
1423 else:
1478 else:
1424 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1479 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1425 (desc, inst.locker))
1480 (desc, inst.locker))
1426 # default to 600 seconds timeout
1481 # default to 600 seconds timeout
1427 l = lockmod.lock(vfs, lockname,
1482 l = lockmod.lock(vfs, lockname,
1428 int(self.ui.config("ui", "timeout", "600")),
1483 int(self.ui.config("ui", "timeout", "600")),
1429 releasefn=releasefn, acquirefn=acquirefn,
1484 releasefn=releasefn, acquirefn=acquirefn,
1430 desc=desc)
1485 desc=desc)
1431 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1486 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1432 return l
1487 return l
1433
1488
1434 def _afterlock(self, callback):
1489 def _afterlock(self, callback):
1435 """add a callback to be run when the repository is fully unlocked
1490 """add a callback to be run when the repository is fully unlocked
1436
1491
1437 The callback will be executed when the outermost lock is released
1492 The callback will be executed when the outermost lock is released
1438 (with wlock being higher level than 'lock')."""
1493 (with wlock being higher level than 'lock')."""
1439 for ref in (self._wlockref, self._lockref):
1494 for ref in (self._wlockref, self._lockref):
1440 l = ref and ref()
1495 l = ref and ref()
1441 if l and l.held:
1496 if l and l.held:
1442 l.postrelease.append(callback)
1497 l.postrelease.append(callback)
1443 break
1498 break
1444 else: # no lock have been found.
1499 else: # no lock have been found.
1445 callback()
1500 callback()
1446
1501
1447 def lock(self, wait=True):
1502 def lock(self, wait=True):
1448 '''Lock the repository store (.hg/store) and return a weak reference
1503 '''Lock the repository store (.hg/store) and return a weak reference
1449 to the lock. Use this before modifying the store (e.g. committing or
1504 to the lock. Use this before modifying the store (e.g. committing or
1450 stripping). If you are opening a transaction, get a lock as well.)
1505 stripping). If you are opening a transaction, get a lock as well.)
1451
1506
1452 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1507 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1453 'wlock' first to avoid a dead-lock hazard.'''
1508 'wlock' first to avoid a dead-lock hazard.'''
1454 l = self._currentlock(self._lockref)
1509 l = self._currentlock(self._lockref)
1455 if l is not None:
1510 if l is not None:
1456 l.lock()
1511 l.lock()
1457 return l
1512 return l
1458
1513
1459 l = self._lock(self.svfs, "lock", wait, None,
1514 l = self._lock(self.svfs, "lock", wait, None,
1460 self.invalidate, _('repository %s') % self.origroot)
1515 self.invalidate, _('repository %s') % self.origroot)
1461 self._lockref = weakref.ref(l)
1516 self._lockref = weakref.ref(l)
1462 return l
1517 return l
1463
1518
1464 def _wlockchecktransaction(self):
1519 def _wlockchecktransaction(self):
1465 if self.currenttransaction() is not None:
1520 if self.currenttransaction() is not None:
1466 raise error.LockInheritanceContractViolation(
1521 raise error.LockInheritanceContractViolation(
1467 'wlock cannot be inherited in the middle of a transaction')
1522 'wlock cannot be inherited in the middle of a transaction')
1468
1523
1469 def wlock(self, wait=True):
1524 def wlock(self, wait=True):
1470 '''Lock the non-store parts of the repository (everything under
1525 '''Lock the non-store parts of the repository (everything under
1471 .hg except .hg/store) and return a weak reference to the lock.
1526 .hg except .hg/store) and return a weak reference to the lock.
1472
1527
1473 Use this before modifying files in .hg.
1528 Use this before modifying files in .hg.
1474
1529
1475 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1530 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1476 'wlock' first to avoid a dead-lock hazard.'''
1531 'wlock' first to avoid a dead-lock hazard.'''
1477 l = self._wlockref and self._wlockref()
1532 l = self._wlockref and self._wlockref()
1478 if l is not None and l.held:
1533 if l is not None and l.held:
1479 l.lock()
1534 l.lock()
1480 return l
1535 return l
1481
1536
1482 # We do not need to check for non-waiting lock acquisition. Such
1537 # We do not need to check for non-waiting lock acquisition. Such
1483 # acquisition would not cause dead-lock as they would just fail.
1538 # acquisition would not cause dead-lock as they would just fail.
1484 if wait and (self.ui.configbool('devel', 'all-warnings')
1539 if wait and (self.ui.configbool('devel', 'all-warnings')
1485 or self.ui.configbool('devel', 'check-locks')):
1540 or self.ui.configbool('devel', 'check-locks')):
1486 if self._currentlock(self._lockref) is not None:
1541 if self._currentlock(self._lockref) is not None:
1487 self.ui.develwarn('"wlock" acquired after "lock"')
1542 self.ui.develwarn('"wlock" acquired after "lock"')
1488
1543
1489 def unlock():
1544 def unlock():
1490 if self.dirstate.pendingparentchange():
1545 if self.dirstate.pendingparentchange():
1491 self.dirstate.invalidate()
1546 self.dirstate.invalidate()
1492 else:
1547 else:
1493 self.dirstate.write(None)
1548 self.dirstate.write(None)
1494
1549
1495 self._filecache['dirstate'].refresh()
1550 self._filecache['dirstate'].refresh()
1496
1551
1497 l = self._lock(self.vfs, "wlock", wait, unlock,
1552 l = self._lock(self.vfs, "wlock", wait, unlock,
1498 self.invalidatedirstate, _('working directory of %s') %
1553 self.invalidatedirstate, _('working directory of %s') %
1499 self.origroot,
1554 self.origroot,
1500 inheritchecker=self._wlockchecktransaction,
1555 inheritchecker=self._wlockchecktransaction,
1501 parentenvvar='HG_WLOCK_LOCKER')
1556 parentenvvar='HG_WLOCK_LOCKER')
1502 self._wlockref = weakref.ref(l)
1557 self._wlockref = weakref.ref(l)
1503 return l
1558 return l
1504
1559
1505 def _currentlock(self, lockref):
1560 def _currentlock(self, lockref):
1506 """Returns the lock if it's held, or None if it's not."""
1561 """Returns the lock if it's held, or None if it's not."""
1507 if lockref is None:
1562 if lockref is None:
1508 return None
1563 return None
1509 l = lockref()
1564 l = lockref()
1510 if l is None or not l.held:
1565 if l is None or not l.held:
1511 return None
1566 return None
1512 return l
1567 return l
1513
1568
1514 def currentwlock(self):
1569 def currentwlock(self):
1515 """Returns the wlock if it's held, or None if it's not."""
1570 """Returns the wlock if it's held, or None if it's not."""
1516 return self._currentlock(self._wlockref)
1571 return self._currentlock(self._wlockref)
1517
1572
1518 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1573 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1519 """
1574 """
1520 commit an individual file as part of a larger transaction
1575 commit an individual file as part of a larger transaction
1521 """
1576 """
1522
1577
1523 fname = fctx.path()
1578 fname = fctx.path()
1524 fparent1 = manifest1.get(fname, nullid)
1579 fparent1 = manifest1.get(fname, nullid)
1525 fparent2 = manifest2.get(fname, nullid)
1580 fparent2 = manifest2.get(fname, nullid)
1526 if isinstance(fctx, context.filectx):
1581 if isinstance(fctx, context.filectx):
1527 node = fctx.filenode()
1582 node = fctx.filenode()
1528 if node in [fparent1, fparent2]:
1583 if node in [fparent1, fparent2]:
1529 self.ui.debug('reusing %s filelog entry\n' % fname)
1584 self.ui.debug('reusing %s filelog entry\n' % fname)
1530 if manifest1.flags(fname) != fctx.flags():
1585 if manifest1.flags(fname) != fctx.flags():
1531 changelist.append(fname)
1586 changelist.append(fname)
1532 return node
1587 return node
1533
1588
1534 flog = self.file(fname)
1589 flog = self.file(fname)
1535 meta = {}
1590 meta = {}
1536 copy = fctx.renamed()
1591 copy = fctx.renamed()
1537 if copy and copy[0] != fname:
1592 if copy and copy[0] != fname:
1538 # Mark the new revision of this file as a copy of another
1593 # Mark the new revision of this file as a copy of another
1539 # file. This copy data will effectively act as a parent
1594 # file. This copy data will effectively act as a parent
1540 # of this new revision. If this is a merge, the first
1595 # of this new revision. If this is a merge, the first
1541 # parent will be the nullid (meaning "look up the copy data")
1596 # parent will be the nullid (meaning "look up the copy data")
1542 # and the second one will be the other parent. For example:
1597 # and the second one will be the other parent. For example:
1543 #
1598 #
1544 # 0 --- 1 --- 3 rev1 changes file foo
1599 # 0 --- 1 --- 3 rev1 changes file foo
1545 # \ / rev2 renames foo to bar and changes it
1600 # \ / rev2 renames foo to bar and changes it
1546 # \- 2 -/ rev3 should have bar with all changes and
1601 # \- 2 -/ rev3 should have bar with all changes and
1547 # should record that bar descends from
1602 # should record that bar descends from
1548 # bar in rev2 and foo in rev1
1603 # bar in rev2 and foo in rev1
1549 #
1604 #
1550 # this allows this merge to succeed:
1605 # this allows this merge to succeed:
1551 #
1606 #
1552 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1607 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1553 # \ / merging rev3 and rev4 should use bar@rev2
1608 # \ / merging rev3 and rev4 should use bar@rev2
1554 # \- 2 --- 4 as the merge base
1609 # \- 2 --- 4 as the merge base
1555 #
1610 #
1556
1611
1557 cfname = copy[0]
1612 cfname = copy[0]
1558 crev = manifest1.get(cfname)
1613 crev = manifest1.get(cfname)
1559 newfparent = fparent2
1614 newfparent = fparent2
1560
1615
1561 if manifest2: # branch merge
1616 if manifest2: # branch merge
1562 if fparent2 == nullid or crev is None: # copied on remote side
1617 if fparent2 == nullid or crev is None: # copied on remote side
1563 if cfname in manifest2:
1618 if cfname in manifest2:
1564 crev = manifest2[cfname]
1619 crev = manifest2[cfname]
1565 newfparent = fparent1
1620 newfparent = fparent1
1566
1621
1567 # Here, we used to search backwards through history to try to find
1622 # Here, we used to search backwards through history to try to find
1568 # where the file copy came from if the source of a copy was not in
1623 # where the file copy came from if the source of a copy was not in
1569 # the parent directory. However, this doesn't actually make sense to
1624 # the parent directory. However, this doesn't actually make sense to
1570 # do (what does a copy from something not in your working copy even
1625 # do (what does a copy from something not in your working copy even
1571 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1626 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1572 # the user that copy information was dropped, so if they didn't
1627 # the user that copy information was dropped, so if they didn't
1573 # expect this outcome it can be fixed, but this is the correct
1628 # expect this outcome it can be fixed, but this is the correct
1574 # behavior in this circumstance.
1629 # behavior in this circumstance.
1575
1630
1576 if crev:
1631 if crev:
1577 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1632 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1578 meta["copy"] = cfname
1633 meta["copy"] = cfname
1579 meta["copyrev"] = hex(crev)
1634 meta["copyrev"] = hex(crev)
1580 fparent1, fparent2 = nullid, newfparent
1635 fparent1, fparent2 = nullid, newfparent
1581 else:
1636 else:
1582 self.ui.warn(_("warning: can't find ancestor for '%s' "
1637 self.ui.warn(_("warning: can't find ancestor for '%s' "
1583 "copied from '%s'!\n") % (fname, cfname))
1638 "copied from '%s'!\n") % (fname, cfname))
1584
1639
1585 elif fparent1 == nullid:
1640 elif fparent1 == nullid:
1586 fparent1, fparent2 = fparent2, nullid
1641 fparent1, fparent2 = fparent2, nullid
1587 elif fparent2 != nullid:
1642 elif fparent2 != nullid:
1588 # is one parent an ancestor of the other?
1643 # is one parent an ancestor of the other?
1589 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1644 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1590 if fparent1 in fparentancestors:
1645 if fparent1 in fparentancestors:
1591 fparent1, fparent2 = fparent2, nullid
1646 fparent1, fparent2 = fparent2, nullid
1592 elif fparent2 in fparentancestors:
1647 elif fparent2 in fparentancestors:
1593 fparent2 = nullid
1648 fparent2 = nullid
1594
1649
1595 # is the file changed?
1650 # is the file changed?
1596 text = fctx.data()
1651 text = fctx.data()
1597 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1652 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1598 changelist.append(fname)
1653 changelist.append(fname)
1599 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1654 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1600 # are just the flags changed during merge?
1655 # are just the flags changed during merge?
1601 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1656 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1602 changelist.append(fname)
1657 changelist.append(fname)
1603
1658
1604 return fparent1
1659 return fparent1
1605
1660
1606 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1661 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1607 """check for commit arguments that aren't committable"""
1662 """check for commit arguments that aren't committable"""
1608 if match.isexact() or match.prefix():
1663 if match.isexact() or match.prefix():
1609 matched = set(status.modified + status.added + status.removed)
1664 matched = set(status.modified + status.added + status.removed)
1610
1665
1611 for f in match.files():
1666 for f in match.files():
1612 f = self.dirstate.normalize(f)
1667 f = self.dirstate.normalize(f)
1613 if f == '.' or f in matched or f in wctx.substate:
1668 if f == '.' or f in matched or f in wctx.substate:
1614 continue
1669 continue
1615 if f in status.deleted:
1670 if f in status.deleted:
1616 fail(f, _('file not found!'))
1671 fail(f, _('file not found!'))
1617 if f in vdirs: # visited directory
1672 if f in vdirs: # visited directory
1618 d = f + '/'
1673 d = f + '/'
1619 for mf in matched:
1674 for mf in matched:
1620 if mf.startswith(d):
1675 if mf.startswith(d):
1621 break
1676 break
1622 else:
1677 else:
1623 fail(f, _("no match under directory!"))
1678 fail(f, _("no match under directory!"))
1624 elif f not in self.dirstate:
1679 elif f not in self.dirstate:
1625 fail(f, _("file not tracked!"))
1680 fail(f, _("file not tracked!"))
1626
1681
1627 @unfilteredmethod
1682 @unfilteredmethod
1628 def commit(self, text="", user=None, date=None, match=None, force=False,
1683 def commit(self, text="", user=None, date=None, match=None, force=False,
1629 editor=False, extra=None):
1684 editor=False, extra=None):
1630 """Add a new revision to current repository.
1685 """Add a new revision to current repository.
1631
1686
1632 Revision information is gathered from the working directory,
1687 Revision information is gathered from the working directory,
1633 match can be used to filter the committed files. If editor is
1688 match can be used to filter the committed files. If editor is
1634 supplied, it is called to get a commit message.
1689 supplied, it is called to get a commit message.
1635 """
1690 """
1636 if extra is None:
1691 if extra is None:
1637 extra = {}
1692 extra = {}
1638
1693
1639 def fail(f, msg):
1694 def fail(f, msg):
1640 raise error.Abort('%s: %s' % (f, msg))
1695 raise error.Abort('%s: %s' % (f, msg))
1641
1696
1642 if not match:
1697 if not match:
1643 match = matchmod.always(self.root, '')
1698 match = matchmod.always(self.root, '')
1644
1699
1645 if not force:
1700 if not force:
1646 vdirs = []
1701 vdirs = []
1647 match.explicitdir = vdirs.append
1702 match.explicitdir = vdirs.append
1648 match.bad = fail
1703 match.bad = fail
1649
1704
1650 wlock = lock = tr = None
1705 wlock = lock = tr = None
1651 try:
1706 try:
1652 wlock = self.wlock()
1707 wlock = self.wlock()
1653 lock = self.lock() # for recent changelog (see issue4368)
1708 lock = self.lock() # for recent changelog (see issue4368)
1654
1709
1655 wctx = self[None]
1710 wctx = self[None]
1656 merge = len(wctx.parents()) > 1
1711 merge = len(wctx.parents()) > 1
1657
1712
1658 if not force and merge and not match.always():
1713 if not force and merge and not match.always():
1659 raise error.Abort(_('cannot partially commit a merge '
1714 raise error.Abort(_('cannot partially commit a merge '
1660 '(do not specify files or patterns)'))
1715 '(do not specify files or patterns)'))
1661
1716
1662 status = self.status(match=match, clean=force)
1717 status = self.status(match=match, clean=force)
1663 if force:
1718 if force:
1664 status.modified.extend(status.clean) # mq may commit clean files
1719 status.modified.extend(status.clean) # mq may commit clean files
1665
1720
1666 # check subrepos
1721 # check subrepos
1667 subs = []
1722 subs = []
1668 commitsubs = set()
1723 commitsubs = set()
1669 newstate = wctx.substate.copy()
1724 newstate = wctx.substate.copy()
1670 # only manage subrepos and .hgsubstate if .hgsub is present
1725 # only manage subrepos and .hgsubstate if .hgsub is present
1671 if '.hgsub' in wctx:
1726 if '.hgsub' in wctx:
1672 # we'll decide whether to track this ourselves, thanks
1727 # we'll decide whether to track this ourselves, thanks
1673 for c in status.modified, status.added, status.removed:
1728 for c in status.modified, status.added, status.removed:
1674 if '.hgsubstate' in c:
1729 if '.hgsubstate' in c:
1675 c.remove('.hgsubstate')
1730 c.remove('.hgsubstate')
1676
1731
1677 # compare current state to last committed state
1732 # compare current state to last committed state
1678 # build new substate based on last committed state
1733 # build new substate based on last committed state
1679 oldstate = wctx.p1().substate
1734 oldstate = wctx.p1().substate
1680 for s in sorted(newstate.keys()):
1735 for s in sorted(newstate.keys()):
1681 if not match(s):
1736 if not match(s):
1682 # ignore working copy, use old state if present
1737 # ignore working copy, use old state if present
1683 if s in oldstate:
1738 if s in oldstate:
1684 newstate[s] = oldstate[s]
1739 newstate[s] = oldstate[s]
1685 continue
1740 continue
1686 if not force:
1741 if not force:
1687 raise error.Abort(
1742 raise error.Abort(
1688 _("commit with new subrepo %s excluded") % s)
1743 _("commit with new subrepo %s excluded") % s)
1689 dirtyreason = wctx.sub(s).dirtyreason(True)
1744 dirtyreason = wctx.sub(s).dirtyreason(True)
1690 if dirtyreason:
1745 if dirtyreason:
1691 if not self.ui.configbool('ui', 'commitsubrepos'):
1746 if not self.ui.configbool('ui', 'commitsubrepos'):
1692 raise error.Abort(dirtyreason,
1747 raise error.Abort(dirtyreason,
1693 hint=_("use --subrepos for recursive commit"))
1748 hint=_("use --subrepos for recursive commit"))
1694 subs.append(s)
1749 subs.append(s)
1695 commitsubs.add(s)
1750 commitsubs.add(s)
1696 else:
1751 else:
1697 bs = wctx.sub(s).basestate()
1752 bs = wctx.sub(s).basestate()
1698 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1753 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1699 if oldstate.get(s, (None, None, None))[1] != bs:
1754 if oldstate.get(s, (None, None, None))[1] != bs:
1700 subs.append(s)
1755 subs.append(s)
1701
1756
1702 # check for removed subrepos
1757 # check for removed subrepos
1703 for p in wctx.parents():
1758 for p in wctx.parents():
1704 r = [s for s in p.substate if s not in newstate]
1759 r = [s for s in p.substate if s not in newstate]
1705 subs += [s for s in r if match(s)]
1760 subs += [s for s in r if match(s)]
1706 if subs:
1761 if subs:
1707 if (not match('.hgsub') and
1762 if (not match('.hgsub') and
1708 '.hgsub' in (wctx.modified() + wctx.added())):
1763 '.hgsub' in (wctx.modified() + wctx.added())):
1709 raise error.Abort(
1764 raise error.Abort(
1710 _("can't commit subrepos without .hgsub"))
1765 _("can't commit subrepos without .hgsub"))
1711 status.modified.insert(0, '.hgsubstate')
1766 status.modified.insert(0, '.hgsubstate')
1712
1767
1713 elif '.hgsub' in status.removed:
1768 elif '.hgsub' in status.removed:
1714 # clean up .hgsubstate when .hgsub is removed
1769 # clean up .hgsubstate when .hgsub is removed
1715 if ('.hgsubstate' in wctx and
1770 if ('.hgsubstate' in wctx and
1716 '.hgsubstate' not in (status.modified + status.added +
1771 '.hgsubstate' not in (status.modified + status.added +
1717 status.removed)):
1772 status.removed)):
1718 status.removed.insert(0, '.hgsubstate')
1773 status.removed.insert(0, '.hgsubstate')
1719
1774
1720 # make sure all explicit patterns are matched
1775 # make sure all explicit patterns are matched
1721 if not force:
1776 if not force:
1722 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1777 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1723
1778
1724 cctx = context.workingcommitctx(self, status,
1779 cctx = context.workingcommitctx(self, status,
1725 text, user, date, extra)
1780 text, user, date, extra)
1726
1781
1727 # internal config: ui.allowemptycommit
1782 # internal config: ui.allowemptycommit
1728 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1783 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1729 or extra.get('close') or merge or cctx.files()
1784 or extra.get('close') or merge or cctx.files()
1730 or self.ui.configbool('ui', 'allowemptycommit'))
1785 or self.ui.configbool('ui', 'allowemptycommit'))
1731 if not allowemptycommit:
1786 if not allowemptycommit:
1732 return None
1787 return None
1733
1788
1734 if merge and cctx.deleted():
1789 if merge and cctx.deleted():
1735 raise error.Abort(_("cannot commit merge with missing files"))
1790 raise error.Abort(_("cannot commit merge with missing files"))
1736
1791
1737 ms = mergemod.mergestate.read(self)
1792 ms = mergemod.mergestate.read(self)
1738 mergeutil.checkunresolved(ms)
1793 mergeutil.checkunresolved(ms)
1739
1794
1740 if editor:
1795 if editor:
1741 cctx._text = editor(self, cctx, subs)
1796 cctx._text = editor(self, cctx, subs)
1742 edited = (text != cctx._text)
1797 edited = (text != cctx._text)
1743
1798
1744 # Save commit message in case this transaction gets rolled back
1799 # Save commit message in case this transaction gets rolled back
1745 # (e.g. by a pretxncommit hook). Leave the content alone on
1800 # (e.g. by a pretxncommit hook). Leave the content alone on
1746 # the assumption that the user will use the same editor again.
1801 # the assumption that the user will use the same editor again.
1747 msgfn = self.savecommitmessage(cctx._text)
1802 msgfn = self.savecommitmessage(cctx._text)
1748
1803
1749 # commit subs and write new state
1804 # commit subs and write new state
1750 if subs:
1805 if subs:
1751 for s in sorted(commitsubs):
1806 for s in sorted(commitsubs):
1752 sub = wctx.sub(s)
1807 sub = wctx.sub(s)
1753 self.ui.status(_('committing subrepository %s\n') %
1808 self.ui.status(_('committing subrepository %s\n') %
1754 subrepo.subrelpath(sub))
1809 subrepo.subrelpath(sub))
1755 sr = sub.commit(cctx._text, user, date)
1810 sr = sub.commit(cctx._text, user, date)
1756 newstate[s] = (newstate[s][0], sr)
1811 newstate[s] = (newstate[s][0], sr)
1757 subrepo.writestate(self, newstate)
1812 subrepo.writestate(self, newstate)
1758
1813
1759 p1, p2 = self.dirstate.parents()
1814 p1, p2 = self.dirstate.parents()
1760 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1815 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1761 try:
1816 try:
1762 self.hook("precommit", throw=True, parent1=hookp1,
1817 self.hook("precommit", throw=True, parent1=hookp1,
1763 parent2=hookp2)
1818 parent2=hookp2)
1764 tr = self.transaction('commit')
1819 tr = self.transaction('commit')
1765 ret = self.commitctx(cctx, True)
1820 ret = self.commitctx(cctx, True)
1766 except: # re-raises
1821 except: # re-raises
1767 if edited:
1822 if edited:
1768 self.ui.write(
1823 self.ui.write(
1769 _('note: commit message saved in %s\n') % msgfn)
1824 _('note: commit message saved in %s\n') % msgfn)
1770 raise
1825 raise
1771 # update bookmarks, dirstate and mergestate
1826 # update bookmarks, dirstate and mergestate
1772 bookmarks.update(self, [p1, p2], ret)
1827 bookmarks.update(self, [p1, p2], ret)
1773 cctx.markcommitted(ret)
1828 cctx.markcommitted(ret)
1774 ms.reset()
1829 ms.reset()
1775 tr.close()
1830 tr.close()
1776
1831
1777 finally:
1832 finally:
1778 lockmod.release(tr, lock, wlock)
1833 lockmod.release(tr, lock, wlock)
1779
1834
1780 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1835 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1781 # hack for command that use a temporary commit (eg: histedit)
1836 # hack for command that use a temporary commit (eg: histedit)
1782 # temporary commit got stripped before hook release
1837 # temporary commit got stripped before hook release
1783 if self.changelog.hasnode(ret):
1838 if self.changelog.hasnode(ret):
1784 self.hook("commit", node=node, parent1=parent1,
1839 self.hook("commit", node=node, parent1=parent1,
1785 parent2=parent2)
1840 parent2=parent2)
1786 self._afterlock(commithook)
1841 self._afterlock(commithook)
1787 return ret
1842 return ret
1788
1843
1789 @unfilteredmethod
1844 @unfilteredmethod
1790 def commitctx(self, ctx, error=False):
1845 def commitctx(self, ctx, error=False):
1791 """Add a new revision to current repository.
1846 """Add a new revision to current repository.
1792 Revision information is passed via the context argument.
1847 Revision information is passed via the context argument.
1793 """
1848 """
1794
1849
1795 tr = None
1850 tr = None
1796 p1, p2 = ctx.p1(), ctx.p2()
1851 p1, p2 = ctx.p1(), ctx.p2()
1797 user = ctx.user()
1852 user = ctx.user()
1798
1853
1799 lock = self.lock()
1854 lock = self.lock()
1800 try:
1855 try:
1801 tr = self.transaction("commit")
1856 tr = self.transaction("commit")
1802 trp = weakref.proxy(tr)
1857 trp = weakref.proxy(tr)
1803
1858
1804 if ctx.manifestnode():
1859 if ctx.manifestnode():
1805 # reuse an existing manifest revision
1860 # reuse an existing manifest revision
1806 mn = ctx.manifestnode()
1861 mn = ctx.manifestnode()
1807 files = ctx.files()
1862 files = ctx.files()
1808 elif ctx.files():
1863 elif ctx.files():
1809 m1ctx = p1.manifestctx()
1864 m1ctx = p1.manifestctx()
1810 m2ctx = p2.manifestctx()
1865 m2ctx = p2.manifestctx()
1811 mctx = m1ctx.copy()
1866 mctx = m1ctx.copy()
1812
1867
1813 m = mctx.read()
1868 m = mctx.read()
1814 m1 = m1ctx.read()
1869 m1 = m1ctx.read()
1815 m2 = m2ctx.read()
1870 m2 = m2ctx.read()
1816
1871
1817 # check in files
1872 # check in files
1818 added = []
1873 added = []
1819 changed = []
1874 changed = []
1820 removed = list(ctx.removed())
1875 removed = list(ctx.removed())
1821 linkrev = len(self)
1876 linkrev = len(self)
1822 self.ui.note(_("committing files:\n"))
1877 self.ui.note(_("committing files:\n"))
1823 for f in sorted(ctx.modified() + ctx.added()):
1878 for f in sorted(ctx.modified() + ctx.added()):
1824 self.ui.note(f + "\n")
1879 self.ui.note(f + "\n")
1825 try:
1880 try:
1826 fctx = ctx[f]
1881 fctx = ctx[f]
1827 if fctx is None:
1882 if fctx is None:
1828 removed.append(f)
1883 removed.append(f)
1829 else:
1884 else:
1830 added.append(f)
1885 added.append(f)
1831 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1886 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1832 trp, changed)
1887 trp, changed)
1833 m.setflag(f, fctx.flags())
1888 m.setflag(f, fctx.flags())
1834 except OSError as inst:
1889 except OSError as inst:
1835 self.ui.warn(_("trouble committing %s!\n") % f)
1890 self.ui.warn(_("trouble committing %s!\n") % f)
1836 raise
1891 raise
1837 except IOError as inst:
1892 except IOError as inst:
1838 errcode = getattr(inst, 'errno', errno.ENOENT)
1893 errcode = getattr(inst, 'errno', errno.ENOENT)
1839 if error or errcode and errcode != errno.ENOENT:
1894 if error or errcode and errcode != errno.ENOENT:
1840 self.ui.warn(_("trouble committing %s!\n") % f)
1895 self.ui.warn(_("trouble committing %s!\n") % f)
1841 raise
1896 raise
1842
1897
1843 # update manifest
1898 # update manifest
1844 self.ui.note(_("committing manifest\n"))
1899 self.ui.note(_("committing manifest\n"))
1845 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1900 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1846 drop = [f for f in removed if f in m]
1901 drop = [f for f in removed if f in m]
1847 for f in drop:
1902 for f in drop:
1848 del m[f]
1903 del m[f]
1849 mn = mctx.write(trp, linkrev,
1904 mn = mctx.write(trp, linkrev,
1850 p1.manifestnode(), p2.manifestnode(),
1905 p1.manifestnode(), p2.manifestnode(),
1851 added, drop)
1906 added, drop)
1852 files = changed + removed
1907 files = changed + removed
1853 else:
1908 else:
1854 mn = p1.manifestnode()
1909 mn = p1.manifestnode()
1855 files = []
1910 files = []
1856
1911
1857 # update changelog
1912 # update changelog
1858 self.ui.note(_("committing changelog\n"))
1913 self.ui.note(_("committing changelog\n"))
1859 self.changelog.delayupdate(tr)
1914 self.changelog.delayupdate(tr)
1860 n = self.changelog.add(mn, files, ctx.description(),
1915 n = self.changelog.add(mn, files, ctx.description(),
1861 trp, p1.node(), p2.node(),
1916 trp, p1.node(), p2.node(),
1862 user, ctx.date(), ctx.extra().copy())
1917 user, ctx.date(), ctx.extra().copy())
1863 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1918 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1864 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1919 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1865 parent2=xp2)
1920 parent2=xp2)
1866 # set the new commit is proper phase
1921 # set the new commit is proper phase
1867 targetphase = subrepo.newcommitphase(self.ui, ctx)
1922 targetphase = subrepo.newcommitphase(self.ui, ctx)
1868 if targetphase:
1923 if targetphase:
1869 # retract boundary do not alter parent changeset.
1924 # retract boundary do not alter parent changeset.
1870 # if a parent have higher the resulting phase will
1925 # if a parent have higher the resulting phase will
1871 # be compliant anyway
1926 # be compliant anyway
1872 #
1927 #
1873 # if minimal phase was 0 we don't need to retract anything
1928 # if minimal phase was 0 we don't need to retract anything
1874 phases.retractboundary(self, tr, targetphase, [n])
1929 phases.retractboundary(self, tr, targetphase, [n])
1875 tr.close()
1930 tr.close()
1876 return n
1931 return n
1877 finally:
1932 finally:
1878 if tr:
1933 if tr:
1879 tr.release()
1934 tr.release()
1880 lock.release()
1935 lock.release()
1881
1936
1882 @unfilteredmethod
1937 @unfilteredmethod
1883 def destroying(self):
1938 def destroying(self):
1884 '''Inform the repository that nodes are about to be destroyed.
1939 '''Inform the repository that nodes are about to be destroyed.
1885 Intended for use by strip and rollback, so there's a common
1940 Intended for use by strip and rollback, so there's a common
1886 place for anything that has to be done before destroying history.
1941 place for anything that has to be done before destroying history.
1887
1942
1888 This is mostly useful for saving state that is in memory and waiting
1943 This is mostly useful for saving state that is in memory and waiting
1889 to be flushed when the current lock is released. Because a call to
1944 to be flushed when the current lock is released. Because a call to
1890 destroyed is imminent, the repo will be invalidated causing those
1945 destroyed is imminent, the repo will be invalidated causing those
1891 changes to stay in memory (waiting for the next unlock), or vanish
1946 changes to stay in memory (waiting for the next unlock), or vanish
1892 completely.
1947 completely.
1893 '''
1948 '''
1894 # When using the same lock to commit and strip, the phasecache is left
1949 # When using the same lock to commit and strip, the phasecache is left
1895 # dirty after committing. Then when we strip, the repo is invalidated,
1950 # dirty after committing. Then when we strip, the repo is invalidated,
1896 # causing those changes to disappear.
1951 # causing those changes to disappear.
1897 if '_phasecache' in vars(self):
1952 if '_phasecache' in vars(self):
1898 self._phasecache.write()
1953 self._phasecache.write()
1899
1954
1900 @unfilteredmethod
1955 @unfilteredmethod
1901 def destroyed(self):
1956 def destroyed(self):
1902 '''Inform the repository that nodes have been destroyed.
1957 '''Inform the repository that nodes have been destroyed.
1903 Intended for use by strip and rollback, so there's a common
1958 Intended for use by strip and rollback, so there's a common
1904 place for anything that has to be done after destroying history.
1959 place for anything that has to be done after destroying history.
1905 '''
1960 '''
1906 # When one tries to:
1961 # When one tries to:
1907 # 1) destroy nodes thus calling this method (e.g. strip)
1962 # 1) destroy nodes thus calling this method (e.g. strip)
1908 # 2) use phasecache somewhere (e.g. commit)
1963 # 2) use phasecache somewhere (e.g. commit)
1909 #
1964 #
1910 # then 2) will fail because the phasecache contains nodes that were
1965 # then 2) will fail because the phasecache contains nodes that were
1911 # removed. We can either remove phasecache from the filecache,
1966 # removed. We can either remove phasecache from the filecache,
1912 # causing it to reload next time it is accessed, or simply filter
1967 # causing it to reload next time it is accessed, or simply filter
1913 # the removed nodes now and write the updated cache.
1968 # the removed nodes now and write the updated cache.
1914 self._phasecache.filterunknown(self)
1969 self._phasecache.filterunknown(self)
1915 self._phasecache.write()
1970 self._phasecache.write()
1916
1971
1917 # refresh all repository caches
1972 # refresh all repository caches
1918 self.updatecaches()
1973 self.updatecaches()
1919
1974
1920 # Ensure the persistent tag cache is updated. Doing it now
1975 # Ensure the persistent tag cache is updated. Doing it now
1921 # means that the tag cache only has to worry about destroyed
1976 # means that the tag cache only has to worry about destroyed
1922 # heads immediately after a strip/rollback. That in turn
1977 # heads immediately after a strip/rollback. That in turn
1923 # guarantees that "cachetip == currenttip" (comparing both rev
1978 # guarantees that "cachetip == currenttip" (comparing both rev
1924 # and node) always means no nodes have been added or destroyed.
1979 # and node) always means no nodes have been added or destroyed.
1925
1980
1926 # XXX this is suboptimal when qrefresh'ing: we strip the current
1981 # XXX this is suboptimal when qrefresh'ing: we strip the current
1927 # head, refresh the tag cache, then immediately add a new head.
1982 # head, refresh the tag cache, then immediately add a new head.
1928 # But I think doing it this way is necessary for the "instant
1983 # But I think doing it this way is necessary for the "instant
1929 # tag cache retrieval" case to work.
1984 # tag cache retrieval" case to work.
1930 self.invalidate()
1985 self.invalidate()
1931
1986
1932 def walk(self, match, node=None):
1987 def walk(self, match, node=None):
1933 '''
1988 '''
1934 walk recursively through the directory tree or a given
1989 walk recursively through the directory tree or a given
1935 changeset, finding all files matched by the match
1990 changeset, finding all files matched by the match
1936 function
1991 function
1937 '''
1992 '''
1938 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
1993 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
1939 return self[node].walk(match)
1994 return self[node].walk(match)
1940
1995
1941 def status(self, node1='.', node2=None, match=None,
1996 def status(self, node1='.', node2=None, match=None,
1942 ignored=False, clean=False, unknown=False,
1997 ignored=False, clean=False, unknown=False,
1943 listsubrepos=False):
1998 listsubrepos=False):
1944 '''a convenience method that calls node1.status(node2)'''
1999 '''a convenience method that calls node1.status(node2)'''
1945 return self[node1].status(node2, match, ignored, clean, unknown,
2000 return self[node1].status(node2, match, ignored, clean, unknown,
1946 listsubrepos)
2001 listsubrepos)
1947
2002
1948 def addpostdsstatus(self, ps):
2003 def addpostdsstatus(self, ps):
1949 """Add a callback to run within the wlock, at the point at which status
2004 """Add a callback to run within the wlock, at the point at which status
1950 fixups happen.
2005 fixups happen.
1951
2006
1952 On status completion, callback(wctx, status) will be called with the
2007 On status completion, callback(wctx, status) will be called with the
1953 wlock held, unless the dirstate has changed from underneath or the wlock
2008 wlock held, unless the dirstate has changed from underneath or the wlock
1954 couldn't be grabbed.
2009 couldn't be grabbed.
1955
2010
1956 Callbacks should not capture and use a cached copy of the dirstate --
2011 Callbacks should not capture and use a cached copy of the dirstate --
1957 it might change in the meanwhile. Instead, they should access the
2012 it might change in the meanwhile. Instead, they should access the
1958 dirstate via wctx.repo().dirstate.
2013 dirstate via wctx.repo().dirstate.
1959
2014
1960 This list is emptied out after each status run -- extensions should
2015 This list is emptied out after each status run -- extensions should
1961 make sure it adds to this list each time dirstate.status is called.
2016 make sure it adds to this list each time dirstate.status is called.
1962 Extensions should also make sure they don't call this for statuses
2017 Extensions should also make sure they don't call this for statuses
1963 that don't involve the dirstate.
2018 that don't involve the dirstate.
1964 """
2019 """
1965
2020
1966 # The list is located here for uniqueness reasons -- it is actually
2021 # The list is located here for uniqueness reasons -- it is actually
1967 # managed by the workingctx, but that isn't unique per-repo.
2022 # managed by the workingctx, but that isn't unique per-repo.
1968 self._postdsstatus.append(ps)
2023 self._postdsstatus.append(ps)
1969
2024
1970 def postdsstatus(self):
2025 def postdsstatus(self):
1971 """Used by workingctx to get the list of post-dirstate-status hooks."""
2026 """Used by workingctx to get the list of post-dirstate-status hooks."""
1972 return self._postdsstatus
2027 return self._postdsstatus
1973
2028
1974 def clearpostdsstatus(self):
2029 def clearpostdsstatus(self):
1975 """Used by workingctx to clear post-dirstate-status hooks."""
2030 """Used by workingctx to clear post-dirstate-status hooks."""
1976 del self._postdsstatus[:]
2031 del self._postdsstatus[:]
1977
2032
1978 def heads(self, start=None):
2033 def heads(self, start=None):
1979 if start is None:
2034 if start is None:
1980 cl = self.changelog
2035 cl = self.changelog
1981 headrevs = reversed(cl.headrevs())
2036 headrevs = reversed(cl.headrevs())
1982 return [cl.node(rev) for rev in headrevs]
2037 return [cl.node(rev) for rev in headrevs]
1983
2038
1984 heads = self.changelog.heads(start)
2039 heads = self.changelog.heads(start)
1985 # sort the output in rev descending order
2040 # sort the output in rev descending order
1986 return sorted(heads, key=self.changelog.rev, reverse=True)
2041 return sorted(heads, key=self.changelog.rev, reverse=True)
1987
2042
1988 def branchheads(self, branch=None, start=None, closed=False):
2043 def branchheads(self, branch=None, start=None, closed=False):
1989 '''return a (possibly filtered) list of heads for the given branch
2044 '''return a (possibly filtered) list of heads for the given branch
1990
2045
1991 Heads are returned in topological order, from newest to oldest.
2046 Heads are returned in topological order, from newest to oldest.
1992 If branch is None, use the dirstate branch.
2047 If branch is None, use the dirstate branch.
1993 If start is not None, return only heads reachable from start.
2048 If start is not None, return only heads reachable from start.
1994 If closed is True, return heads that are marked as closed as well.
2049 If closed is True, return heads that are marked as closed as well.
1995 '''
2050 '''
1996 if branch is None:
2051 if branch is None:
1997 branch = self[None].branch()
2052 branch = self[None].branch()
1998 branches = self.branchmap()
2053 branches = self.branchmap()
1999 if branch not in branches:
2054 if branch not in branches:
2000 return []
2055 return []
2001 # the cache returns heads ordered lowest to highest
2056 # the cache returns heads ordered lowest to highest
2002 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2057 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2003 if start is not None:
2058 if start is not None:
2004 # filter out the heads that cannot be reached from startrev
2059 # filter out the heads that cannot be reached from startrev
2005 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2060 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2006 bheads = [h for h in bheads if h in fbheads]
2061 bheads = [h for h in bheads if h in fbheads]
2007 return bheads
2062 return bheads
2008
2063
2009 def branches(self, nodes):
2064 def branches(self, nodes):
2010 if not nodes:
2065 if not nodes:
2011 nodes = [self.changelog.tip()]
2066 nodes = [self.changelog.tip()]
2012 b = []
2067 b = []
2013 for n in nodes:
2068 for n in nodes:
2014 t = n
2069 t = n
2015 while True:
2070 while True:
2016 p = self.changelog.parents(n)
2071 p = self.changelog.parents(n)
2017 if p[1] != nullid or p[0] == nullid:
2072 if p[1] != nullid or p[0] == nullid:
2018 b.append((t, n, p[0], p[1]))
2073 b.append((t, n, p[0], p[1]))
2019 break
2074 break
2020 n = p[0]
2075 n = p[0]
2021 return b
2076 return b
2022
2077
2023 def between(self, pairs):
2078 def between(self, pairs):
2024 r = []
2079 r = []
2025
2080
2026 for top, bottom in pairs:
2081 for top, bottom in pairs:
2027 n, l, i = top, [], 0
2082 n, l, i = top, [], 0
2028 f = 1
2083 f = 1
2029
2084
2030 while n != bottom and n != nullid:
2085 while n != bottom and n != nullid:
2031 p = self.changelog.parents(n)[0]
2086 p = self.changelog.parents(n)[0]
2032 if i == f:
2087 if i == f:
2033 l.append(n)
2088 l.append(n)
2034 f = f * 2
2089 f = f * 2
2035 n = p
2090 n = p
2036 i += 1
2091 i += 1
2037
2092
2038 r.append(l)
2093 r.append(l)
2039
2094
2040 return r
2095 return r
2041
2096
2042 def checkpush(self, pushop):
2097 def checkpush(self, pushop):
2043 """Extensions can override this function if additional checks have
2098 """Extensions can override this function if additional checks have
2044 to be performed before pushing, or call it if they override push
2099 to be performed before pushing, or call it if they override push
2045 command.
2100 command.
2046 """
2101 """
2047 pass
2102 pass
2048
2103
2049 @unfilteredpropertycache
2104 @unfilteredpropertycache
2050 def prepushoutgoinghooks(self):
2105 def prepushoutgoinghooks(self):
2051 """Return util.hooks consists of a pushop with repo, remote, outgoing
2106 """Return util.hooks consists of a pushop with repo, remote, outgoing
2052 methods, which are called before pushing changesets.
2107 methods, which are called before pushing changesets.
2053 """
2108 """
2054 return util.hooks()
2109 return util.hooks()
2055
2110
2056 def pushkey(self, namespace, key, old, new):
2111 def pushkey(self, namespace, key, old, new):
2057 try:
2112 try:
2058 tr = self.currenttransaction()
2113 tr = self.currenttransaction()
2059 hookargs = {}
2114 hookargs = {}
2060 if tr is not None:
2115 if tr is not None:
2061 hookargs.update(tr.hookargs)
2116 hookargs.update(tr.hookargs)
2062 hookargs['namespace'] = namespace
2117 hookargs['namespace'] = namespace
2063 hookargs['key'] = key
2118 hookargs['key'] = key
2064 hookargs['old'] = old
2119 hookargs['old'] = old
2065 hookargs['new'] = new
2120 hookargs['new'] = new
2066 self.hook('prepushkey', throw=True, **hookargs)
2121 self.hook('prepushkey', throw=True, **hookargs)
2067 except error.HookAbort as exc:
2122 except error.HookAbort as exc:
2068 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2123 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2069 if exc.hint:
2124 if exc.hint:
2070 self.ui.write_err(_("(%s)\n") % exc.hint)
2125 self.ui.write_err(_("(%s)\n") % exc.hint)
2071 return False
2126 return False
2072 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2127 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2073 ret = pushkey.push(self, namespace, key, old, new)
2128 ret = pushkey.push(self, namespace, key, old, new)
2074 def runhook():
2129 def runhook():
2075 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2130 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2076 ret=ret)
2131 ret=ret)
2077 self._afterlock(runhook)
2132 self._afterlock(runhook)
2078 return ret
2133 return ret
2079
2134
2080 def listkeys(self, namespace):
2135 def listkeys(self, namespace):
2081 self.hook('prelistkeys', throw=True, namespace=namespace)
2136 self.hook('prelistkeys', throw=True, namespace=namespace)
2082 self.ui.debug('listing keys for "%s"\n' % namespace)
2137 self.ui.debug('listing keys for "%s"\n' % namespace)
2083 values = pushkey.list(self, namespace)
2138 values = pushkey.list(self, namespace)
2084 self.hook('listkeys', namespace=namespace, values=values)
2139 self.hook('listkeys', namespace=namespace, values=values)
2085 return values
2140 return values
2086
2141
2087 def debugwireargs(self, one, two, three=None, four=None, five=None):
2142 def debugwireargs(self, one, two, three=None, four=None, five=None):
2088 '''used to test argument passing over the wire'''
2143 '''used to test argument passing over the wire'''
2089 return "%s %s %s %s %s" % (one, two, three, four, five)
2144 return "%s %s %s %s %s" % (one, two, three, four, five)
2090
2145
2091 def savecommitmessage(self, text):
2146 def savecommitmessage(self, text):
2092 fp = self.vfs('last-message.txt', 'wb')
2147 fp = self.vfs('last-message.txt', 'wb')
2093 try:
2148 try:
2094 fp.write(text)
2149 fp.write(text)
2095 finally:
2150 finally:
2096 fp.close()
2151 fp.close()
2097 return self.pathto(fp.name[len(self.root) + 1:])
2152 return self.pathto(fp.name[len(self.root) + 1:])
2098
2153
2099 # used to avoid circular references so destructors work
2154 # used to avoid circular references so destructors work
2100 def aftertrans(files):
2155 def aftertrans(files):
2101 renamefiles = [tuple(t) for t in files]
2156 renamefiles = [tuple(t) for t in files]
2102 def a():
2157 def a():
2103 for vfs, src, dest in renamefiles:
2158 for vfs, src, dest in renamefiles:
2104 # if src and dest refer to a same file, vfs.rename is a no-op,
2159 # if src and dest refer to a same file, vfs.rename is a no-op,
2105 # leaving both src and dest on disk. delete dest to make sure
2160 # leaving both src and dest on disk. delete dest to make sure
2106 # the rename couldn't be such a no-op.
2161 # the rename couldn't be such a no-op.
2107 vfs.tryunlink(dest)
2162 vfs.tryunlink(dest)
2108 try:
2163 try:
2109 vfs.rename(src, dest)
2164 vfs.rename(src, dest)
2110 except OSError: # journal file does not yet exist
2165 except OSError: # journal file does not yet exist
2111 pass
2166 pass
2112 return a
2167 return a
2113
2168
2114 def undoname(fn):
2169 def undoname(fn):
2115 base, name = os.path.split(fn)
2170 base, name = os.path.split(fn)
2116 assert name.startswith('journal')
2171 assert name.startswith('journal')
2117 return os.path.join(base, name.replace('journal', 'undo', 1))
2172 return os.path.join(base, name.replace('journal', 'undo', 1))
2118
2173
2119 def instance(ui, path, create):
2174 def instance(ui, path, create):
2120 return localrepository(ui, util.urllocalpath(path), create)
2175 return localrepository(ui, util.urllocalpath(path), create)
2121
2176
2122 def islocal(path):
2177 def islocal(path):
2123 return True
2178 return True
2124
2179
2125 def newreporequirements(repo):
2180 def newreporequirements(repo):
2126 """Determine the set of requirements for a new local repository.
2181 """Determine the set of requirements for a new local repository.
2127
2182
2128 Extensions can wrap this function to specify custom requirements for
2183 Extensions can wrap this function to specify custom requirements for
2129 new repositories.
2184 new repositories.
2130 """
2185 """
2131 ui = repo.ui
2186 ui = repo.ui
2132 requirements = {'revlogv1'}
2187 requirements = {'revlogv1'}
2133 if ui.configbool('format', 'usestore'):
2188 if ui.configbool('format', 'usestore'):
2134 requirements.add('store')
2189 requirements.add('store')
2135 if ui.configbool('format', 'usefncache'):
2190 if ui.configbool('format', 'usefncache'):
2136 requirements.add('fncache')
2191 requirements.add('fncache')
2137 if ui.configbool('format', 'dotencode'):
2192 if ui.configbool('format', 'dotencode'):
2138 requirements.add('dotencode')
2193 requirements.add('dotencode')
2139
2194
2140 compengine = ui.config('experimental', 'format.compression', 'zlib')
2195 compengine = ui.config('experimental', 'format.compression', 'zlib')
2141 if compengine not in util.compengines:
2196 if compengine not in util.compengines:
2142 raise error.Abort(_('compression engine %s defined by '
2197 raise error.Abort(_('compression engine %s defined by '
2143 'experimental.format.compression not available') %
2198 'experimental.format.compression not available') %
2144 compengine,
2199 compengine,
2145 hint=_('run "hg debuginstall" to list available '
2200 hint=_('run "hg debuginstall" to list available '
2146 'compression engines'))
2201 'compression engines'))
2147
2202
2148 # zlib is the historical default and doesn't need an explicit requirement.
2203 # zlib is the historical default and doesn't need an explicit requirement.
2149 if compengine != 'zlib':
2204 if compengine != 'zlib':
2150 requirements.add('exp-compression-%s' % compengine)
2205 requirements.add('exp-compression-%s' % compengine)
2151
2206
2152 if scmutil.gdinitconfig(ui):
2207 if scmutil.gdinitconfig(ui):
2153 requirements.add('generaldelta')
2208 requirements.add('generaldelta')
2154 if ui.configbool('experimental', 'treemanifest', False):
2209 if ui.configbool('experimental', 'treemanifest', False):
2155 requirements.add('treemanifest')
2210 requirements.add('treemanifest')
2156 if ui.configbool('experimental', 'manifestv2', False):
2211 if ui.configbool('experimental', 'manifestv2', False):
2157 requirements.add('manifestv2')
2212 requirements.add('manifestv2')
2158
2213
2159 revlogv2 = ui.config('experimental', 'revlogv2')
2214 revlogv2 = ui.config('experimental', 'revlogv2')
2160 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2215 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2161 requirements.remove('revlogv1')
2216 requirements.remove('revlogv1')
2162 # generaldelta is implied by revlogv2.
2217 # generaldelta is implied by revlogv2.
2163 requirements.discard('generaldelta')
2218 requirements.discard('generaldelta')
2164 requirements.add(REVLOGV2_REQUIREMENT)
2219 requirements.add(REVLOGV2_REQUIREMENT)
2165
2220
2166 return requirements
2221 return requirements
@@ -1,230 +1,240 b''
1
1
2 $ cat << EOF > buggylocking.py
2 $ cat << EOF > buggylocking.py
3 > """A small extension that tests our developer warnings
3 > """A small extension that tests our developer warnings
4 > """
4 > """
5 >
5 >
6 > from mercurial import error, registrar, repair, util
6 > from mercurial import error, registrar, repair, util
7 >
7 >
8 > cmdtable = {}
8 > cmdtable = {}
9 > command = registrar.command(cmdtable)
9 > command = registrar.command(cmdtable)
10 >
10 >
11 > @command(b'buggylocking', [], '')
11 > @command(b'buggylocking', [], '')
12 > def buggylocking(ui, repo):
12 > def buggylocking(ui, repo):
13 > lo = repo.lock()
13 > lo = repo.lock()
14 > wl = repo.wlock()
14 > wl = repo.wlock()
15 > wl.release()
15 > wl.release()
16 > lo.release()
16 > lo.release()
17 >
17 >
18 > @command(b'buggytransaction', [], '')
18 > @command(b'buggytransaction', [], '')
19 > def buggylocking(ui, repo):
19 > def buggylocking(ui, repo):
20 > tr = repo.transaction('buggy')
20 > tr = repo.transaction('buggy')
21 > # make sure we rollback the transaction as we don't want to rely on the__del__
21 > # make sure we rollback the transaction as we don't want to rely on the__del__
22 > tr.release()
22 > tr.release()
23 >
23 >
24 > @command(b'properlocking', [], '')
24 > @command(b'properlocking', [], '')
25 > def properlocking(ui, repo):
25 > def properlocking(ui, repo):
26 > """check that reentrance is fine"""
26 > """check that reentrance is fine"""
27 > wl = repo.wlock()
27 > wl = repo.wlock()
28 > lo = repo.lock()
28 > lo = repo.lock()
29 > tr = repo.transaction('proper')
29 > tr = repo.transaction('proper')
30 > tr2 = repo.transaction('proper')
30 > tr2 = repo.transaction('proper')
31 > lo2 = repo.lock()
31 > lo2 = repo.lock()
32 > wl2 = repo.wlock()
32 > wl2 = repo.wlock()
33 > wl2.release()
33 > wl2.release()
34 > lo2.release()
34 > lo2.release()
35 > tr2.close()
35 > tr2.close()
36 > tr.close()
36 > tr.close()
37 > lo.release()
37 > lo.release()
38 > wl.release()
38 > wl.release()
39 >
39 >
40 > @command(b'nowaitlocking', [], '')
40 > @command(b'nowaitlocking', [], '')
41 > def nowaitlocking(ui, repo):
41 > def nowaitlocking(ui, repo):
42 > lo = repo.lock()
42 > lo = repo.lock()
43 > wl = repo.wlock(wait=False)
43 > wl = repo.wlock(wait=False)
44 > wl.release()
44 > wl.release()
45 > lo.release()
45 > lo.release()
46 >
46 >
47 > @command(b'no-wlock-write', [], '')
48 > def nowlockwrite(ui, repo):
49 > with repo.vfs(b'branch', 'a'):
50 > pass
51 >
47 > @command(b'stripintr', [], '')
52 > @command(b'stripintr', [], '')
48 > def stripintr(ui, repo):
53 > def stripintr(ui, repo):
49 > lo = repo.lock()
54 > lo = repo.lock()
50 > tr = repo.transaction('foobar')
55 > tr = repo.transaction('foobar')
51 > try:
56 > try:
52 > repair.strip(repo.ui, repo, [repo['.'].node()])
57 > repair.strip(repo.ui, repo, [repo['.'].node()])
53 > finally:
58 > finally:
54 > lo.release()
59 > lo.release()
55 > @command(b'oldanddeprecated', [], '')
60 > @command(b'oldanddeprecated', [], '')
56 > def oldanddeprecated(ui, repo):
61 > def oldanddeprecated(ui, repo):
57 > """test deprecation warning API"""
62 > """test deprecation warning API"""
58 > def foobar(ui):
63 > def foobar(ui):
59 > ui.deprecwarn('foorbar is deprecated, go shopping', '42.1337')
64 > ui.deprecwarn('foorbar is deprecated, go shopping', '42.1337')
60 > foobar(ui)
65 > foobar(ui)
61 > @command(b'nouiwarning', [], '')
66 > @command(b'nouiwarning', [], '')
62 > def nouiwarning(ui, repo):
67 > def nouiwarning(ui, repo):
63 > util.nouideprecwarn('this is a test', '13.37')
68 > util.nouideprecwarn('this is a test', '13.37')
64 > @command(b'programmingerror', [], '')
69 > @command(b'programmingerror', [], '')
65 > def programmingerror(ui, repo):
70 > def programmingerror(ui, repo):
66 > raise error.ProgrammingError('something went wrong', hint='try again')
71 > raise error.ProgrammingError('something went wrong', hint='try again')
67 > EOF
72 > EOF
68
73
69 $ cat << EOF >> $HGRCPATH
74 $ cat << EOF >> $HGRCPATH
70 > [extensions]
75 > [extensions]
71 > buggylocking=$TESTTMP/buggylocking.py
76 > buggylocking=$TESTTMP/buggylocking.py
72 > mock=$TESTDIR/mockblackbox.py
77 > mock=$TESTDIR/mockblackbox.py
73 > blackbox=
78 > blackbox=
74 > [devel]
79 > [devel]
75 > all-warnings=1
80 > all-warnings=1
76 > EOF
81 > EOF
77
82
78 $ hg init lock-checker
83 $ hg init lock-checker
79 $ cd lock-checker
84 $ cd lock-checker
80 $ hg buggylocking
85 $ hg buggylocking
81 devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:* (buggylocking) (glob)
86 devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:* (buggylocking) (glob)
82 $ cat << EOF >> $HGRCPATH
87 $ cat << EOF >> $HGRCPATH
83 > [devel]
88 > [devel]
84 > all=0
89 > all=0
85 > check-locks=1
90 > check-locks=1
86 > EOF
91 > EOF
87 $ hg buggylocking
92 $ hg buggylocking
88 devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:* (buggylocking) (glob)
93 devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:* (buggylocking) (glob)
89 $ hg buggylocking --traceback
94 $ hg buggylocking --traceback
90 devel-warn: "wlock" acquired after "lock" at:
95 devel-warn: "wlock" acquired after "lock" at:
91 */hg:* in * (glob)
96 */hg:* in * (glob)
92 */mercurial/dispatch.py:* in run (glob)
97 */mercurial/dispatch.py:* in run (glob)
93 */mercurial/dispatch.py:* in dispatch (glob)
98 */mercurial/dispatch.py:* in dispatch (glob)
94 */mercurial/dispatch.py:* in _runcatch (glob)
99 */mercurial/dispatch.py:* in _runcatch (glob)
95 */mercurial/dispatch.py:* in _callcatch (glob)
100 */mercurial/dispatch.py:* in _callcatch (glob)
96 */mercurial/scmutil.py* in callcatch (glob)
101 */mercurial/scmutil.py* in callcatch (glob)
97 */mercurial/dispatch.py:* in _runcatchfunc (glob)
102 */mercurial/dispatch.py:* in _runcatchfunc (glob)
98 */mercurial/dispatch.py:* in _dispatch (glob)
103 */mercurial/dispatch.py:* in _dispatch (glob)
99 */mercurial/dispatch.py:* in runcommand (glob)
104 */mercurial/dispatch.py:* in runcommand (glob)
100 */mercurial/dispatch.py:* in _runcommand (glob)
105 */mercurial/dispatch.py:* in _runcommand (glob)
101 */mercurial/dispatch.py:* in <lambda> (glob)
106 */mercurial/dispatch.py:* in <lambda> (glob)
102 */mercurial/util.py:* in check (glob)
107 */mercurial/util.py:* in check (glob)
103 $TESTTMP/buggylocking.py:* in buggylocking (glob)
108 $TESTTMP/buggylocking.py:* in buggylocking (glob)
104 $ hg properlocking
109 $ hg properlocking
105 $ hg nowaitlocking
110 $ hg nowaitlocking
106
111
112 Writing without lock
113
114 $ hg no-wlock-write
115 devel-warn: write with no wlock: "branch" at: $TESTTMP/buggylocking.py:* (nowlockwrite) (glob)
116
107 Stripping from a transaction
117 Stripping from a transaction
108
118
109 $ echo a > a
119 $ echo a > a
110 $ hg add a
120 $ hg add a
111 $ hg commit -m a
121 $ hg commit -m a
112 $ hg stripintr 2>&1 | egrep -v '^(\*\*| )'
122 $ hg stripintr 2>&1 | egrep -v '^(\*\*| )'
113 Traceback (most recent call last):
123 Traceback (most recent call last):
114 mercurial.error.ProgrammingError: cannot strip from inside a transaction
124 mercurial.error.ProgrammingError: cannot strip from inside a transaction
115
125
116 $ hg oldanddeprecated
126 $ hg oldanddeprecated
117 devel-warn: foorbar is deprecated, go shopping
127 devel-warn: foorbar is deprecated, go shopping
118 (compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)
128 (compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)
119
129
120 $ hg oldanddeprecated --traceback
130 $ hg oldanddeprecated --traceback
121 devel-warn: foorbar is deprecated, go shopping
131 devel-warn: foorbar is deprecated, go shopping
122 (compatibility will be dropped after Mercurial-42.1337, update your code.) at:
132 (compatibility will be dropped after Mercurial-42.1337, update your code.) at:
123 */hg:* in <module> (glob)
133 */hg:* in <module> (glob)
124 */mercurial/dispatch.py:* in run (glob)
134 */mercurial/dispatch.py:* in run (glob)
125 */mercurial/dispatch.py:* in dispatch (glob)
135 */mercurial/dispatch.py:* in dispatch (glob)
126 */mercurial/dispatch.py:* in _runcatch (glob)
136 */mercurial/dispatch.py:* in _runcatch (glob)
127 */mercurial/dispatch.py:* in _callcatch (glob)
137 */mercurial/dispatch.py:* in _callcatch (glob)
128 */mercurial/scmutil.py* in callcatch (glob)
138 */mercurial/scmutil.py* in callcatch (glob)
129 */mercurial/dispatch.py:* in _runcatchfunc (glob)
139 */mercurial/dispatch.py:* in _runcatchfunc (glob)
130 */mercurial/dispatch.py:* in _dispatch (glob)
140 */mercurial/dispatch.py:* in _dispatch (glob)
131 */mercurial/dispatch.py:* in runcommand (glob)
141 */mercurial/dispatch.py:* in runcommand (glob)
132 */mercurial/dispatch.py:* in _runcommand (glob)
142 */mercurial/dispatch.py:* in _runcommand (glob)
133 */mercurial/dispatch.py:* in <lambda> (glob)
143 */mercurial/dispatch.py:* in <lambda> (glob)
134 */mercurial/util.py:* in check (glob)
144 */mercurial/util.py:* in check (glob)
135 $TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
145 $TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
136 $ hg blackbox -l 7
146 $ hg blackbox -l 7
137 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated
147 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated
138 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
148 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
139 (compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)
149 (compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)
140 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated exited 0 after * seconds (glob)
150 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated exited 0 after * seconds (glob)
141 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback
151 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback
142 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
152 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
143 (compatibility will be dropped after Mercurial-42.1337, update your code.) at:
153 (compatibility will be dropped after Mercurial-42.1337, update your code.) at:
144 */hg:* in <module> (glob)
154 */hg:* in <module> (glob)
145 */mercurial/dispatch.py:* in run (glob)
155 */mercurial/dispatch.py:* in run (glob)
146 */mercurial/dispatch.py:* in dispatch (glob)
156 */mercurial/dispatch.py:* in dispatch (glob)
147 */mercurial/dispatch.py:* in _runcatch (glob)
157 */mercurial/dispatch.py:* in _runcatch (glob)
148 */mercurial/dispatch.py:* in _callcatch (glob)
158 */mercurial/dispatch.py:* in _callcatch (glob)
149 */mercurial/scmutil.py* in callcatch (glob)
159 */mercurial/scmutil.py* in callcatch (glob)
150 */mercurial/dispatch.py:* in _runcatchfunc (glob)
160 */mercurial/dispatch.py:* in _runcatchfunc (glob)
151 */mercurial/dispatch.py:* in _dispatch (glob)
161 */mercurial/dispatch.py:* in _dispatch (glob)
152 */mercurial/dispatch.py:* in runcommand (glob)
162 */mercurial/dispatch.py:* in runcommand (glob)
153 */mercurial/dispatch.py:* in _runcommand (glob)
163 */mercurial/dispatch.py:* in _runcommand (glob)
154 */mercurial/dispatch.py:* in <lambda> (glob)
164 */mercurial/dispatch.py:* in <lambda> (glob)
155 */mercurial/util.py:* in check (glob)
165 */mercurial/util.py:* in check (glob)
156 $TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
166 $TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
157 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback exited 0 after * seconds (glob)
167 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback exited 0 after * seconds (glob)
158 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> blackbox -l 7
168 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> blackbox -l 7
159
169
160 Test programming error failure:
170 Test programming error failure:
161
171
162 $ hg buggytransaction 2>&1 | egrep -v '^ '
172 $ hg buggytransaction 2>&1 | egrep -v '^ '
163 ** Unknown exception encountered with possibly-broken third-party extension buggylocking
173 ** Unknown exception encountered with possibly-broken third-party extension buggylocking
164 ** which supports versions unknown of Mercurial.
174 ** which supports versions unknown of Mercurial.
165 ** Please disable buggylocking and try your action again.
175 ** Please disable buggylocking and try your action again.
166 ** If that fixes the bug please report it to the extension author.
176 ** If that fixes the bug please report it to the extension author.
167 ** Python * (glob)
177 ** Python * (glob)
168 ** Mercurial Distributed SCM (*) (glob)
178 ** Mercurial Distributed SCM (*) (glob)
169 ** Extensions loaded: * (glob)
179 ** Extensions loaded: * (glob)
170 ** ProgrammingError: transaction requires locking
180 ** ProgrammingError: transaction requires locking
171 Traceback (most recent call last):
181 Traceback (most recent call last):
172 mercurial.error.ProgrammingError: transaction requires locking
182 mercurial.error.ProgrammingError: transaction requires locking
173
183
174 $ hg programmingerror 2>&1 | egrep -v '^ '
184 $ hg programmingerror 2>&1 | egrep -v '^ '
175 ** Unknown exception encountered with possibly-broken third-party extension buggylocking
185 ** Unknown exception encountered with possibly-broken third-party extension buggylocking
176 ** which supports versions unknown of Mercurial.
186 ** which supports versions unknown of Mercurial.
177 ** Please disable buggylocking and try your action again.
187 ** Please disable buggylocking and try your action again.
178 ** If that fixes the bug please report it to the extension author.
188 ** If that fixes the bug please report it to the extension author.
179 ** Python * (glob)
189 ** Python * (glob)
180 ** Mercurial Distributed SCM (*) (glob)
190 ** Mercurial Distributed SCM (*) (glob)
181 ** Extensions loaded: * (glob)
191 ** Extensions loaded: * (glob)
182 ** ProgrammingError: something went wrong
192 ** ProgrammingError: something went wrong
183 ** (try again)
193 ** (try again)
184 Traceback (most recent call last):
194 Traceback (most recent call last):
185 mercurial.error.ProgrammingError: something went wrong
195 mercurial.error.ProgrammingError: something went wrong
186
196
187 Old style deprecation warning
197 Old style deprecation warning
188
198
189 $ hg nouiwarning
199 $ hg nouiwarning
190 $TESTTMP/buggylocking.py:*: DeprecationWarning: this is a test (glob)
200 $TESTTMP/buggylocking.py:*: DeprecationWarning: this is a test (glob)
191 (compatibility will be dropped after Mercurial-13.37, update your code.)
201 (compatibility will be dropped after Mercurial-13.37, update your code.)
192 util.nouideprecwarn('this is a test', '13.37')
202 util.nouideprecwarn('this is a test', '13.37')
193
203
194 (disabled outside of test run)
204 (disabled outside of test run)
195
205
196 $ HGEMITWARNINGS= hg nouiwarning
206 $ HGEMITWARNINGS= hg nouiwarning
197
207
198 Test warning on config option access and registration
208 Test warning on config option access and registration
199
209
200 $ cat << EOF > ${TESTTMP}/buggyconfig.py
210 $ cat << EOF > ${TESTTMP}/buggyconfig.py
201 > """A small extension that tests our developer warnings for config"""
211 > """A small extension that tests our developer warnings for config"""
202 >
212 >
203 > from mercurial import registrar
213 > from mercurial import registrar
204 >
214 >
205 > cmdtable = {}
215 > cmdtable = {}
206 > command = registrar.command(cmdtable)
216 > command = registrar.command(cmdtable)
207 >
217 >
208 > configtable = {}
218 > configtable = {}
209 > configitem = registrar.configitem(configtable)
219 > configitem = registrar.configitem(configtable)
210 >
220 >
211 > configitem('test', 'some', default='foo')
221 > configitem('test', 'some', default='foo')
212 > # overwrite a core config
222 > # overwrite a core config
213 > configitem('ui', 'quiet', default=False)
223 > configitem('ui', 'quiet', default=False)
214 > configitem('ui', 'interactive', default=None)
224 > configitem('ui', 'interactive', default=None)
215 >
225 >
216 > @command(b'buggyconfig')
226 > @command(b'buggyconfig')
217 > def cmdbuggyconfig(ui, repo):
227 > def cmdbuggyconfig(ui, repo):
218 > repo.ui.config('ui', 'quiet', False)
228 > repo.ui.config('ui', 'quiet', False)
219 > repo.ui.config('ui', 'interactive', None)
229 > repo.ui.config('ui', 'interactive', None)
220 > repo.ui.config('test', 'some', 'foo')
230 > repo.ui.config('test', 'some', 'foo')
221 > EOF
231 > EOF
222
232
223 $ hg --config "extensions.buggyconfig=${TESTTMP}/buggyconfig.py" buggyconfig
233 $ hg --config "extensions.buggyconfig=${TESTTMP}/buggyconfig.py" buggyconfig
224 devel-warn: extension 'buggyconfig' overwrite config item 'ui.interactive' at: */mercurial/extensions.py:* (loadall) (glob)
234 devel-warn: extension 'buggyconfig' overwrite config item 'ui.interactive' at: */mercurial/extensions.py:* (loadall) (glob)
225 devel-warn: extension 'buggyconfig' overwrite config item 'ui.quiet' at: */mercurial/extensions.py:* (loadall) (glob)
235 devel-warn: extension 'buggyconfig' overwrite config item 'ui.quiet' at: */mercurial/extensions.py:* (loadall) (glob)
226 devel-warn: specifying a default value for a registered config item: 'ui.quiet' 'False' at: $TESTTMP/buggyconfig.py:* (cmdbuggyconfig) (glob)
236 devel-warn: specifying a default value for a registered config item: 'ui.quiet' 'False' at: $TESTTMP/buggyconfig.py:* (cmdbuggyconfig) (glob)
227 devel-warn: specifying a default value for a registered config item: 'ui.interactive' 'None' at: $TESTTMP/buggyconfig.py:* (cmdbuggyconfig) (glob)
237 devel-warn: specifying a default value for a registered config item: 'ui.interactive' 'None' at: $TESTTMP/buggyconfig.py:* (cmdbuggyconfig) (glob)
228 devel-warn: specifying a default value for a registered config item: 'test.some' 'foo' at: $TESTTMP/buggyconfig.py:* (cmdbuggyconfig) (glob)
238 devel-warn: specifying a default value for a registered config item: 'test.some' 'foo' at: $TESTTMP/buggyconfig.py:* (cmdbuggyconfig) (glob)
229
239
230 $ cd ..
240 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now