Commit r17980:83aa4359 (merge, branch default) by Matt Mackall: merge with stable
hook.py: @@ -1,188 +1,189 @@
1 # hook.py - hook support for mercurial
1 # hook.py - hook support for mercurial
2 #
2 #
3 # Copyright 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import os, sys
9 import os, sys
10 import extensions, util
10 import extensions, util
11
11
12 def _pythonhook(ui, repo, name, hname, funcname, args, throw):
12 def _pythonhook(ui, repo, name, hname, funcname, args, throw):
13 '''call python hook. hook is callable object, looked up as
13 '''call python hook. hook is callable object, looked up as
14 name in python module. if callable returns "true", hook
14 name in python module. if callable returns "true", hook
15 fails, else passes. if hook raises exception, treated as
15 fails, else passes. if hook raises exception, treated as
16 hook failure. exception propagates if throw is "true".
16 hook failure. exception propagates if throw is "true".
17
17
18 reason for "true" meaning "hook failed" is so that
18 reason for "true" meaning "hook failed" is so that
19 unmodified commands (e.g. mercurial.commands.update) can
19 unmodified commands (e.g. mercurial.commands.update) can
20 be run as hooks without wrappers to convert return values.'''
20 be run as hooks without wrappers to convert return values.'''
21
21
22 ui.note(_("calling hook %s: %s\n") % (hname, funcname))
22 ui.note(_("calling hook %s: %s\n") % (hname, funcname))
23 obj = funcname
23 obj = funcname
24 if not util.safehasattr(obj, '__call__'):
24 if not util.safehasattr(obj, '__call__'):
25 d = funcname.rfind('.')
25 d = funcname.rfind('.')
26 if d == -1:
26 if d == -1:
27 raise util.Abort(_('%s hook is invalid ("%s" not in '
27 raise util.Abort(_('%s hook is invalid ("%s" not in '
28 'a module)') % (hname, funcname))
28 'a module)') % (hname, funcname))
29 modname = funcname[:d]
29 modname = funcname[:d]
30 oldpaths = sys.path
30 oldpaths = sys.path
31 if util.mainfrozen():
31 if util.mainfrozen():
32 # binary installs require sys.path manipulation
32 # binary installs require sys.path manipulation
33 modpath, modfile = os.path.split(modname)
33 modpath, modfile = os.path.split(modname)
34 if modpath and modfile:
34 if modpath and modfile:
35 sys.path = sys.path[:] + [modpath]
35 sys.path = sys.path[:] + [modpath]
36 modname = modfile
36 modname = modfile
37 try:
37 try:
38 obj = __import__(modname)
38 obj = __import__(modname)
39 except ImportError:
39 except ImportError:
40 e1 = sys.exc_type, sys.exc_value, sys.exc_traceback
40 e1 = sys.exc_type, sys.exc_value, sys.exc_traceback
41 try:
41 try:
42 # extensions are loaded with hgext_ prefix
42 # extensions are loaded with hgext_ prefix
43 obj = __import__("hgext_%s" % modname)
43 obj = __import__("hgext_%s" % modname)
44 except ImportError:
44 except ImportError:
45 e2 = sys.exc_type, sys.exc_value, sys.exc_traceback
45 e2 = sys.exc_type, sys.exc_value, sys.exc_traceback
46 if ui.tracebackflag:
46 if ui.tracebackflag:
47 ui.warn(_('exception from first failed import attempt:\n'))
47 ui.warn(_('exception from first failed import attempt:\n'))
48 ui.traceback(e1)
48 ui.traceback(e1)
49 if ui.tracebackflag:
49 if ui.tracebackflag:
50 ui.warn(_('exception from second failed import attempt:\n'))
50 ui.warn(_('exception from second failed import attempt:\n'))
51 ui.traceback(e2)
51 ui.traceback(e2)
52 raise util.Abort(_('%s hook is invalid '
52 raise util.Abort(_('%s hook is invalid '
53 '(import of "%s" failed)') %
53 '(import of "%s" failed)') %
54 (hname, modname))
54 (hname, modname))
55 sys.path = oldpaths
55 sys.path = oldpaths
56 try:
56 try:
57 for p in funcname.split('.')[1:]:
57 for p in funcname.split('.')[1:]:
58 obj = getattr(obj, p)
58 obj = getattr(obj, p)
59 except AttributeError:
59 except AttributeError:
60 raise util.Abort(_('%s hook is invalid '
60 raise util.Abort(_('%s hook is invalid '
61 '("%s" is not defined)') %
61 '("%s" is not defined)') %
62 (hname, funcname))
62 (hname, funcname))
63 if not util.safehasattr(obj, '__call__'):
63 if not util.safehasattr(obj, '__call__'):
64 raise util.Abort(_('%s hook is invalid '
64 raise util.Abort(_('%s hook is invalid '
65 '("%s" is not callable)') %
65 '("%s" is not callable)') %
66 (hname, funcname))
66 (hname, funcname))
67 try:
67 try:
68 try:
68 try:
69 # redirect IO descriptors to the ui descriptors so hooks
69 # redirect IO descriptors to the ui descriptors so hooks
70 # that write directly to these don't mess up the command
70 # that write directly to these don't mess up the command
71 # protocol when running through the command server
71 # protocol when running through the command server
72 old = sys.stdout, sys.stderr, sys.stdin
72 old = sys.stdout, sys.stderr, sys.stdin
73 sys.stdout, sys.stderr, sys.stdin = ui.fout, ui.ferr, ui.fin
73 sys.stdout, sys.stderr, sys.stdin = ui.fout, ui.ferr, ui.fin
74
74
75 r = obj(ui=ui, repo=repo, hooktype=name, **args)
75 r = obj(ui=ui, repo=repo, hooktype=name, **args)
76 except KeyboardInterrupt:
76 except KeyboardInterrupt:
77 raise
77 raise
78 except Exception, exc:
78 except Exception, exc:
79 if isinstance(exc, util.Abort):
79 if isinstance(exc, util.Abort):
80 ui.warn(_('error: %s hook failed: %s\n') %
80 ui.warn(_('error: %s hook failed: %s\n') %
81 (hname, exc.args[0]))
81 (hname, exc.args[0]))
82 else:
82 else:
83 ui.warn(_('error: %s hook raised an exception: '
83 ui.warn(_('error: %s hook raised an exception: '
84 '%s\n') % (hname, exc))
84 '%s\n') % (hname, exc))
85 if throw:
85 if throw:
86 raise
86 raise
87 ui.traceback()
87 ui.traceback()
88 return True
88 return True
89 finally:
89 finally:
90 sys.stdout, sys.stderr, sys.stdin = old
90 sys.stdout, sys.stderr, sys.stdin = old
91 if r:
91 if r:
92 if throw:
92 if throw:
93 raise util.Abort(_('%s hook failed') % hname)
93 raise util.Abort(_('%s hook failed') % hname)
94 ui.warn(_('warning: %s hook failed\n') % hname)
94 ui.warn(_('warning: %s hook failed\n') % hname)
95 return r
95 return r
96
96
97 def _exthook(ui, repo, name, cmd, args, throw):
97 def _exthook(ui, repo, name, cmd, args, throw):
98 ui.note(_("running hook %s: %s\n") % (name, cmd))
98 ui.note(_("running hook %s: %s\n") % (name, cmd))
99
99
100 env = {}
100 env = {}
101 for k, v in args.iteritems():
101 for k, v in args.iteritems():
102 if util.safehasattr(v, '__call__'):
102 if util.safehasattr(v, '__call__'):
103 v = v()
103 v = v()
104 if isinstance(v, dict):
104 if isinstance(v, dict):
105 # make the dictionary element order stable across Python
105 # make the dictionary element order stable across Python
106 # implementations
106 # implementations
107 v = ('{' +
107 v = ('{' +
108 ', '.join('%r: %r' % i for i in sorted(v.iteritems())) +
108 ', '.join('%r: %r' % i for i in sorted(v.iteritems())) +
109 '}')
109 '}')
110 env['HG_' + k.upper()] = v
110 env['HG_' + k.upper()] = v
111
111
112 if repo:
112 if repo:
113 cwd = repo.root
113 cwd = repo.root
114 else:
114 else:
115 cwd = os.getcwd()
115 cwd = os.getcwd()
116 if 'HG_URL' in env and env['HG_URL'].startswith('remote:http'):
116 if 'HG_URL' in env and env['HG_URL'].startswith('remote:http'):
117 r = util.system(cmd, environ=env, cwd=cwd, out=ui)
117 r = util.system(cmd, environ=env, cwd=cwd, out=ui)
118 else:
118 else:
119 r = util.system(cmd, environ=env, cwd=cwd, out=ui.fout)
119 r = util.system(cmd, environ=env, cwd=cwd, out=ui.fout)
120 if r:
120 if r:
121 desc, r = util.explainexit(r)
121 desc, r = util.explainexit(r)
122 if throw:
122 if throw:
123 raise util.Abort(_('%s hook %s') % (name, desc))
123 raise util.Abort(_('%s hook %s') % (name, desc))
124 ui.warn(_('warning: %s hook %s\n') % (name, desc))
124 ui.warn(_('warning: %s hook %s\n') % (name, desc))
125 return r
125 return r
126
126
127 def _allhooks(ui):
127 def _allhooks(ui):
128 hooks = []
128 hooks = []
129 for name, cmd in ui.configitems('hooks'):
129 for name, cmd in ui.configitems('hooks'):
130 if not name.startswith('priority'):
130 if not name.startswith('priority'):
131 priority = ui.configint('hooks', 'priority.%s' % name, 0)
131 priority = ui.configint('hooks', 'priority.%s' % name, 0)
132 hooks.append((-priority, len(hooks), name, cmd))
132 hooks.append((-priority, len(hooks), name, cmd))
133 return [(k, v) for p, o, k, v in sorted(hooks)]
133 return [(k, v) for p, o, k, v in sorted(hooks)]
134
134
135 _redirect = False
135 _redirect = False
136 def redirect(state):
136 def redirect(state):
137 global _redirect
137 global _redirect
138 _redirect = state
138 _redirect = state
139
139
140 def hook(ui, repo, name, throw=False, **args):
140 def hook(ui, repo, name, throw=False, **args):
141 if not ui.callhooks:
141 if not ui.callhooks:
142 return False
142 return False
143
143
144 r = False
144 r = False
- 145
- 146     oldstdout = -1
- 147     if _redirect:
- 148         try:
- 149             stdoutno = sys.__stdout__.fileno()
- 150             stderrno = sys.__stderr__.fileno()
- 151             # temporarily redirect stdout to stderr, if possible
- 152             if stdoutno >= 0 and stderrno >= 0:
- 153                 sys.__stdout__.flush()
- 154                 oldstdout = os.dup(stdoutno)
- 155                 os.dup2(stderrno, stdoutno)
- 156         except AttributeError:
- 157             # __stdout__/__stderr__ doesn't have fileno(), it's not a real file
- 158             pass
- 159
- 160     try:
- 161         for hname, cmd in _allhooks(ui):
- 162             if hname.split('.')[0] != name or not cmd:
- 163                 continue
+ 145     oldstdout = -1
+ 146
+ 147     try:
+ 148         for hname, cmd in _allhooks(ui):
+ 149             if hname.split('.')[0] != name or not cmd:
+ 150                 continue
+ 151
+ 152             if oldstdout == -1 and _redirect:
+ 153                 try:
+ 154                     stdoutno = sys.__stdout__.fileno()
+ 155                     stderrno = sys.__stderr__.fileno()
+ 156                     # temporarily redirect stdout to stderr, if possible
+ 157                     if stdoutno >= 0 and stderrno >= 0:
+ 158                         sys.__stdout__.flush()
+ 159                         oldstdout = os.dup(stdoutno)
+ 160                         os.dup2(stderrno, stdoutno)
+ 161                 except (OSError, AttributeError):
+ 162                     # files seem to be bogus, give up on redirecting (WSGI, etc)
+ 163                     pass
+ 164
164 if util.safehasattr(cmd, '__call__'):
165 if util.safehasattr(cmd, '__call__'):
165 r = _pythonhook(ui, repo, name, hname, cmd, args, throw) or r
166 r = _pythonhook(ui, repo, name, hname, cmd, args, throw) or r
166 elif cmd.startswith('python:'):
167 elif cmd.startswith('python:'):
167 if cmd.count(':') >= 2:
168 if cmd.count(':') >= 2:
168 path, cmd = cmd[7:].rsplit(':', 1)
169 path, cmd = cmd[7:].rsplit(':', 1)
169 path = util.expandpath(path)
170 path = util.expandpath(path)
170 if repo:
171 if repo:
171 path = os.path.join(repo.root, path)
172 path = os.path.join(repo.root, path)
172 try:
173 try:
173 mod = extensions.loadpath(path, 'hghook.%s' % hname)
174 mod = extensions.loadpath(path, 'hghook.%s' % hname)
174 except Exception:
175 except Exception:
175 ui.write(_("loading %s hook failed:\n") % hname)
176 ui.write(_("loading %s hook failed:\n") % hname)
176 raise
177 raise
177 hookfn = getattr(mod, cmd)
178 hookfn = getattr(mod, cmd)
178 else:
179 else:
179 hookfn = cmd[7:].strip()
180 hookfn = cmd[7:].strip()
180 r = _pythonhook(ui, repo, name, hname, hookfn, args, throw) or r
181 r = _pythonhook(ui, repo, name, hname, hookfn, args, throw) or r
181 else:
182 else:
182 r = _exthook(ui, repo, hname, cmd, args, throw) or r
183 r = _exthook(ui, repo, hname, cmd, args, throw) or r
183 finally:
184 finally:
184 if _redirect and oldstdout >= 0:
185 if _redirect and oldstdout >= 0:
185 os.dup2(oldstdout, stdoutno)
186 os.dup2(oldstdout, stdoutno)
186 os.close(oldstdout)
187 os.close(oldstdout)
187
188
188 return r
189 return r
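For orientation, here is a minimal sketch of what the machinery above consumes. It follows the convention documented in _pythonhook (the hook is called with ui, repo, hooktype plus the hook arguments, and a truthy return value means failure) and the priority lookup in _allhooks. The hgrc section in the comment, the myhooks.py module name and the forbid_secret function are hypothetical examples, not part of Mercurial itself.

# myhooks.py -- hypothetical in-process hook module (a sketch, not Mercurial code).
# It could be wired up in an hgrc like this:
#
#   [hooks]
#   pretxncommit.nosecrets = python:/path/to/myhooks.py:forbid_secret
#   priority.pretxncommit.nosecrets = 10    # higher priority sorts first in _allhooks()
#
# hook() above invokes it as obj(ui=ui, repo=repo, hooktype=name, **args), so the
# pretxncommit arguments (node, parent1, parent2, ...) arrive as keyword arguments.

def forbid_secret(ui, repo, hooktype, node=None, **kwargs):
    """Reject commits whose description contains the word 'secret'."""
    ctx = repo[node]
    if 'secret' in ctx.description():
        ui.warn('commit rejected: description mentions a secret\n')
        return True    # truthy return value == hook failure (see _pythonhook docstring)
    return False       # falsy == success, the transaction proceeds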
phases.py: @@ -1,393 +1,393 @@
1 """ Mercurial phases support code
1 """ Mercurial phases support code
2
2
3 ---
3 ---
4
4
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 Logilab SA <contact@logilab.fr>
6 Logilab SA <contact@logilab.fr>
7 Augie Fackler <durin42@gmail.com>
7 Augie Fackler <durin42@gmail.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License version 2 or any later version.
10 of the GNU General Public License version 2 or any later version.
11
11
12 ---
12 ---
13
13
14 This module implements most phase logic in mercurial.
14 This module implements most phase logic in mercurial.
15
15
16
16
17 Basic Concept
17 Basic Concept
18 =============
18 =============
19
19
20 A 'changeset phase' is an indicator that tells us how a changeset is
20 A 'changeset phase' is an indicator that tells us how a changeset is
21 manipulated and communicated. The details of each phase is described
21 manipulated and communicated. The details of each phase is described
22 below, here we describe the properties they have in common.
22 below, here we describe the properties they have in common.
23
23
24 Like bookmarks, phases are not stored in history and thus are not
24 Like bookmarks, phases are not stored in history and thus are not
25 permanent and leave no audit trail.
25 permanent and leave no audit trail.
26
26
27 First, no changeset can be in two phases at once. Phases are ordered,
27 First, no changeset can be in two phases at once. Phases are ordered,
28 so they can be considered from lowest to highest. The default, lowest
28 so they can be considered from lowest to highest. The default, lowest
29 phase is 'public' - this is the normal phase of existing changesets. A
29 phase is 'public' - this is the normal phase of existing changesets. A
30 child changeset can not be in a lower phase than its parents.
30 child changeset can not be in a lower phase than its parents.
31
31
32 These phases share a hierarchy of traits:
32 These phases share a hierarchy of traits:
33
33
34 immutable shared
34 immutable shared
35 public: X X
35 public: X X
36 draft: X
36 draft: X
37 secret:
37 secret:
38
38
39 Local commits are draft by default.
39 Local commits are draft by default.
40
40
41 Phase Movement and Exchange
41 Phase Movement and Exchange
42 ===========================
42 ===========================
43
43
44 Phase data is exchanged by pushkey on pull and push. Some servers have
44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 a publish option set, we call such a server a "publishing server".
45 a publish option set, we call such a server a "publishing server".
46 Pushing a draft changeset to a publishing server changes the phase to
46 Pushing a draft changeset to a publishing server changes the phase to
47 public.
47 public.
48
48
49 A small list of fact/rules define the exchange of phase:
49 A small list of fact/rules define the exchange of phase:
50
50
51 * old client never changes server states
51 * old client never changes server states
52 * pull never changes server states
52 * pull never changes server states
53 * publish and old server changesets are seen as public by client
53 * publish and old server changesets are seen as public by client
54 * any secret changeset seen in another repository is lowered to at
54 * any secret changeset seen in another repository is lowered to at
55 least draft
55 least draft
56
56
57 Here is the final table summing up the 49 possible use cases of phase
57 Here is the final table summing up the 49 possible use cases of phase
58 exchange:
58 exchange:
59
59
60 server
60 server
61 old publish non-publish
61 old publish non-publish
62 N X N D P N D P
62 N X N D P N D P
63 old client
63 old client
64 pull
64 pull
65 N - X/X - X/D X/P - X/D X/P
65 N - X/X - X/D X/P - X/D X/P
66 X - X/X - X/D X/P - X/D X/P
66 X - X/X - X/D X/P - X/D X/P
67 push
67 push
68 X X/X X/X X/P X/P X/P X/D X/D X/P
68 X X/X X/X X/P X/P X/P X/D X/D X/P
69 new client
69 new client
70 pull
70 pull
71 N - P/X - P/D P/P - D/D P/P
71 N - P/X - P/D P/P - D/D P/P
72 D - P/X - P/D P/P - D/D P/P
72 D - P/X - P/D P/P - D/D P/P
73 P - P/X - P/D P/P - P/D P/P
73 P - P/X - P/D P/P - P/D P/P
74 push
74 push
75 D P/X P/X P/P P/P P/P D/D D/D P/P
75 D P/X P/X P/P P/P P/P D/D D/D P/P
76 P P/X P/X P/P P/P P/P P/P P/P P/P
76 P P/X P/X P/P P/P P/P P/P P/P P/P
77
77
78 Legend:
78 Legend:
79
79
80 A/B = final state on client / state on server
80 A/B = final state on client / state on server
81
81
82 * N = new/not present,
82 * N = new/not present,
83 * P = public,
83 * P = public,
84 * D = draft,
84 * D = draft,
85 * X = not tracked (i.e., the old client or server has no internal
85 * X = not tracked (i.e., the old client or server has no internal
86 way of recording the phase.)
86 way of recording the phase.)
87
87
88 passive = only pushes
88 passive = only pushes
89
89
90
90
91 A cell here can be read like this:
91 A cell here can be read like this:
92
92
93 "When a new client pushes a draft changeset (D) to a publishing
93 "When a new client pushes a draft changeset (D) to a publishing
94 server where it's not present (N), it's marked public on both
94 server where it's not present (N), it's marked public on both
95 sides (P/P)."
95 sides (P/P)."
96
96
97 Note: old client behave as a publishing server with draft only content
97 Note: old client behave as a publishing server with draft only content
98 - other people see it as public
98 - other people see it as public
99 - content is pushed as draft
99 - content is pushed as draft
100
100
101 """
101 """
102
102
103 import errno
103 import errno
104 from node import nullid, nullrev, bin, hex, short
104 from node import nullid, nullrev, bin, hex, short
105 from i18n import _
105 from i18n import _
- 106 import util
+ 106 import util, error
107 import obsolete
107 import obsolete
108
108
109 allphases = public, draft, secret = range(3)
109 allphases = public, draft, secret = range(3)
110 trackedphases = allphases[1:]
110 trackedphases = allphases[1:]
111 phasenames = ['public', 'draft', 'secret']
111 phasenames = ['public', 'draft', 'secret']
112
112
113 def _filterunknown(ui, changelog, phaseroots):
113 def _filterunknown(ui, changelog, phaseroots):
114 """remove unknown nodes from the phase boundary
114 """remove unknown nodes from the phase boundary
115
115
116 Nothing is lost as unknown nodes only hold data for their descendants.
116 Nothing is lost as unknown nodes only hold data for their descendants.
117 """
117 """
118 updated = False
118 updated = False
119 nodemap = changelog.nodemap # to filter unknown nodes
119 nodemap = changelog.nodemap # to filter unknown nodes
120 for phase, nodes in enumerate(phaseroots):
120 for phase, nodes in enumerate(phaseroots):
121 missing = [node for node in nodes if node not in nodemap]
121 missing = [node for node in nodes if node not in nodemap]
122 if missing:
122 if missing:
123 for mnode in missing:
123 for mnode in missing:
124 ui.debug(
124 ui.debug(
125 'removing unknown node %s from %i-phase boundary\n'
125 'removing unknown node %s from %i-phase boundary\n'
126 % (short(mnode), phase))
126 % (short(mnode), phase))
127 nodes.symmetric_difference_update(missing)
127 nodes.symmetric_difference_update(missing)
128 updated = True
128 updated = True
129 return updated
129 return updated
130
130
131 def _readroots(repo, phasedefaults=None):
131 def _readroots(repo, phasedefaults=None):
132 """Read phase roots from disk
132 """Read phase roots from disk
133
133
134 phasedefaults is a list of fn(repo, roots) callable, which are
134 phasedefaults is a list of fn(repo, roots) callable, which are
135 executed if the phase roots file does not exist. When phases are
135 executed if the phase roots file does not exist. When phases are
136 being initialized on an existing repository, this could be used to
136 being initialized on an existing repository, this could be used to
137 set selected changesets phase to something else than public.
137 set selected changesets phase to something else than public.
138
138
139 Return (roots, dirty) where dirty is true if roots differ from
139 Return (roots, dirty) where dirty is true if roots differ from
140 what is being stored.
140 what is being stored.
141 """
141 """
142 dirty = False
142 dirty = False
143 roots = [set() for i in allphases]
143 roots = [set() for i in allphases]
144 try:
144 try:
145 f = repo.sopener('phaseroots')
145 f = repo.sopener('phaseroots')
146 try:
146 try:
147 for line in f:
147 for line in f:
148 phase, nh = line.split()
148 phase, nh = line.split()
149 roots[int(phase)].add(bin(nh))
149 roots[int(phase)].add(bin(nh))
150 finally:
150 finally:
151 f.close()
151 f.close()
152 except IOError, inst:
152 except IOError, inst:
153 if inst.errno != errno.ENOENT:
153 if inst.errno != errno.ENOENT:
154 raise
154 raise
155 if phasedefaults:
155 if phasedefaults:
156 for f in phasedefaults:
156 for f in phasedefaults:
157 roots = f(repo, roots)
157 roots = f(repo, roots)
158 dirty = True
158 dirty = True
159 if _filterunknown(repo.ui, repo.changelog, roots):
159 if _filterunknown(repo.ui, repo.changelog, roots):
160 dirty = True
160 dirty = True
161 return roots, dirty
161 return roots, dirty
162
162
163 class phasecache(object):
163 class phasecache(object):
164 def __init__(self, repo, phasedefaults, _load=True):
164 def __init__(self, repo, phasedefaults, _load=True):
165 if _load:
165 if _load:
166 # Cheap trick to allow shallow-copy without copy module
166 # Cheap trick to allow shallow-copy without copy module
167 self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
167 self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
168 self.opener = repo.sopener
168 self.opener = repo.sopener
169 self._phaserevs = None
169 self._phaserevs = None
170
170
171 def copy(self):
171 def copy(self):
172 # Shallow copy meant to ensure isolation in
172 # Shallow copy meant to ensure isolation in
173 # advance/retractboundary(), nothing more.
173 # advance/retractboundary(), nothing more.
174 ph = phasecache(None, None, _load=False)
174 ph = phasecache(None, None, _load=False)
175 ph.phaseroots = self.phaseroots[:]
175 ph.phaseroots = self.phaseroots[:]
176 ph.dirty = self.dirty
176 ph.dirty = self.dirty
177 ph.opener = self.opener
177 ph.opener = self.opener
178 ph._phaserevs = self._phaserevs
178 ph._phaserevs = self._phaserevs
179 return ph
179 return ph
180
180
181 def replace(self, phcache):
181 def replace(self, phcache):
182 for a in 'phaseroots dirty opener _phaserevs'.split():
182 for a in 'phaseroots dirty opener _phaserevs'.split():
183 setattr(self, a, getattr(phcache, a))
183 setattr(self, a, getattr(phcache, a))
184
184
185 def getphaserevs(self, repo, rebuild=False):
185 def getphaserevs(self, repo, rebuild=False):
186 if rebuild or self._phaserevs is None:
186 if rebuild or self._phaserevs is None:
187 revs = [public] * len(repo.changelog)
187 revs = [public] * len(repo.changelog)
188 for phase in trackedphases:
188 for phase in trackedphases:
189 roots = map(repo.changelog.rev, self.phaseroots[phase])
189 roots = map(repo.changelog.rev, self.phaseroots[phase])
190 if roots:
190 if roots:
191 for rev in roots:
191 for rev in roots:
192 revs[rev] = phase
192 revs[rev] = phase
193 for rev in repo.changelog.descendants(roots):
193 for rev in repo.changelog.descendants(roots):
194 revs[rev] = phase
194 revs[rev] = phase
195 self._phaserevs = revs
195 self._phaserevs = revs
196 return self._phaserevs
196 return self._phaserevs
197
197
198 def phase(self, repo, rev):
198 def phase(self, repo, rev):
199 # We need a repo argument here to be able to build _phaserevs
199 # We need a repo argument here to be able to build _phaserevs
200 # if necessary. The repository instance is not stored in
200 # if necessary. The repository instance is not stored in
201 # phasecache to avoid reference cycles. The changelog instance
201 # phasecache to avoid reference cycles. The changelog instance
202 # is not stored because it is a filecache() property and can
202 # is not stored because it is a filecache() property and can
203 # be replaced without us being notified.
203 # be replaced without us being notified.
204 if rev == nullrev:
204 if rev == nullrev:
205 return public
205 return public
206 if self._phaserevs is None or rev >= len(self._phaserevs):
206 if self._phaserevs is None or rev >= len(self._phaserevs):
207 self._phaserevs = self.getphaserevs(repo, rebuild=True)
207 self._phaserevs = self.getphaserevs(repo, rebuild=True)
208 return self._phaserevs[rev]
208 return self._phaserevs[rev]
209
209
210 def write(self):
210 def write(self):
211 if not self.dirty:
211 if not self.dirty:
212 return
212 return
213 f = self.opener('phaseroots', 'w', atomictemp=True)
213 f = self.opener('phaseroots', 'w', atomictemp=True)
214 try:
214 try:
215 for phase, roots in enumerate(self.phaseroots):
215 for phase, roots in enumerate(self.phaseroots):
216 for h in roots:
216 for h in roots:
217 f.write('%i %s\n' % (phase, hex(h)))
217 f.write('%i %s\n' % (phase, hex(h)))
218 finally:
218 finally:
219 f.close()
219 f.close()
220 self.dirty = False
220 self.dirty = False
221
221
222 def _updateroots(self, phase, newroots):
222 def _updateroots(self, phase, newroots):
223 self.phaseroots[phase] = newroots
223 self.phaseroots[phase] = newroots
224 self._phaserevs = None
224 self._phaserevs = None
225 self.dirty = True
225 self.dirty = True
226
226
227 def advanceboundary(self, repo, targetphase, nodes):
227 def advanceboundary(self, repo, targetphase, nodes):
228 # Be careful to preserve shallow-copied values: do not update
228 # Be careful to preserve shallow-copied values: do not update
229 # phaseroots values, replace them.
229 # phaseroots values, replace them.
230
230
231 delroots = [] # set of root deleted by this path
231 delroots = [] # set of root deleted by this path
232 for phase in xrange(targetphase + 1, len(allphases)):
232 for phase in xrange(targetphase + 1, len(allphases)):
233 # filter nodes that are not in a compatible phase already
233 # filter nodes that are not in a compatible phase already
234 nodes = [n for n in nodes
234 nodes = [n for n in nodes
235 if self.phase(repo, repo[n].rev()) >= phase]
235 if self.phase(repo, repo[n].rev()) >= phase]
236 if not nodes:
236 if not nodes:
237 break # no roots to move anymore
237 break # no roots to move anymore
238 olds = self.phaseroots[phase]
238 olds = self.phaseroots[phase]
239 roots = set(ctx.node() for ctx in repo.set(
239 roots = set(ctx.node() for ctx in repo.set(
240 'roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
240 'roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
241 if olds != roots:
241 if olds != roots:
242 self._updateroots(phase, roots)
242 self._updateroots(phase, roots)
243 # some roots may need to be declared for lower phases
243 # some roots may need to be declared for lower phases
244 delroots.extend(olds - roots)
244 delroots.extend(olds - roots)
245 # declare deleted root in the target phase
245 # declare deleted root in the target phase
246 if targetphase != 0:
246 if targetphase != 0:
247 self.retractboundary(repo, targetphase, delroots)
247 self.retractboundary(repo, targetphase, delroots)
248 obsolete.clearobscaches(repo)
248 obsolete.clearobscaches(repo)
249
249
250 def retractboundary(self, repo, targetphase, nodes):
250 def retractboundary(self, repo, targetphase, nodes):
251 # Be careful to preserve shallow-copied values: do not update
251 # Be careful to preserve shallow-copied values: do not update
252 # phaseroots values, replace them.
252 # phaseroots values, replace them.
253
253
254 currentroots = self.phaseroots[targetphase]
254 currentroots = self.phaseroots[targetphase]
255 newroots = [n for n in nodes
255 newroots = [n for n in nodes
256 if self.phase(repo, repo[n].rev()) < targetphase]
256 if self.phase(repo, repo[n].rev()) < targetphase]
257 if newroots:
257 if newroots:
258 if nullid in newroots:
258 if nullid in newroots:
259 raise util.Abort(_('cannot change null revision phase'))
259 raise util.Abort(_('cannot change null revision phase'))
260 currentroots = currentroots.copy()
260 currentroots = currentroots.copy()
261 currentroots.update(newroots)
261 currentroots.update(newroots)
262 ctxs = repo.set('roots(%ln::)', currentroots)
262 ctxs = repo.set('roots(%ln::)', currentroots)
263 currentroots.intersection_update(ctx.node() for ctx in ctxs)
263 currentroots.intersection_update(ctx.node() for ctx in ctxs)
264 self._updateroots(targetphase, currentroots)
264 self._updateroots(targetphase, currentroots)
265 obsolete.clearobscaches(repo)
265 obsolete.clearobscaches(repo)
266
266
267 def advanceboundary(repo, targetphase, nodes):
267 def advanceboundary(repo, targetphase, nodes):
268 """Add nodes to a phase changing other nodes phases if necessary.
268 """Add nodes to a phase changing other nodes phases if necessary.
269
269
270 This function move boundary *forward* this means that all nodes
270 This function move boundary *forward* this means that all nodes
271 are set in the target phase or kept in a *lower* phase.
271 are set in the target phase or kept in a *lower* phase.
272
272
273 Simplify boundary to contains phase roots only."""
273 Simplify boundary to contains phase roots only."""
274 phcache = repo._phasecache.copy()
274 phcache = repo._phasecache.copy()
275 phcache.advanceboundary(repo, targetphase, nodes)
275 phcache.advanceboundary(repo, targetphase, nodes)
276 repo._phasecache.replace(phcache)
276 repo._phasecache.replace(phcache)
277
277
278 def retractboundary(repo, targetphase, nodes):
278 def retractboundary(repo, targetphase, nodes):
279 """Set nodes back to a phase changing other nodes phases if
279 """Set nodes back to a phase changing other nodes phases if
280 necessary.
280 necessary.
281
281
282 This function move boundary *backward* this means that all nodes
282 This function move boundary *backward* this means that all nodes
283 are set in the target phase or kept in a *higher* phase.
283 are set in the target phase or kept in a *higher* phase.
284
284
285 Simplify boundary to contains phase roots only."""
285 Simplify boundary to contains phase roots only."""
286 phcache = repo._phasecache.copy()
286 phcache = repo._phasecache.copy()
287 phcache.retractboundary(repo, targetphase, nodes)
287 phcache.retractboundary(repo, targetphase, nodes)
288 repo._phasecache.replace(phcache)
288 repo._phasecache.replace(phcache)
289
289
290 def listphases(repo):
290 def listphases(repo):
291 """List phases root for serialization over pushkey"""
291 """List phases root for serialization over pushkey"""
292 keys = {}
292 keys = {}
293 value = '%i' % draft
293 value = '%i' % draft
294 for root in repo._phasecache.phaseroots[draft]:
294 for root in repo._phasecache.phaseroots[draft]:
295 keys[hex(root)] = value
295 keys[hex(root)] = value
296
296
297 if repo.ui.configbool('phases', 'publish', True):
297 if repo.ui.configbool('phases', 'publish', True):
298 # Add an extra data to let remote know we are a publishing
298 # Add an extra data to let remote know we are a publishing
299 # repo. Publishing repo can't just pretend they are old repo.
299 # repo. Publishing repo can't just pretend they are old repo.
300 # When pushing to a publishing repo, the client still need to
300 # When pushing to a publishing repo, the client still need to
301 # push phase boundary
301 # push phase boundary
302 #
302 #
303 # Push do not only push changeset. It also push phase data.
303 # Push do not only push changeset. It also push phase data.
304 # New phase data may apply to common changeset which won't be
304 # New phase data may apply to common changeset which won't be
305 # push (as they are common). Here is a very simple example:
305 # push (as they are common). Here is a very simple example:
306 #
306 #
307 # 1) repo A push changeset X as draft to repo B
307 # 1) repo A push changeset X as draft to repo B
308 # 2) repo B make changeset X public
308 # 2) repo B make changeset X public
309 # 3) repo B push to repo A. X is not pushed but the data that
309 # 3) repo B push to repo A. X is not pushed but the data that
310 # X as now public should
310 # X as now public should
311 #
311 #
312 # The server can't handle it on it's own as it has no idea of
312 # The server can't handle it on it's own as it has no idea of
313 # client phase data.
313 # client phase data.
314 keys['publishing'] = 'True'
314 keys['publishing'] = 'True'
315 return keys
315 return keys
316
316
317 def pushphase(repo, nhex, oldphasestr, newphasestr):
317 def pushphase(repo, nhex, oldphasestr, newphasestr):
318 """List phases root for serialization over pushkey"""
318 """List phases root for serialization over pushkey"""
319 lock = repo.lock()
319 lock = repo.lock()
320 try:
320 try:
321 currentphase = repo[nhex].phase()
321 currentphase = repo[nhex].phase()
322 newphase = abs(int(newphasestr)) # let's avoid negative index surprise
322 newphase = abs(int(newphasestr)) # let's avoid negative index surprise
323 oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
323 oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
324 if currentphase == oldphase and newphase < oldphase:
324 if currentphase == oldphase and newphase < oldphase:
325 advanceboundary(repo, newphase, [bin(nhex)])
325 advanceboundary(repo, newphase, [bin(nhex)])
326 return 1
326 return 1
327 elif currentphase == newphase:
327 elif currentphase == newphase:
328 # raced, but got correct result
328 # raced, but got correct result
329 return 1
329 return 1
330 else:
330 else:
331 return 0
331 return 0
332 finally:
332 finally:
333 lock.release()
333 lock.release()
334
334
335 def analyzeremotephases(repo, subset, roots):
335 def analyzeremotephases(repo, subset, roots):
336 """Compute phases heads and root in a subset of node from root dict
336 """Compute phases heads and root in a subset of node from root dict
337
337
338 * subset is heads of the subset
338 * subset is heads of the subset
339 * roots is {<nodeid> => phase} mapping. key and value are string.
339 * roots is {<nodeid> => phase} mapping. key and value are string.
340
340
341 Accept unknown element input
341 Accept unknown element input
342 """
342 """
343 # build list from dictionary
343 # build list from dictionary
344 draftroots = []
344 draftroots = []
345 nodemap = repo.changelog.nodemap # to filter unknown nodes
345 nodemap = repo.changelog.nodemap # to filter unknown nodes
346 for nhex, phase in roots.iteritems():
346 for nhex, phase in roots.iteritems():
347 if nhex == 'publishing': # ignore data related to publish option
347 if nhex == 'publishing': # ignore data related to publish option
348 continue
348 continue
349 node = bin(nhex)
349 node = bin(nhex)
350 phase = int(phase)
350 phase = int(phase)
351 if phase == 0:
351 if phase == 0:
352 if node != nullid:
352 if node != nullid:
353 repo.ui.warn(_('ignoring inconsistent public root'
353 repo.ui.warn(_('ignoring inconsistent public root'
354 ' from remote: %s\n') % nhex)
354 ' from remote: %s\n') % nhex)
355 elif phase == 1:
355 elif phase == 1:
356 if node in nodemap:
356 if node in nodemap:
357 draftroots.append(node)
357 draftroots.append(node)
358 else:
358 else:
359 repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
359 repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
360 % (phase, nhex))
360 % (phase, nhex))
361 # compute heads
361 # compute heads
362 publicheads = newheads(repo, subset, draftroots)
362 publicheads = newheads(repo, subset, draftroots)
363 return publicheads, draftroots
363 return publicheads, draftroots
364
364
365 def newheads(repo, heads, roots):
365 def newheads(repo, heads, roots):
366 """compute new head of a subset minus another
366 """compute new head of a subset minus another
367
367
368 * `heads`: define the first subset
368 * `heads`: define the first subset
369 * `roots`: define the second we subtract from the first"""
369 * `roots`: define the second we subtract from the first"""
370 revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
370 revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
371 heads, roots, roots, heads)
371 heads, roots, roots, heads)
372 return [c.node() for c in revset]
372 return [c.node() for c in revset]
373
373
374
374
375 def newcommitphase(ui):
375 def newcommitphase(ui):
376 """helper to get the target phase of new commit
376 """helper to get the target phase of new commit
377
377
378 Handle all possible values for the phases.new-commit options.
378 Handle all possible values for the phases.new-commit options.
379
379
380 """
380 """
381 v = ui.config('phases', 'new-commit', draft)
381 v = ui.config('phases', 'new-commit', draft)
382 try:
382 try:
383 return phasenames.index(v)
383 return phasenames.index(v)
384 except ValueError:
384 except ValueError:
385 try:
385 try:
386 return int(v)
386 return int(v)
387 except ValueError:
387 except ValueError:
388 msg = _("phases.new-commit: not a valid phase name ('%s')")
388 msg = _("phases.new-commit: not a valid phase name ('%s')")
389 raise error.ConfigError(msg % v)
389 raise error.ConfigError(msg % v)
390
390
391 def hassecret(repo):
391 def hassecret(repo):
392 """utility function that check if a repo have any secret changeset."""
392 """utility function that check if a repo have any secret changeset."""
393 return bool(repo._phasecache.phaseroots[2])
393 return bool(repo._phasecache.phaseroots[2])
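To make the trait table in the module docstring concrete, here is a small self-contained sketch. It is a hypothetical illustration that mirrors the allphases/phasenames definitions above; it is not repository code.

# Standalone sketch mirroring the constants defined above.
allphases = public, draft, secret = range(3)
phasenames = ['public', 'draft', 'secret']

def describe(phase):
    # Per the docstring's trait table: public is immutable and shared,
    # draft is mutable but shared, secret is neither.
    immutable = phase == public
    shared = phase < secret
    return '%s: immutable=%s, shared=%s' % (phasenames[phase], immutable, shared)

for p in allphases:
    print(describe(p))
# public: immutable=True, shared=True
# draft: immutable=False, shared=True
# secret: immutable=False, shared=False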
revset.py: @@ -1,1931 +1,1920 @@
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import re
8 import re
9 import parser, util, error, discovery, hbisect, phases
9 import parser, util, error, discovery, hbisect, phases
10 import node
10 import node
11 import bookmarks as bookmarksmod
11 import bookmarks as bookmarksmod
12 import match as matchmod
12 import match as matchmod
13 from i18n import _
13 from i18n import _
14 import encoding
14 import encoding
15 import obsolete as obsmod
15 import obsolete as obsmod
16
16
17 def _revancestors(repo, revs, followfirst):
17 def _revancestors(repo, revs, followfirst):
18 """Like revlog.ancestors(), but supports followfirst."""
18 """Like revlog.ancestors(), but supports followfirst."""
19 cut = followfirst and 1 or None
19 cut = followfirst and 1 or None
20 cl = repo.changelog
20 cl = repo.changelog
21 visit = util.deque(revs)
21 visit = util.deque(revs)
22 seen = set([node.nullrev])
22 seen = set([node.nullrev])
23 while visit:
23 while visit:
24 for parent in cl.parentrevs(visit.popleft())[:cut]:
24 for parent in cl.parentrevs(visit.popleft())[:cut]:
25 if parent not in seen:
25 if parent not in seen:
26 visit.append(parent)
26 visit.append(parent)
27 seen.add(parent)
27 seen.add(parent)
28 yield parent
28 yield parent
29
29
30 def _revdescendants(repo, revs, followfirst):
30 def _revdescendants(repo, revs, followfirst):
31 """Like revlog.descendants() but supports followfirst."""
31 """Like revlog.descendants() but supports followfirst."""
32 cut = followfirst and 1 or None
32 cut = followfirst and 1 or None
33 cl = repo.changelog
33 cl = repo.changelog
34 first = min(revs)
34 first = min(revs)
35 nullrev = node.nullrev
35 nullrev = node.nullrev
36 if first == nullrev:
36 if first == nullrev:
37 # Are there nodes with a null first parent and a non-null
37 # Are there nodes with a null first parent and a non-null
38 # second one? Maybe. Do we care? Probably not.
38 # second one? Maybe. Do we care? Probably not.
39 for i in cl:
39 for i in cl:
40 yield i
40 yield i
41 return
41 return
42
42
43 seen = set(revs)
43 seen = set(revs)
44 for i in cl.revs(first + 1):
44 for i in cl.revs(first + 1):
45 for x in cl.parentrevs(i)[:cut]:
45 for x in cl.parentrevs(i)[:cut]:
46 if x != nullrev and x in seen:
46 if x != nullrev and x in seen:
47 seen.add(i)
47 seen.add(i)
48 yield i
48 yield i
49 break
49 break
50
50
51 def _revsbetween(repo, roots, heads):
51 def _revsbetween(repo, roots, heads):
52 """Return all paths between roots and heads, inclusive of both endpoint
52 """Return all paths between roots and heads, inclusive of both endpoint
53 sets."""
53 sets."""
54 if not roots:
54 if not roots:
55 return []
55 return []
56 parentrevs = repo.changelog.parentrevs
56 parentrevs = repo.changelog.parentrevs
57 visit = heads[:]
57 visit = heads[:]
58 reachable = set()
58 reachable = set()
59 seen = {}
59 seen = {}
60 minroot = min(roots)
60 minroot = min(roots)
61 roots = set(roots)
61 roots = set(roots)
62 # open-code the post-order traversal due to the tiny size of
62 # open-code the post-order traversal due to the tiny size of
63 # sys.getrecursionlimit()
63 # sys.getrecursionlimit()
64 while visit:
64 while visit:
65 rev = visit.pop()
65 rev = visit.pop()
66 if rev in roots:
66 if rev in roots:
67 reachable.add(rev)
67 reachable.add(rev)
68 parents = parentrevs(rev)
68 parents = parentrevs(rev)
69 seen[rev] = parents
69 seen[rev] = parents
70 for parent in parents:
70 for parent in parents:
71 if parent >= minroot and parent not in seen:
71 if parent >= minroot and parent not in seen:
72 visit.append(parent)
72 visit.append(parent)
73 if not reachable:
73 if not reachable:
74 return []
74 return []
75 for rev in sorted(seen):
75 for rev in sorted(seen):
76 for parent in seen[rev]:
76 for parent in seen[rev]:
77 if parent in reachable:
77 if parent in reachable:
78 reachable.add(rev)
78 reachable.add(rev)
79 return sorted(reachable)
79 return sorted(reachable)
80
80
81 elements = {
81 elements = {
82 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
82 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
83 "~": (18, None, ("ancestor", 18)),
83 "~": (18, None, ("ancestor", 18)),
84 "^": (18, None, ("parent", 18), ("parentpost", 18)),
84 "^": (18, None, ("parent", 18), ("parentpost", 18)),
85 "-": (5, ("negate", 19), ("minus", 5)),
85 "-": (5, ("negate", 19), ("minus", 5)),
86 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
86 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
87 ("dagrangepost", 17)),
87 ("dagrangepost", 17)),
88 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
88 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
89 ("dagrangepost", 17)),
89 ("dagrangepost", 17)),
90 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
90 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
91 "not": (10, ("not", 10)),
91 "not": (10, ("not", 10)),
92 "!": (10, ("not", 10)),
92 "!": (10, ("not", 10)),
93 "and": (5, None, ("and", 5)),
93 "and": (5, None, ("and", 5)),
94 "&": (5, None, ("and", 5)),
94 "&": (5, None, ("and", 5)),
95 "or": (4, None, ("or", 4)),
95 "or": (4, None, ("or", 4)),
96 "|": (4, None, ("or", 4)),
96 "|": (4, None, ("or", 4)),
97 "+": (4, None, ("or", 4)),
97 "+": (4, None, ("or", 4)),
98 ",": (2, None, ("list", 2)),
98 ",": (2, None, ("list", 2)),
99 ")": (0, None, None),
99 ")": (0, None, None),
100 "symbol": (0, ("symbol",), None),
100 "symbol": (0, ("symbol",), None),
101 "string": (0, ("string",), None),
101 "string": (0, ("string",), None),
102 "end": (0, None, None),
102 "end": (0, None, None),
103 }
103 }
104
104
105 keywords = set(['and', 'or', 'not'])
105 keywords = set(['and', 'or', 'not'])
106
106
107 def tokenize(program):
107 def tokenize(program):
108 '''
108 '''
109 Parse a revset statement into a stream of tokens
109 Parse a revset statement into a stream of tokens
110
110
111 Check that @ is a valid unquoted token character (issue3686):
111 Check that @ is a valid unquoted token character (issue3686):
112 >>> list(tokenize("@::"))
112 >>> list(tokenize("@::"))
113 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
113 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
114
114
115 '''
115 '''
116
116
117 pos, l = 0, len(program)
117 pos, l = 0, len(program)
118 while pos < l:
118 while pos < l:
119 c = program[pos]
119 c = program[pos]
120 if c.isspace(): # skip inter-token whitespace
120 if c.isspace(): # skip inter-token whitespace
121 pass
121 pass
122 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
122 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
123 yield ('::', None, pos)
123 yield ('::', None, pos)
124 pos += 1 # skip ahead
124 pos += 1 # skip ahead
125 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
125 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
126 yield ('..', None, pos)
126 yield ('..', None, pos)
127 pos += 1 # skip ahead
127 pos += 1 # skip ahead
128 elif c in "():,-|&+!~^": # handle simple operators
128 elif c in "():,-|&+!~^": # handle simple operators
129 yield (c, None, pos)
129 yield (c, None, pos)
130 elif (c in '"\'' or c == 'r' and
130 elif (c in '"\'' or c == 'r' and
131 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
131 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
132 if c == 'r':
132 if c == 'r':
133 pos += 1
133 pos += 1
134 c = program[pos]
134 c = program[pos]
135 decode = lambda x: x
135 decode = lambda x: x
136 else:
136 else:
137 decode = lambda x: x.decode('string-escape')
137 decode = lambda x: x.decode('string-escape')
138 pos += 1
138 pos += 1
139 s = pos
139 s = pos
140 while pos < l: # find closing quote
140 while pos < l: # find closing quote
141 d = program[pos]
141 d = program[pos]
142 if d == '\\': # skip over escaped characters
142 if d == '\\': # skip over escaped characters
143 pos += 2
143 pos += 2
144 continue
144 continue
145 if d == c:
145 if d == c:
146 yield ('string', decode(program[s:pos]), s)
146 yield ('string', decode(program[s:pos]), s)
147 break
147 break
148 pos += 1
148 pos += 1
149 else:
149 else:
150 raise error.ParseError(_("unterminated string"), s)
150 raise error.ParseError(_("unterminated string"), s)
151 # gather up a symbol/keyword
151 # gather up a symbol/keyword
152 elif c.isalnum() or c in '._@' or ord(c) > 127:
152 elif c.isalnum() or c in '._@' or ord(c) > 127:
153 s = pos
153 s = pos
154 pos += 1
154 pos += 1
155 while pos < l: # find end of symbol
155 while pos < l: # find end of symbol
156 d = program[pos]
156 d = program[pos]
157 if not (d.isalnum() or d in "._/@" or ord(d) > 127):
157 if not (d.isalnum() or d in "._/@" or ord(d) > 127):
158 break
158 break
159 if d == '.' and program[pos - 1] == '.': # special case for ..
159 if d == '.' and program[pos - 1] == '.': # special case for ..
160 pos -= 1
160 pos -= 1
161 break
161 break
162 pos += 1
162 pos += 1
163 sym = program[s:pos]
163 sym = program[s:pos]
164 if sym in keywords: # operator keywords
164 if sym in keywords: # operator keywords
165 yield (sym, None, s)
165 yield (sym, None, s)
166 else:
166 else:
167 yield ('symbol', sym, s)
167 yield ('symbol', sym, s)
168 pos -= 1
168 pos -= 1
169 else:
169 else:
170 raise error.ParseError(_("syntax error"), pos)
170 raise error.ParseError(_("syntax error"), pos)
171 pos += 1
171 pos += 1
172 yield ('end', None, pos)
172 yield ('end', None, pos)
173
173
174 # helpers
174 # helpers
175
175
176 def getstring(x, err):
176 def getstring(x, err):
177 if x and (x[0] == 'string' or x[0] == 'symbol'):
177 if x and (x[0] == 'string' or x[0] == 'symbol'):
178 return x[1]
178 return x[1]
179 raise error.ParseError(err)
179 raise error.ParseError(err)
180
180
181 def getlist(x):
181 def getlist(x):
182 if not x:
182 if not x:
183 return []
183 return []
184 if x[0] == 'list':
184 if x[0] == 'list':
185 return getlist(x[1]) + [x[2]]
185 return getlist(x[1]) + [x[2]]
186 return [x]
186 return [x]
187
187
188 def getargs(x, min, max, err):
188 def getargs(x, min, max, err):
189 l = getlist(x)
189 l = getlist(x)
190 if len(l) < min or (max >= 0 and len(l) > max):
190 if len(l) < min or (max >= 0 and len(l) > max):
191 raise error.ParseError(err)
191 raise error.ParseError(err)
192 return l
192 return l
193
193
194 def getset(repo, subset, x):
194 def getset(repo, subset, x):
195 if not x:
195 if not x:
196 raise error.ParseError(_("missing argument"))
196 raise error.ParseError(_("missing argument"))
197 return methods[x[0]](repo, subset, *x[1:])
197 return methods[x[0]](repo, subset, *x[1:])
198
198
199 def _getrevsource(repo, r):
199 def _getrevsource(repo, r):
200 extra = repo[r].extra()
200 extra = repo[r].extra()
201 for label in ('source', 'transplant_source', 'rebase_source'):
201 for label in ('source', 'transplant_source', 'rebase_source'):
202 if label in extra:
202 if label in extra:
203 try:
203 try:
204 return repo[extra[label]].rev()
204 return repo[extra[label]].rev()
205 except error.RepoLookupError:
205 except error.RepoLookupError:
206 pass
206 pass
207 return None
207 return None
208
208
209 # operator methods
209 # operator methods
210
210
211 def stringset(repo, subset, x):
211 def stringset(repo, subset, x):
212 x = repo[x].rev()
212 x = repo[x].rev()
213 if x == -1 and len(subset) == len(repo):
213 if x == -1 and len(subset) == len(repo):
214 return [-1]
214 return [-1]
215 if len(subset) == len(repo) or x in subset:
215 if len(subset) == len(repo) or x in subset:
216 return [x]
216 return [x]
217 return []
217 return []
218
218
219 def symbolset(repo, subset, x):
219 def symbolset(repo, subset, x):
220 if x in symbols:
220 if x in symbols:
221 raise error.ParseError(_("can't use %s here") % x)
221 raise error.ParseError(_("can't use %s here") % x)
222 return stringset(repo, subset, x)
222 return stringset(repo, subset, x)
223
223
224 def rangeset(repo, subset, x, y):
224 def rangeset(repo, subset, x, y):
225 m = getset(repo, subset, x)
225 m = getset(repo, subset, x)
226 if not m:
226 if not m:
227 m = getset(repo, list(repo), x)
227 m = getset(repo, list(repo), x)
228
228
229 n = getset(repo, subset, y)
229 n = getset(repo, subset, y)
230 if not n:
230 if not n:
231 n = getset(repo, list(repo), y)
231 n = getset(repo, list(repo), y)
232
232
233 if not m or not n:
233 if not m or not n:
234 return []
234 return []
235 m, n = m[0], n[-1]
235 m, n = m[0], n[-1]
236
236
237 if m < n:
237 if m < n:
238 r = range(m, n + 1)
238 r = range(m, n + 1)
239 else:
239 else:
240 r = range(m, n - 1, -1)
240 r = range(m, n - 1, -1)
241 s = set(subset)
241 s = set(subset)
242 return [x for x in r if x in s]
242 return [x for x in r if x in s]
243
243
244 def dagrange(repo, subset, x, y):
244 def dagrange(repo, subset, x, y):
245 if subset:
245 if subset:
246 r = list(repo)
246 r = list(repo)
247 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
247 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
248 s = set(subset)
248 s = set(subset)
249 return [r for r in xs if r in s]
249 return [r for r in xs if r in s]
250 return []
250 return []
251
251
252 def andset(repo, subset, x, y):
252 def andset(repo, subset, x, y):
253 return getset(repo, getset(repo, subset, x), y)
253 return getset(repo, getset(repo, subset, x), y)
254
254
255 def orset(repo, subset, x, y):
255 def orset(repo, subset, x, y):
256 xl = getset(repo, subset, x)
256 xl = getset(repo, subset, x)
257 s = set(xl)
257 s = set(xl)
258 yl = getset(repo, [r for r in subset if r not in s], y)
258 yl = getset(repo, [r for r in subset if r not in s], y)
259 return xl + yl
259 return xl + yl
260
260
261 def notset(repo, subset, x):
261 def notset(repo, subset, x):
262 s = set(getset(repo, subset, x))
262 s = set(getset(repo, subset, x))
263 return [r for r in subset if r not in s]
263 return [r for r in subset if r not in s]
264
264
265 def listset(repo, subset, a, b):
265 def listset(repo, subset, a, b):
266 raise error.ParseError(_("can't use a list in this context"))
266 raise error.ParseError(_("can't use a list in this context"))
267
267
268 def func(repo, subset, a, b):
268 def func(repo, subset, a, b):
269 if a[0] == 'symbol' and a[1] in symbols:
269 if a[0] == 'symbol' and a[1] in symbols:
270 return symbols[a[1]](repo, subset, b)
270 return symbols[a[1]](repo, subset, b)
271 raise error.ParseError(_("not a function: %s") % a[1])
271 raise error.ParseError(_("not a function: %s") % a[1])
272
272
273 # functions
273 # functions
274
274
275 def adds(repo, subset, x):
275 def adds(repo, subset, x):
276 """``adds(pattern)``
276 """``adds(pattern)``
277 Changesets that add a file matching pattern.
277 Changesets that add a file matching pattern.
278 """
278 """
279 # i18n: "adds" is a keyword
279 # i18n: "adds" is a keyword
280 pat = getstring(x, _("adds requires a pattern"))
280 pat = getstring(x, _("adds requires a pattern"))
281 return checkstatus(repo, subset, pat, 1)
281 return checkstatus(repo, subset, pat, 1)
282
282
283 def ancestor(repo, subset, x):
283 def ancestor(repo, subset, x):
284 """``ancestor(single, single)``
284 """``ancestor(single, single)``
285 Greatest common ancestor of the two changesets.
285 Greatest common ancestor of the two changesets.
286 """
286 """
287 # i18n: "ancestor" is a keyword
287 # i18n: "ancestor" is a keyword
288 l = getargs(x, 2, 2, _("ancestor requires two arguments"))
288 l = getargs(x, 2, 2, _("ancestor requires two arguments"))
289 r = list(repo)
289 r = list(repo)
290 a = getset(repo, r, l[0])
290 a = getset(repo, r, l[0])
291 b = getset(repo, r, l[1])
291 b = getset(repo, r, l[1])
292 if len(a) != 1 or len(b) != 1:
292 if len(a) != 1 or len(b) != 1:
293 # i18n: "ancestor" is a keyword
293 # i18n: "ancestor" is a keyword
294 raise error.ParseError(_("ancestor arguments must be single revisions"))
294 raise error.ParseError(_("ancestor arguments must be single revisions"))
295 an = [repo[a[0]].ancestor(repo[b[0]]).rev()]
295 an = [repo[a[0]].ancestor(repo[b[0]]).rev()]
296
296
297 return [r for r in an if r in subset]
297 return [r for r in an if r in subset]
298
298
299 def _ancestors(repo, subset, x, followfirst=False):
299 def _ancestors(repo, subset, x, followfirst=False):
300 args = getset(repo, list(repo), x)
300 args = getset(repo, list(repo), x)
301 if not args:
301 if not args:
302 return []
302 return []
303 s = set(_revancestors(repo, args, followfirst)) | set(args)
303 s = set(_revancestors(repo, args, followfirst)) | set(args)
304 return [r for r in subset if r in s]
304 return [r for r in subset if r in s]
305
305
306 def ancestors(repo, subset, x):
306 def ancestors(repo, subset, x):
307 """``ancestors(set)``
307 """``ancestors(set)``
308 Changesets that are ancestors of a changeset in set.
308 Changesets that are ancestors of a changeset in set.
309 """
309 """
310 return _ancestors(repo, subset, x)
310 return _ancestors(repo, subset, x)
311
311
312 def _firstancestors(repo, subset, x):
312 def _firstancestors(repo, subset, x):
313 # ``_firstancestors(set)``
313 # ``_firstancestors(set)``
314 # Like ``ancestors(set)`` but follows only the first parents.
314 # Like ``ancestors(set)`` but follows only the first parents.
315 return _ancestors(repo, subset, x, followfirst=True)
315 return _ancestors(repo, subset, x, followfirst=True)
316
316
317 def ancestorspec(repo, subset, x, n):
317 def ancestorspec(repo, subset, x, n):
318 """``set~n``
318 """``set~n``
319 Changesets that are the Nth ancestor (first parents only) of a changeset
319 Changesets that are the Nth ancestor (first parents only) of a changeset
320 in set.
320 in set.
321 """
321 """
322 try:
322 try:
323 n = int(n[1])
323 n = int(n[1])
324 except (TypeError, ValueError):
324 except (TypeError, ValueError):
325 raise error.ParseError(_("~ expects a number"))
325 raise error.ParseError(_("~ expects a number"))
326 ps = set()
326 ps = set()
327 cl = repo.changelog
327 cl = repo.changelog
328 for r in getset(repo, subset, x):
328 for r in getset(repo, subset, x):
329 for i in range(n):
329 for i in range(n):
330 r = cl.parentrevs(r)[0]
330 r = cl.parentrevs(r)[0]
331 ps.add(r)
331 ps.add(r)
332 return [r for r in subset if r in ps]
332 return [r for r in subset if r in ps]
333
333
334 def author(repo, subset, x):
334 def author(repo, subset, x):
335 """``author(string)``
335 """``author(string)``
336 Alias for ``user(string)``.
336 Alias for ``user(string)``.
337 """
337 """
338 # i18n: "author" is a keyword
338 # i18n: "author" is a keyword
339 n = encoding.lower(getstring(x, _("author requires a string")))
339 n = encoding.lower(getstring(x, _("author requires a string")))
340 kind, pattern, matcher = _substringmatcher(n)
340 kind, pattern, matcher = _substringmatcher(n)
341 return [r for r in subset if matcher(encoding.lower(repo[r].user()))]
341 return [r for r in subset if matcher(encoding.lower(repo[r].user()))]
342
342
343 def bisect(repo, subset, x):
343 def bisect(repo, subset, x):
344 """``bisect(string)``
344 """``bisect(string)``
345 Changesets marked in the specified bisect status:
345 Changesets marked in the specified bisect status:
346
346
347 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
347 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
348 - ``goods``, ``bads`` : csets topologically good/bad
348 - ``goods``, ``bads`` : csets topologically good/bad
349 - ``range`` : csets taking part in the bisection
349 - ``range`` : csets taking part in the bisection
350 - ``pruned`` : csets that are goods, bads or skipped
350 - ``pruned`` : csets that are goods, bads or skipped
351 - ``untested`` : csets whose fate is yet unknown
351 - ``untested`` : csets whose fate is yet unknown
352 - ``ignored`` : csets ignored due to DAG topology
352 - ``ignored`` : csets ignored due to DAG topology
353 - ``current`` : the cset currently being bisected
353 - ``current`` : the cset currently being bisected
354 """
354 """
355 # i18n: "bisect" is a keyword
355 # i18n: "bisect" is a keyword
356 status = getstring(x, _("bisect requires a string")).lower()
356 status = getstring(x, _("bisect requires a string")).lower()
357 state = set(hbisect.get(repo, status))
357 state = set(hbisect.get(repo, status))
358 return [r for r in subset if r in state]
358 return [r for r in subset if r in state]
359
359
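# Editor's note: the accepted status strings are the ones listed in the
# docstring above, for example:
#   hg log -r 'bisect(good)'       # revisions explicitly marked good
#   hg log -r 'bisect(untested)'   # candidates whose fate is still unknown
#   hg log -r 'bisect(range)'      # everything taking part in the bisection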
360 # Backward-compatibility
360 # Backward-compatibility
361 # - no help entry so that we do not advertise it any more
361 # - no help entry so that we do not advertise it any more
362 def bisected(repo, subset, x):
362 def bisected(repo, subset, x):
363 return bisect(repo, subset, x)
363 return bisect(repo, subset, x)
364
364
365 def bookmark(repo, subset, x):
365 def bookmark(repo, subset, x):
366 """``bookmark([name])``
366 """``bookmark([name])``
367 The named bookmark or all bookmarks.
367 The named bookmark or all bookmarks.
368
368
369 If `name` starts with `re:`, the remainder of the name is treated as
369 If `name` starts with `re:`, the remainder of the name is treated as
370 a regular expression. To match a bookmark that actually starts with `re:`,
370 a regular expression. To match a bookmark that actually starts with `re:`,
371 use the prefix `literal:`.
371 use the prefix `literal:`.
372 """
372 """
373 # i18n: "bookmark" is a keyword
373 # i18n: "bookmark" is a keyword
374 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
374 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
375 if args:
375 if args:
376 bm = getstring(args[0],
376 bm = getstring(args[0],
377 # i18n: "bookmark" is a keyword
377 # i18n: "bookmark" is a keyword
378 _('the argument to bookmark must be a string'))
378 _('the argument to bookmark must be a string'))
379 kind, pattern, matcher = _stringmatcher(bm)
379 kind, pattern, matcher = _stringmatcher(bm)
380 if kind == 'literal':
380 if kind == 'literal':
381 bmrev = bookmarksmod.listbookmarks(repo).get(bm, None)
381 bmrev = bookmarksmod.listbookmarks(repo).get(bm, None)
382 if not bmrev:
382 if not bmrev:
383 raise util.Abort(_("bookmark '%s' does not exist") % bm)
383 raise util.Abort(_("bookmark '%s' does not exist") % bm)
384 bmrev = repo[bmrev].rev()
384 bmrev = repo[bmrev].rev()
385 return [r for r in subset if r == bmrev]
385 return [r for r in subset if r == bmrev]
386 else:
386 else:
387 matchrevs = set()
387 matchrevs = set()
388 for name, bmrev in bookmarksmod.listbookmarks(repo).iteritems():
388 for name, bmrev in bookmarksmod.listbookmarks(repo).iteritems():
389 if matcher(name):
389 if matcher(name):
390 matchrevs.add(bmrev)
390 matchrevs.add(bmrev)
391 if not matchrevs:
391 if not matchrevs:
392 raise util.Abort(_("no bookmarks exist that match '%s'")
392 raise util.Abort(_("no bookmarks exist that match '%s'")
393 % pattern)
393 % pattern)
394 bmrevs = set()
394 bmrevs = set()
395 for bmrev in matchrevs:
395 for bmrev in matchrevs:
396 bmrevs.add(repo[bmrev].rev())
396 bmrevs.add(repo[bmrev].rev())
397 return [r for r in subset if r in bmrevs]
397 return [r for r in subset if r in bmrevs]
398
398
399 bms = set([repo[r].rev()
399 bms = set([repo[r].rev()
400 for r in bookmarksmod.listbookmarks(repo).values()])
400 for r in bookmarksmod.listbookmarks(repo).values()])
401 return [r for r in subset if r in bms]
401 return [r for r in subset if r in bms]
402
402
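# Editor's note: sketch of bookmark() usage, with hypothetical names:
#   hg log -r 'bookmark()'                 # all bookmarked revisions
#   hg log -r 'bookmark(feature-x)'        # literal match, aborts if absent
#   hg log -r 'bookmark("re:^release-")'   # regex match, per the docstring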
403 def branch(repo, subset, x):
403 def branch(repo, subset, x):
404 """``branch(string or set)``
404 """``branch(string or set)``
405 All changesets belonging to the given branch or the branches of the given
405 All changesets belonging to the given branch or the branches of the given
406 changesets.
406 changesets.
407
407
408 If `string` starts with `re:`, the remainder of the name is treated as
408 If `string` starts with `re:`, the remainder of the name is treated as
409 a regular expression. To match a branch that actually starts with `re:`,
409 a regular expression. To match a branch that actually starts with `re:`,
410 use the prefix `literal:`.
410 use the prefix `literal:`.
411 """
411 """
412 try:
412 try:
413 b = getstring(x, '')
413 b = getstring(x, '')
414 except error.ParseError:
414 except error.ParseError:
415 # not a string, but another revspec, e.g. tip()
415 # not a string, but another revspec, e.g. tip()
416 pass
416 pass
417 else:
417 else:
418 kind, pattern, matcher = _stringmatcher(b)
418 kind, pattern, matcher = _stringmatcher(b)
419 if kind == 'literal':
419 if kind == 'literal':
420 # note: falls through to the revspec case if no branch with
420 # note: falls through to the revspec case if no branch with
421 # this name exists
421 # this name exists
422 if pattern in repo.branchmap():
422 if pattern in repo.branchmap():
423 return [r for r in subset if matcher(repo[r].branch())]
423 return [r for r in subset if matcher(repo[r].branch())]
424 else:
424 else:
425 return [r for r in subset if matcher(repo[r].branch())]
425 return [r for r in subset if matcher(repo[r].branch())]
426
426
427 s = getset(repo, list(repo), x)
427 s = getset(repo, list(repo), x)
428 b = set()
428 b = set()
429 for r in s:
429 for r in s:
430 b.add(repo[r].branch())
430 b.add(repo[r].branch())
431 s = set(s)
431 s = set(s)
432 return [r for r in subset if r in s or repo[r].branch() in b]
432 return [r for r in subset if r in s or repo[r].branch() in b]
433
433
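# Editor's note: branch() accepts either a name/pattern or another revset;
# hypothetical examples of the three paths taken above:
#   hg log -r 'branch(default)'          # literal branch name
#   hg log -r 'branch("re:^stable-")'    # regex, per the docstring
#   hg log -r 'branch(tip)'              # branches of the revisions in a set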
434 def bumped(repo, subset, x):
434 def bumped(repo, subset, x):
435 """``bumped()``
435 """``bumped()``
436 Mutable changesets marked as successors of public changesets.
436 Mutable changesets marked as successors of public changesets.
437
437
438 Only non-public and non-obsolete changesets can be `bumped`.
438 Only non-public and non-obsolete changesets can be `bumped`.
439 """
439 """
440 # i18n: "bumped" is a keyword
440 # i18n: "bumped" is a keyword
441 getargs(x, 0, 0, _("bumped takes no arguments"))
441 getargs(x, 0, 0, _("bumped takes no arguments"))
442 bumped = obsmod.getrevs(repo, 'bumped')
442 bumped = obsmod.getrevs(repo, 'bumped')
443 return [r for r in subset if r in bumped]
443 return [r for r in subset if r in bumped]
444
444
445 def bundle(repo, subset, x):
445 def bundle(repo, subset, x):
446 """``bundle()``
446 """``bundle()``
447 Changesets in the bundle.
447 Changesets in the bundle.
448
448
449 Bundle must be specified by the -R option."""
449 Bundle must be specified by the -R option."""
450
450
451 try:
451 try:
452 bundlenodes = repo.changelog.bundlenodes
452 bundlenodes = repo.changelog.bundlenodes
453 except AttributeError:
453 except AttributeError:
454 raise util.Abort(_("no bundle provided - specify with -R"))
454 raise util.Abort(_("no bundle provided - specify with -R"))
455 revs = set(repo[n].rev() for n in bundlenodes)
455 revs = set(repo[n].rev() for n in bundlenodes)
456 return [r for r in subset if r in revs]
456 return [r for r in subset if r in revs]
457
457
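# Editor's note: bundle() only makes sense when the repository is overlaid
# with a bundle via -R, e.g. (hypothetical file name):
#   hg log -R incoming.hg -r 'bundle()'
# Without -R, the AttributeError branch above aborts with the hint shown.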
458 def checkstatus(repo, subset, pat, field):
458 def checkstatus(repo, subset, pat, field):
459 m = None
459 m = None
460 s = []
460 s = []
461 hasset = matchmod.patkind(pat) == 'set'
461 hasset = matchmod.patkind(pat) == 'set'
462 fname = None
462 fname = None
463 for r in subset:
463 for r in subset:
464 c = repo[r]
464 c = repo[r]
465 if not m or hasset:
465 if not m or hasset:
466 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
466 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
467 if not m.anypats() and len(m.files()) == 1:
467 if not m.anypats() and len(m.files()) == 1:
468 fname = m.files()[0]
468 fname = m.files()[0]
469 if fname is not None:
469 if fname is not None:
470 if fname not in c.files():
470 if fname not in c.files():
471 continue
471 continue
472 else:
472 else:
473 for f in c.files():
473 for f in c.files():
474 if m(f):
474 if m(f):
475 break
475 break
476 else:
476 else:
477 continue
477 continue
478 files = repo.status(c.p1().node(), c.node())[field]
478 files = repo.status(c.p1().node(), c.node())[field]
479 if fname is not None:
479 if fname is not None:
480 if fname in files:
480 if fname in files:
481 s.append(r)
481 s.append(r)
482 else:
482 else:
483 for f in files:
483 for f in files:
484 if m(f):
484 if m(f):
485 s.append(r)
485 s.append(r)
486 break
486 break
487 return s
487 return s
488
488
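# Editor's note: ``field`` indexes the tuple returned by repo.status();
# judging from the callers later in this file, 0 selects modified files
# (used by modifies()) and 2 selects removed files (used by removes()).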
489 def _children(repo, narrow, parentset):
489 def _children(repo, narrow, parentset):
490 cs = set()
490 cs = set()
491 pr = repo.changelog.parentrevs
491 pr = repo.changelog.parentrevs
492 for r in narrow:
492 for r in narrow:
493 for p in pr(r):
493 for p in pr(r):
494 if p in parentset:
494 if p in parentset:
495 cs.add(r)
495 cs.add(r)
496 return cs
496 return cs
497
497
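# Editor's sketch (not part of revset.py) of the scan above, with a toy
# parentrevs mapping standing in for the changelog:
#   parentrevs = {1: (0, -1), 2: (0, -1), 3: (1, 2)}
#   parentset  = {0}
#   children   = {r for r, ps in parentrevs.items()
#                 if any(p in parentset for p in ps)}   # -> {1, 2}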
498 def children(repo, subset, x):
498 def children(repo, subset, x):
499 """``children(set)``
499 """``children(set)``
500 Child changesets of changesets in set.
500 Child changesets of changesets in set.
501 """
501 """
502 s = set(getset(repo, list(repo), x))
502 s = set(getset(repo, list(repo), x))
503 cs = _children(repo, subset, s)
503 cs = _children(repo, subset, s)
504 return [r for r in subset if r in cs]
504 return [r for r in subset if r in cs]
505
505
506 def closed(repo, subset, x):
506 def closed(repo, subset, x):
507 """``closed()``
507 """``closed()``
508 Changeset is closed.
508 Changeset is closed.
509 """
509 """
510 # i18n: "closed" is a keyword
510 # i18n: "closed" is a keyword
511 getargs(x, 0, 0, _("closed takes no arguments"))
511 getargs(x, 0, 0, _("closed takes no arguments"))
512 return [r for r in subset if repo[r].closesbranch()]
512 return [r for r in subset if repo[r].closesbranch()]
513
513
514 def contains(repo, subset, x):
514 def contains(repo, subset, x):
515 """``contains(pattern)``
515 """``contains(pattern)``
516 Revision contains a file matching pattern. See :hg:`help patterns`
516 Revision contains a file matching pattern. See :hg:`help patterns`
517 for information about file patterns.
517 for information about file patterns.
518 """
518 """
519 # i18n: "contains" is a keyword
519 # i18n: "contains" is a keyword
520 pat = getstring(x, _("contains requires a pattern"))
520 pat = getstring(x, _("contains requires a pattern"))
521 m = None
521 m = None
522 s = []
522 s = []
523 if not matchmod.patkind(pat):
523 if not matchmod.patkind(pat):
524 for r in subset:
524 for r in subset:
525 if pat in repo[r]:
525 if pat in repo[r]:
526 s.append(r)
526 s.append(r)
527 else:
527 else:
528 for r in subset:
528 for r in subset:
529 c = repo[r]
529 c = repo[r]
530 if not m or matchmod.patkind(pat) == 'set':
530 if not m or matchmod.patkind(pat) == 'set':
531 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
531 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
532 for f in c.manifest():
532 for f in c.manifest():
533 if m(f):
533 if m(f):
534 s.append(r)
534 s.append(r)
535 break
535 break
536 return s
536 return s
537
537
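# Editor's note: contains() tests the manifest (the file exists in the
# revision), whereas file()/modifies()/removes() below test what a revision
# changed.  Hypothetical examples:
#   hg log -r 'contains(setup.py)'   # revisions in which the file exists
#   hg log -r 'file(setup.py)'       # revisions that touched the file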
538 def converted(repo, subset, x):
538 def converted(repo, subset, x):
539 """``converted([id])``
539 """``converted([id])``
540 Changesets converted from the given identifier in the old repository if
540 Changesets converted from the given identifier in the old repository if
541 present, or all converted changesets if no identifier is specified.
541 present, or all converted changesets if no identifier is specified.
542 """
542 """
543
543
544 # There is exactly no chance of resolving the revision, so do a simple
544 # There is exactly no chance of resolving the revision, so do a simple
545 # string compare and hope for the best
545 # string compare and hope for the best
546
546
547 rev = None
547 rev = None
548 # i18n: "converted" is a keyword
548 # i18n: "converted" is a keyword
549 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
549 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
550 if l:
550 if l:
551 # i18n: "converted" is a keyword
551 # i18n: "converted" is a keyword
552 rev = getstring(l[0], _('converted requires a revision'))
552 rev = getstring(l[0], _('converted requires a revision'))
553
553
554 def _matchvalue(r):
554 def _matchvalue(r):
555 source = repo[r].extra().get('convert_revision', None)
555 source = repo[r].extra().get('convert_revision', None)
556 return source is not None and (rev is None or source.startswith(rev))
556 return source is not None and (rev is None or source.startswith(rev))
557
557
558 return [r for r in subset if _matchvalue(r)]
558 return [r for r in subset if _matchvalue(r)]
559
559
560 def date(repo, subset, x):
560 def date(repo, subset, x):
561 """``date(interval)``
561 """``date(interval)``
562 Changesets within the interval, see :hg:`help dates`.
562 Changesets within the interval, see :hg:`help dates`.
563 """
563 """
564 # i18n: "date" is a keyword
564 # i18n: "date" is a keyword
565 ds = getstring(x, _("date requires a string"))
565 ds = getstring(x, _("date requires a string"))
566 dm = util.matchdate(ds)
566 dm = util.matchdate(ds)
567 return [r for r in subset if dm(repo[r].date()[0])]
567 return [r for r in subset if dm(repo[r].date()[0])]
568
568
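# Editor's note: the interval syntax is the one documented in
# ``hg help dates``, e.g. (hypothetical dates):
#   hg log -r 'date(">2012-01-01")'
#   hg log -r 'date("2012-01-01 to 2012-12-31")'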
569 def desc(repo, subset, x):
569 def desc(repo, subset, x):
570 """``desc(string)``
570 """``desc(string)``
571 Search commit message for string. The match is case-insensitive.
571 Search commit message for string. The match is case-insensitive.
572 """
572 """
573 # i18n: "desc" is a keyword
573 # i18n: "desc" is a keyword
574 ds = encoding.lower(getstring(x, _("desc requires a string")))
574 ds = encoding.lower(getstring(x, _("desc requires a string")))
575 l = []
575 l = []
576 for r in subset:
576 for r in subset:
577 c = repo[r]
577 c = repo[r]
578 if ds in encoding.lower(c.description()):
578 if ds in encoding.lower(c.description()):
579 l.append(r)
579 l.append(r)
580 return l
580 return l
581
581
582 def _descendants(repo, subset, x, followfirst=False):
582 def _descendants(repo, subset, x, followfirst=False):
583 args = getset(repo, list(repo), x)
583 args = getset(repo, list(repo), x)
584 if not args:
584 if not args:
585 return []
585 return []
586 s = set(_revdescendants(repo, args, followfirst)) | set(args)
586 s = set(_revdescendants(repo, args, followfirst)) | set(args)
587
588 if len(subset) == len(repo):
589 # the passed in revisions may not exist, -1 for example
590 for arg in args:
591 if arg not in subset:
592 s.remove(arg)
593 return list(s)
594
595 return [r for r in subset if r in s]
587 return [r for r in subset if r in s]
596
588
597 def descendants(repo, subset, x):
589 def descendants(repo, subset, x):
598 """``descendants(set)``
590 """``descendants(set)``
599 Changesets which are descendants of changesets in set.
591 Changesets which are descendants of changesets in set.
600 """
592 """
601 return _descendants(repo, subset, x)
593 return _descendants(repo, subset, x)
602
594
603 def _firstdescendants(repo, subset, x):
595 def _firstdescendants(repo, subset, x):
604 # ``_firstdescendants(set)``
596 # ``_firstdescendants(set)``
605 # Like ``descendants(set)`` but follows only the first parents.
597 # Like ``descendants(set)`` but follows only the first parents.
606 return _descendants(repo, subset, x, followfirst=True)
598 return _descendants(repo, subset, x, followfirst=True)
607
599
608 def destination(repo, subset, x):
600 def destination(repo, subset, x):
609 """``destination([set])``
601 """``destination([set])``
610 Changesets that were created by a graft, transplant or rebase operation,
602 Changesets that were created by a graft, transplant or rebase operation,
611 with the given revisions specified as the source. Omitting the optional set
603 with the given revisions specified as the source. Omitting the optional set
612 is the same as passing all().
604 is the same as passing all().
613 """
605 """
614 if x is not None:
606 if x is not None:
615 args = set(getset(repo, list(repo), x))
607 args = set(getset(repo, list(repo), x))
616 else:
608 else:
617 args = set(getall(repo, list(repo), x))
609 args = set(getall(repo, list(repo), x))
618
610
619 dests = set()
611 dests = set()
620
612
621 # subset contains all of the possible destinations that can be returned, so
613 # subset contains all of the possible destinations that can be returned, so
622 # iterate over them and see if their source(s) were provided in the args.
614 # iterate over them and see if their source(s) were provided in the args.
623 # Even if the immediate src of r is not in the args, src's source (or
615 # Even if the immediate src of r is not in the args, src's source (or
624 # further back) may be. Scanning back further than the immediate src allows
616 # further back) may be. Scanning back further than the immediate src allows
625 # transitive transplants and rebases to yield the same results as transitive
617 # transitive transplants and rebases to yield the same results as transitive
626 # grafts.
618 # grafts.
627 for r in subset:
619 for r in subset:
628 src = _getrevsource(repo, r)
620 src = _getrevsource(repo, r)
629 lineage = None
621 lineage = None
630
622
631 while src is not None:
623 while src is not None:
632 if lineage is None:
624 if lineage is None:
633 lineage = list()
625 lineage = list()
634
626
635 lineage.append(r)
627 lineage.append(r)
636
628
637 # The visited lineage is a match if the current source is in the arg
629 # The visited lineage is a match if the current source is in the arg
638 # set. Since every candidate dest is visited by way of iterating
630 # set. Since every candidate dest is visited by way of iterating
639 # subset, any dests further back in the lineage will be tested by a
631 # subset, any dests further back in the lineage will be tested by a
640 # different iteration over subset. Likewise, if the src was already
632 # different iteration over subset. Likewise, if the src was already
641 # selected, the current lineage can be selected without going back
633 # selected, the current lineage can be selected without going back
642 # further.
634 # further.
643 if src in args or src in dests:
635 if src in args or src in dests:
644 dests.update(lineage)
636 dests.update(lineage)
645 break
637 break
646
638
647 r = src
639 r = src
648 src = _getrevsource(repo, r)
640 src = _getrevsource(repo, r)
649
641
650 return [r for r in subset if r in dests]
642 return [r for r in subset if r in dests]
651
643
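# Editor's worked example (hypothetical revisions): if S0 was transplanted
# as S1 and S1 later grafted as R, the walk above climbs R -> S1 -> S0, so
# destination(S0) selects both S1 and R, matching the transitivity note in
# the comments.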
652 def draft(repo, subset, x):
644 def draft(repo, subset, x):
653 """``draft()``
645 """``draft()``
654 Changeset in draft phase."""
646 Changeset in draft phase."""
655 # i18n: "draft" is a keyword
647 # i18n: "draft" is a keyword
656 getargs(x, 0, 0, _("draft takes no arguments"))
648 getargs(x, 0, 0, _("draft takes no arguments"))
657 pc = repo._phasecache
649 pc = repo._phasecache
658 return [r for r in subset if pc.phase(repo, r) == phases.draft]
650 return [r for r in subset if pc.phase(repo, r) == phases.draft]
659
651
660 def extinct(repo, subset, x):
652 def extinct(repo, subset, x):
661 """``extinct()``
653 """``extinct()``
662 Obsolete changesets with obsolete descendants only.
654 Obsolete changesets with obsolete descendants only.
663 """
655 """
664 # i18n: "extinct" is a keyword
656 # i18n: "extinct" is a keyword
665 getargs(x, 0, 0, _("extinct takes no arguments"))
657 getargs(x, 0, 0, _("extinct takes no arguments"))
666 extincts = obsmod.getrevs(repo, 'extinct')
658 extincts = obsmod.getrevs(repo, 'extinct')
667 return [r for r in subset if r in extincts]
659 return [r for r in subset if r in extincts]
668
660
669 def extra(repo, subset, x):
661 def extra(repo, subset, x):
670 """``extra(label, [value])``
662 """``extra(label, [value])``
671 Changesets with the given label in the extra metadata, with the given
663 Changesets with the given label in the extra metadata, with the given
672 optional value.
664 optional value.
673
665
674 If `value` starts with `re:`, the remainder of the value is treated as
666 If `value` starts with `re:`, the remainder of the value is treated as
675 a regular expression. To match a value that actually starts with `re:`,
667 a regular expression. To match a value that actually starts with `re:`,
676 use the prefix `literal:`.
668 use the prefix `literal:`.
677 """
669 """
678
670
679 # i18n: "extra" is a keyword
671 # i18n: "extra" is a keyword
680 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
672 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
681 # i18n: "extra" is a keyword
673 # i18n: "extra" is a keyword
682 label = getstring(l[0], _('first argument to extra must be a string'))
674 label = getstring(l[0], _('first argument to extra must be a string'))
683 value = None
675 value = None
684
676
685 if len(l) > 1:
677 if len(l) > 1:
686 # i18n: "extra" is a keyword
678 # i18n: "extra" is a keyword
687 value = getstring(l[1], _('second argument to extra must be a string'))
679 value = getstring(l[1], _('second argument to extra must be a string'))
688 kind, value, matcher = _stringmatcher(value)
680 kind, value, matcher = _stringmatcher(value)
689
681
690 def _matchvalue(r):
682 def _matchvalue(r):
691 extra = repo[r].extra()
683 extra = repo[r].extra()
692 return label in extra and (value is None or matcher(extra[label]))
684 return label in extra and (value is None or matcher(extra[label]))
693
685
694 return [r for r in subset if _matchvalue(r)]
686 return [r for r in subset if _matchvalue(r)]
695
687
696 def filelog(repo, subset, x):
688 def filelog(repo, subset, x):
697 """``filelog(pattern)``
689 """``filelog(pattern)``
698 Changesets connected to the specified filelog.
690 Changesets connected to the specified filelog.
699
691
700 For performance reasons, ``filelog()`` does not show every changeset
692 For performance reasons, ``filelog()`` does not show every changeset
701 that affects the requested file(s). See :hg:`help log` for details. For
693 that affects the requested file(s). See :hg:`help log` for details. For
702 a slower, more accurate result, use ``file()``.
694 a slower, more accurate result, use ``file()``.
703 """
695 """
704
696
705 # i18n: "filelog" is a keyword
697 # i18n: "filelog" is a keyword
706 pat = getstring(x, _("filelog requires a pattern"))
698 pat = getstring(x, _("filelog requires a pattern"))
707 m = matchmod.match(repo.root, repo.getcwd(), [pat], default='relpath',
699 m = matchmod.match(repo.root, repo.getcwd(), [pat], default='relpath',
708 ctx=repo[None])
700 ctx=repo[None])
709 s = set()
701 s = set()
710
702
711 if not matchmod.patkind(pat):
703 if not matchmod.patkind(pat):
712 for f in m.files():
704 for f in m.files():
713 fl = repo.file(f)
705 fl = repo.file(f)
714 for fr in fl:
706 for fr in fl:
715 s.add(fl.linkrev(fr))
707 s.add(fl.linkrev(fr))
716 else:
708 else:
717 for f in repo[None]:
709 for f in repo[None]:
718 if m(f):
710 if m(f):
719 fl = repo.file(f)
711 fl = repo.file(f)
720 for fr in fl:
712 for fr in fl:
721 s.add(fl.linkrev(fr))
713 s.add(fl.linkrev(fr))
722
714
723 return [r for r in subset if r in s]
715 return [r for r in subset if r in s]
724
716
725 def first(repo, subset, x):
717 def first(repo, subset, x):
726 """``first(set, [n])``
718 """``first(set, [n])``
727 An alias for limit().
719 An alias for limit().
728 """
720 """
729 return limit(repo, subset, x)
721 return limit(repo, subset, x)
730
722
731 def _follow(repo, subset, x, name, followfirst=False):
723 def _follow(repo, subset, x, name, followfirst=False):
732 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
724 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
733 c = repo['.']
725 c = repo['.']
734 if l:
726 if l:
735 x = getstring(l[0], _("%s expected a filename") % name)
727 x = getstring(l[0], _("%s expected a filename") % name)
736 if x in c:
728 if x in c:
737 cx = c[x]
729 cx = c[x]
738 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
730 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
739 # include the revision responsible for the most recent version
731 # include the revision responsible for the most recent version
740 s.add(cx.linkrev())
732 s.add(cx.linkrev())
741 else:
733 else:
742 return []
734 return []
743 else:
735 else:
744 s = set(_revancestors(repo, [c.rev()], followfirst)) | set([c.rev()])
736 s = set(_revancestors(repo, [c.rev()], followfirst)) | set([c.rev()])
745
737
746 return [r for r in subset if r in s]
738 return [r for r in subset if r in s]
747
739
748 def follow(repo, subset, x):
740 def follow(repo, subset, x):
749 """``follow([file])``
741 """``follow([file])``
750 An alias for ``::.`` (ancestors of the working copy's first parent).
742 An alias for ``::.`` (ancestors of the working copy's first parent).
751 If a filename is specified, the history of the given file is followed,
743 If a filename is specified, the history of the given file is followed,
752 including copies.
744 including copies.
753 """
745 """
754 return _follow(repo, subset, x, 'follow')
746 return _follow(repo, subset, x, 'follow')
755
747
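# Editor's note: follow() with no argument is equivalent to ``::.``; with a
# file argument it tracks that file's history across copies.  Hypothetical
# examples:
#   hg log -r 'follow()'
#   hg log -r 'follow(README)'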
756 def _followfirst(repo, subset, x):
748 def _followfirst(repo, subset, x):
757 # ``followfirst([file])``
749 # ``followfirst([file])``
758 # Like ``follow([file])`` but follows only the first parent of
750 # Like ``follow([file])`` but follows only the first parent of
759 # every revision or file revision.
751 # every revision or file revision.
760 return _follow(repo, subset, x, '_followfirst', followfirst=True)
752 return _follow(repo, subset, x, '_followfirst', followfirst=True)
761
753
762 def getall(repo, subset, x):
754 def getall(repo, subset, x):
763 """``all()``
755 """``all()``
764 All changesets, the same as ``0:tip``.
756 All changesets, the same as ``0:tip``.
765 """
757 """
766 # i18n: "all" is a keyword
758 # i18n: "all" is a keyword
767 getargs(x, 0, 0, _("all takes no arguments"))
759 getargs(x, 0, 0, _("all takes no arguments"))
768 return subset
760 return subset
769
761
770 def grep(repo, subset, x):
762 def grep(repo, subset, x):
771 """``grep(regex)``
763 """``grep(regex)``
772 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
764 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
773 to ensure special escape characters are handled correctly. Unlike
765 to ensure special escape characters are handled correctly. Unlike
774 ``keyword(string)``, the match is case-sensitive.
766 ``keyword(string)``, the match is case-sensitive.
775 """
767 """
776 try:
768 try:
777 # i18n: "grep" is a keyword
769 # i18n: "grep" is a keyword
778 gr = re.compile(getstring(x, _("grep requires a string")))
770 gr = re.compile(getstring(x, _("grep requires a string")))
779 except re.error, e:
771 except re.error, e:
780 raise error.ParseError(_('invalid match pattern: %s') % e)
772 raise error.ParseError(_('invalid match pattern: %s') % e)
781 l = []
773 l = []
782 for r in subset:
774 for r in subset:
783 c = repo[r]
775 c = repo[r]
784 for e in c.files() + [c.user(), c.description()]:
776 for e in c.files() + [c.user(), c.description()]:
785 if gr.search(e):
777 if gr.search(e):
786 l.append(r)
778 l.append(r)
787 break
779 break
788 return l
780 return l
789
781
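# Editor's note: unlike keyword(), grep() is case-sensitive and takes a
# regex; raw-string quoting keeps backslashes intact, e.g. (hypothetical
# pattern):
#   hg log -r "grep(r'bug\d+')"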
790 def _matchfiles(repo, subset, x):
782 def _matchfiles(repo, subset, x):
791 # _matchfiles takes a revset list of prefixed arguments:
783 # _matchfiles takes a revset list of prefixed arguments:
792 #
784 #
793 # [p:foo, i:bar, x:baz]
785 # [p:foo, i:bar, x:baz]
794 #
786 #
795 # builds a match object from them and filters subset. Allowed
787 # builds a match object from them and filters subset. Allowed
796 # prefixes are 'p:' for regular patterns, 'i:' for include
788 # prefixes are 'p:' for regular patterns, 'i:' for include
797 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
789 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
798 # a revision identifier, or the empty string to reference the
790 # a revision identifier, or the empty string to reference the
799 # working directory, from which the match object is
791 # working directory, from which the match object is
800 # initialized. Use 'd:' to set the default matching mode, default
792 # initialized. Use 'd:' to set the default matching mode, default
801 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
793 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
802
794
803 # i18n: "_matchfiles" is a keyword
795 # i18n: "_matchfiles" is a keyword
804 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
796 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
805 pats, inc, exc = [], [], []
797 pats, inc, exc = [], [], []
806 hasset = False
798 hasset = False
807 rev, default = None, None
799 rev, default = None, None
808 for arg in l:
800 for arg in l:
809 # i18n: "_matchfiles" is a keyword
801 # i18n: "_matchfiles" is a keyword
810 s = getstring(arg, _("_matchfiles requires string arguments"))
802 s = getstring(arg, _("_matchfiles requires string arguments"))
811 prefix, value = s[:2], s[2:]
803 prefix, value = s[:2], s[2:]
812 if prefix == 'p:':
804 if prefix == 'p:':
813 pats.append(value)
805 pats.append(value)
814 elif prefix == 'i:':
806 elif prefix == 'i:':
815 inc.append(value)
807 inc.append(value)
816 elif prefix == 'x:':
808 elif prefix == 'x:':
817 exc.append(value)
809 exc.append(value)
818 elif prefix == 'r:':
810 elif prefix == 'r:':
819 if rev is not None:
811 if rev is not None:
820 # i18n: "_matchfiles" is a keyword
812 # i18n: "_matchfiles" is a keyword
821 raise error.ParseError(_('_matchfiles expected at most one '
813 raise error.ParseError(_('_matchfiles expected at most one '
822 'revision'))
814 'revision'))
823 rev = value
815 rev = value
824 elif prefix == 'd:':
816 elif prefix == 'd:':
825 if default is not None:
817 if default is not None:
826 # i18n: "_matchfiles" is a keyword
818 # i18n: "_matchfiles" is a keyword
827 raise error.ParseError(_('_matchfiles expected at most one '
819 raise error.ParseError(_('_matchfiles expected at most one '
828 'default mode'))
820 'default mode'))
829 default = value
821 default = value
830 else:
822 else:
831 # i18n: "_matchfiles" is a keyword
823 # i18n: "_matchfiles" is a keyword
832 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
824 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
833 if not hasset and matchmod.patkind(value) == 'set':
825 if not hasset and matchmod.patkind(value) == 'set':
834 hasset = True
826 hasset = True
835 if not default:
827 if not default:
836 default = 'glob'
828 default = 'glob'
837 m = None
829 m = None
838 s = []
830 s = []
839 for r in subset:
831 for r in subset:
840 c = repo[r]
832 c = repo[r]
841 if not m or (hasset and rev is None):
833 if not m or (hasset and rev is None):
842 ctx = c
834 ctx = c
843 if rev is not None:
835 if rev is not None:
844 ctx = repo[rev or None]
836 ctx = repo[rev or None]
845 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
837 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
846 exclude=exc, ctx=ctx, default=default)
838 exclude=exc, ctx=ctx, default=default)
847 for f in c.files():
839 for f in c.files():
848 if m(f):
840 if m(f):
849 s.append(r)
841 s.append(r)
850 break
842 break
851 return s
843 return s
852
844
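# Editor's illustrative note: _matchfiles() is internal (built by the log
# machinery and by hasfile() below), but per the prefix encoding described
# above, an argument list such as this hypothetical one would match *.py
# files under src/ while excluding tests/, evaluated against tip with glob
# as the default pattern kind:
#   [p:*.py, i:src/**, x:tests/**, r:tip, d:glob]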
853 def hasfile(repo, subset, x):
845 def hasfile(repo, subset, x):
854 """``file(pattern)``
846 """``file(pattern)``
855 Changesets affecting files matched by pattern.
847 Changesets affecting files matched by pattern.
856
848
857 For a faster but less accurate result, consider using ``filelog()``
849 For a faster but less accurate result, consider using ``filelog()``
858 instead.
850 instead.
859 """
851 """
860 # i18n: "file" is a keyword
852 # i18n: "file" is a keyword
861 pat = getstring(x, _("file requires a pattern"))
853 pat = getstring(x, _("file requires a pattern"))
862 return _matchfiles(repo, subset, ('string', 'p:' + pat))
854 return _matchfiles(repo, subset, ('string', 'p:' + pat))
863
855
864 def head(repo, subset, x):
856 def head(repo, subset, x):
865 """``head()``
857 """``head()``
866 Changeset is a named branch head.
858 Changeset is a named branch head.
867 """
859 """
868 # i18n: "head" is a keyword
860 # i18n: "head" is a keyword
869 getargs(x, 0, 0, _("head takes no arguments"))
861 getargs(x, 0, 0, _("head takes no arguments"))
870 hs = set()
862 hs = set()
871 for b, ls in repo.branchmap().iteritems():
863 for b, ls in repo.branchmap().iteritems():
872 hs.update(repo[h].rev() for h in ls)
864 hs.update(repo[h].rev() for h in ls)
873 return [r for r in subset if r in hs]
865 return [r for r in subset if r in hs]
874
866
875 def heads(repo, subset, x):
867 def heads(repo, subset, x):
876 """``heads(set)``
868 """``heads(set)``
877 Members of set with no children in set.
869 Members of set with no children in set.
878 """
870 """
879 s = getset(repo, subset, x)
871 s = getset(repo, subset, x)
880 ps = set(parents(repo, subset, x))
872 ps = set(parents(repo, subset, x))
881 return [r for r in s if r not in ps]
873 return [r for r in s if r not in ps]
882
874
883 def hidden(repo, subset, x):
875 def hidden(repo, subset, x):
884 """``hidden()``
876 """``hidden()``
885 Hidden changesets.
877 Hidden changesets.
886 """
878 """
887 # i18n: "hidden" is a keyword
879 # i18n: "hidden" is a keyword
888 getargs(x, 0, 0, _("hidden takes no arguments"))
880 getargs(x, 0, 0, _("hidden takes no arguments"))
889 return [r for r in subset if r in repo.hiddenrevs]
881 return [r for r in subset if r in repo.hiddenrevs]
890
882
891 def keyword(repo, subset, x):
883 def keyword(repo, subset, x):
892 """``keyword(string)``
884 """``keyword(string)``
893 Search commit message, user name, and names of changed files for
885 Search commit message, user name, and names of changed files for
894 string. The match is case-insensitive.
886 string. The match is case-insensitive.
895 """
887 """
896 # i18n: "keyword" is a keyword
888 # i18n: "keyword" is a keyword
897 kw = encoding.lower(getstring(x, _("keyword requires a string")))
889 kw = encoding.lower(getstring(x, _("keyword requires a string")))
898 l = []
890 l = []
899 for r in subset:
891 for r in subset:
900 c = repo[r]
892 c = repo[r]
901 t = " ".join(c.files() + [c.user(), c.description()])
893 t = " ".join(c.files() + [c.user(), c.description()])
902 if kw in encoding.lower(t):
894 if kw in encoding.lower(t):
903 l.append(r)
895 l.append(r)
904 return l
896 return l
905
897
906 def limit(repo, subset, x):
898 def limit(repo, subset, x):
907 """``limit(set, [n])``
899 """``limit(set, [n])``
908 First n members of set, defaulting to 1.
900 First n members of set, defaulting to 1.
909 """
901 """
910 # i18n: "limit" is a keyword
902 # i18n: "limit" is a keyword
911 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
903 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
912 try:
904 try:
913 lim = 1
905 lim = 1
914 if len(l) == 2:
906 if len(l) == 2:
915 # i18n: "limit" is a keyword
907 # i18n: "limit" is a keyword
916 lim = int(getstring(l[1], _("limit requires a number")))
908 lim = int(getstring(l[1], _("limit requires a number")))
917 except (TypeError, ValueError):
909 except (TypeError, ValueError):
918 # i18n: "limit" is a keyword
910 # i18n: "limit" is a keyword
919 raise error.ParseError(_("limit expects a number"))
911 raise error.ParseError(_("limit expects a number"))
920 ss = set(subset)
912 ss = set(subset)
921 os = getset(repo, list(repo), l[0])[:lim]
913 os = getset(repo, list(repo), l[0])[:lim]
922 return [r for r in os if r in ss]
914 return [r for r in os if r in ss]
923
915
924 def last(repo, subset, x):
916 def last(repo, subset, x):
925 """``last(set, [n])``
917 """``last(set, [n])``
926 Last n members of set, defaulting to 1.
918 Last n members of set, defaulting to 1.
927 """
919 """
928 # i18n: "last" is a keyword
920 # i18n: "last" is a keyword
929 l = getargs(x, 1, 2, _("last requires one or two arguments"))
921 l = getargs(x, 1, 2, _("last requires one or two arguments"))
930 try:
922 try:
931 lim = 1
923 lim = 1
932 if len(l) == 2:
924 if len(l) == 2:
933 # i18n: "last" is a keyword
925 # i18n: "last" is a keyword
934 lim = int(getstring(l[1], _("last requires a number")))
926 lim = int(getstring(l[1], _("last requires a number")))
935 except (TypeError, ValueError):
927 except (TypeError, ValueError):
936 # i18n: "last" is a keyword
928 # i18n: "last" is a keyword
937 raise error.ParseError(_("last expects a number"))
929 raise error.ParseError(_("last expects a number"))
938 ss = set(subset)
930 ss = set(subset)
939 os = getset(repo, list(repo), l[0])[-lim:]
931 os = getset(repo, list(repo), l[0])[-lim:]
940 return [r for r in os if r in ss]
932 return [r for r in os if r in ss]
941
933
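# Editor's sketch of the slicing used by limit() above and last() here, on
# a plain Python list standing in for the resolved set:
#   revs = [0, 1, 2, 3, 4]
#   revs[:2]    # limit(set, 2) / first(set, 2)  -> [0, 1]
#   revs[-2:]   # last(set, 2)                   -> [3, 4]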
942 def maxrev(repo, subset, x):
934 def maxrev(repo, subset, x):
943 """``max(set)``
935 """``max(set)``
944 Changeset with highest revision number in set.
936 Changeset with highest revision number in set.
945 """
937 """
946 os = getset(repo, list(repo), x)
938 os = getset(repo, list(repo), x)
947 if os:
939 if os:
948 m = max(os)
940 m = max(os)
949 if m in subset:
941 if m in subset:
950 return [m]
942 return [m]
951 return []
943 return []
952
944
953 def merge(repo, subset, x):
945 def merge(repo, subset, x):
954 """``merge()``
946 """``merge()``
955 Changeset is a merge changeset.
947 Changeset is a merge changeset.
956 """
948 """
957 # i18n: "merge" is a keyword
949 # i18n: "merge" is a keyword
958 getargs(x, 0, 0, _("merge takes no arguments"))
950 getargs(x, 0, 0, _("merge takes no arguments"))
959 cl = repo.changelog
951 cl = repo.changelog
960 return [r for r in subset if cl.parentrevs(r)[1] != -1]
952 return [r for r in subset if cl.parentrevs(r)[1] != -1]
961
953
962 def branchpoint(repo, subset, x):
954 def branchpoint(repo, subset, x):
963 """``branchpoint()``
955 """``branchpoint()``
964 Changesets with more than one child.
956 Changesets with more than one child.
965 """
957 """
966 # i18n: "branchpoint" is a keyword
958 # i18n: "branchpoint" is a keyword
967 getargs(x, 0, 0, _("branchpoint takes no arguments"))
959 getargs(x, 0, 0, _("branchpoint takes no arguments"))
968 cl = repo.changelog
960 cl = repo.changelog
969 if not subset:
961 if not subset:
970 return []
962 return []
971 baserev = min(subset)
963 baserev = min(subset)
972 parentscount = [0]*(len(repo) - baserev)
964 parentscount = [0]*(len(repo) - baserev)
973 for r in cl.revs(start=baserev + 1):
965 for r in cl.revs(start=baserev + 1):
974 for p in cl.parentrevs(r):
966 for p in cl.parentrevs(r):
975 if p >= baserev:
967 if p >= baserev:
976 parentscount[p - baserev] += 1
968 parentscount[p - baserev] += 1
977 return [r for r in subset if (parentscount[r - baserev] > 1)]
969 return [r for r in subset if (parentscount[r - baserev] > 1)]
978
970
979 def minrev(repo, subset, x):
971 def minrev(repo, subset, x):
980 """``min(set)``
972 """``min(set)``
981 Changeset with lowest revision number in set.
973 Changeset with lowest revision number in set.
982 """
974 """
983 os = getset(repo, list(repo), x)
975 os = getset(repo, list(repo), x)
984 if os:
976 if os:
985 m = min(os)
977 m = min(os)
986 if m in subset:
978 if m in subset:
987 return [m]
979 return [m]
988 return []
980 return []
989
981
990 def modifies(repo, subset, x):
982 def modifies(repo, subset, x):
991 """``modifies(pattern)``
983 """``modifies(pattern)``
992 Changesets modifying files matched by pattern.
984 Changesets modifying files matched by pattern.
993 """
985 """
994 # i18n: "modifies" is a keyword
986 # i18n: "modifies" is a keyword
995 pat = getstring(x, _("modifies requires a pattern"))
987 pat = getstring(x, _("modifies requires a pattern"))
996 return checkstatus(repo, subset, pat, 0)
988 return checkstatus(repo, subset, pat, 0)
997
989
998 def node_(repo, subset, x):
990 def node_(repo, subset, x):
999 """``id(string)``
991 """``id(string)``
1000 Revision unambiguously specified by the given hex string prefix.
992 Revision unambiguously specified by the given hex string prefix.
1001 """
993 """
1002 # i18n: "id" is a keyword
994 # i18n: "id" is a keyword
1003 l = getargs(x, 1, 1, _("id requires one argument"))
995 l = getargs(x, 1, 1, _("id requires one argument"))
1004 # i18n: "id" is a keyword
996 # i18n: "id" is a keyword
1005 n = getstring(l[0], _("id requires a string"))
997 n = getstring(l[0], _("id requires a string"))
1006 if len(n) == 40:
998 if len(n) == 40:
1007 rn = repo[n].rev()
999 rn = repo[n].rev()
1008 else:
1000 else:
1009 rn = None
1001 rn = None
1010 pm = repo.changelog._partialmatch(n)
1002 pm = repo.changelog._partialmatch(n)
1011 if pm is not None:
1003 if pm is not None:
1012 rn = repo.changelog.rev(pm)
1004 rn = repo.changelog.rev(pm)
1013
1005
1014 return [r for r in subset if r == rn]
1006 return [r for r in subset if r == rn]
1015
1007
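# Editor's note: a full 40-character hex hash is looked up directly, while
# shorter prefixes go through _partialmatch, e.g. (hypothetical prefix):
#   hg log -r 'id(a1b2c3d)'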
1016 def obsolete(repo, subset, x):
1008 def obsolete(repo, subset, x):
1017 """``obsolete()``
1009 """``obsolete()``
1018 Mutable changeset with a newer version."""
1010 Mutable changeset with a newer version."""
1019 # i18n: "obsolete" is a keyword
1011 # i18n: "obsolete" is a keyword
1020 getargs(x, 0, 0, _("obsolete takes no arguments"))
1012 getargs(x, 0, 0, _("obsolete takes no arguments"))
1021 obsoletes = obsmod.getrevs(repo, 'obsolete')
1013 obsoletes = obsmod.getrevs(repo, 'obsolete')
1022 return [r for r in subset if r in obsoletes]
1014 return [r for r in subset if r in obsoletes]
1023
1015
1024 def origin(repo, subset, x):
1016 def origin(repo, subset, x):
1025 """``origin([set])``
1017 """``origin([set])``
1026 Changesets that were specified as a source for the grafts, transplants or
1018 Changesets that were specified as a source for the grafts, transplants or
1027 rebases that created the given revisions. Omitting the optional set is the
1019 rebases that created the given revisions. Omitting the optional set is the
1028 same as passing all(). If a changeset created by these operations is itself
1020 same as passing all(). If a changeset created by these operations is itself
1029 specified as a source for one of these operations, only the source changeset
1021 specified as a source for one of these operations, only the source changeset
1030 for the first operation is selected.
1022 for the first operation is selected.
1031 """
1023 """
1032 if x is not None:
1024 if x is not None:
1033 args = set(getset(repo, list(repo), x))
1025 args = set(getset(repo, list(repo), x))
1034 else:
1026 else:
1035 args = set(getall(repo, list(repo), x))
1027 args = set(getall(repo, list(repo), x))
1036
1028
1037 def _firstsrc(rev):
1029 def _firstsrc(rev):
1038 src = _getrevsource(repo, rev)
1030 src = _getrevsource(repo, rev)
1039 if src is None:
1031 if src is None:
1040 return None
1032 return None
1041
1033
1042 while True:
1034 while True:
1043 prev = _getrevsource(repo, src)
1035 prev = _getrevsource(repo, src)
1044
1036
1045 if prev is None:
1037 if prev is None:
1046 return src
1038 return src
1047 src = prev
1039 src = prev
1048
1040
1049 o = set([_firstsrc(r) for r in args])
1041 o = set([_firstsrc(r) for r in args])
1050 return [r for r in subset if r in o]
1042 return [r for r in subset if r in o]
1051
1043
1052 def outgoing(repo, subset, x):
1044 def outgoing(repo, subset, x):
1053 """``outgoing([path])``
1045 """``outgoing([path])``
1054 Changesets not found in the specified destination repository, or the
1046 Changesets not found in the specified destination repository, or the
1055 default push location.
1047 default push location.
1056 """
1048 """
1057 import hg # avoid start-up nasties
1049 import hg # avoid start-up nasties
1058 # i18n: "outgoing" is a keyword
1050 # i18n: "outgoing" is a keyword
1059 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1051 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1060 # i18n: "outgoing" is a keyword
1052 # i18n: "outgoing" is a keyword
1061 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1053 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1062 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1054 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1063 dest, branches = hg.parseurl(dest)
1055 dest, branches = hg.parseurl(dest)
1064 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1056 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1065 if revs:
1057 if revs:
1066 revs = [repo.lookup(rev) for rev in revs]
1058 revs = [repo.lookup(rev) for rev in revs]
1067 other = hg.peer(repo, {}, dest)
1059 other = hg.peer(repo, {}, dest)
1068 repo.ui.pushbuffer()
1060 repo.ui.pushbuffer()
1069 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1061 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1070 repo.ui.popbuffer()
1062 repo.ui.popbuffer()
1071 cl = repo.changelog
1063 cl = repo.changelog
1072 o = set([cl.rev(r) for r in outgoing.missing])
1064 o = set([cl.rev(r) for r in outgoing.missing])
1073 return [r for r in subset if r in o]
1065 return [r for r in subset if r in o]
1074
1066
1075 def p1(repo, subset, x):
1067 def p1(repo, subset, x):
1076 """``p1([set])``
1068 """``p1([set])``
1077 First parent of changesets in set, or the working directory.
1069 First parent of changesets in set, or the working directory.
1078 """
1070 """
1079 if x is None:
1071 if x is None:
1080 p = repo[x].p1().rev()
1072 p = repo[x].p1().rev()
1081 return [r for r in subset if r == p]
1073 return [r for r in subset if r == p]
1082
1074
1083 ps = set()
1075 ps = set()
1084 cl = repo.changelog
1076 cl = repo.changelog
1085 for r in getset(repo, list(repo), x):
1077 for r in getset(repo, list(repo), x):
1086 ps.add(cl.parentrevs(r)[0])
1078 ps.add(cl.parentrevs(r)[0])
1087 return [r for r in subset if r in ps]
1079 return [r for r in subset if r in ps]
1088
1080
1089 def p2(repo, subset, x):
1081 def p2(repo, subset, x):
1090 """``p2([set])``
1082 """``p2([set])``
1091 Second parent of changesets in set, or the working directory.
1083 Second parent of changesets in set, or the working directory.
1092 """
1084 """
1093 if x is None:
1085 if x is None:
1094 ps = repo[x].parents()
1086 ps = repo[x].parents()
1095 try:
1087 try:
1096 p = ps[1].rev()
1088 p = ps[1].rev()
1097 return [r for r in subset if r == p]
1089 return [r for r in subset if r == p]
1098 except IndexError:
1090 except IndexError:
1099 return []
1091 return []
1100
1092
1101 ps = set()
1093 ps = set()
1102 cl = repo.changelog
1094 cl = repo.changelog
1103 for r in getset(repo, list(repo), x):
1095 for r in getset(repo, list(repo), x):
1104 ps.add(cl.parentrevs(r)[1])
1096 ps.add(cl.parentrevs(r)[1])
1105 return [r for r in subset if r in ps]
1097 return [r for r in subset if r in ps]
1106
1098
1107 def parents(repo, subset, x):
1099 def parents(repo, subset, x):
1108 """``parents([set])``
1100 """``parents([set])``
1109 The set of all parents for all changesets in set, or the working directory.
1101 The set of all parents for all changesets in set, or the working directory.
1110 """
1102 """
1111 if x is None:
1103 if x is None:
1112 ps = tuple(p.rev() for p in repo[x].parents())
1104 ps = tuple(p.rev() for p in repo[x].parents())
1113 return [r for r in subset if r in ps]
1105 return [r for r in subset if r in ps]
1114
1106
1115 ps = set()
1107 ps = set()
1116 cl = repo.changelog
1108 cl = repo.changelog
1117 for r in getset(repo, list(repo), x):
1109 for r in getset(repo, list(repo), x):
1118 ps.update(cl.parentrevs(r))
1110 ps.update(cl.parentrevs(r))
1119 return [r for r in subset if r in ps]
1111 return [r for r in subset if r in ps]
1120
1112
1121 def parentspec(repo, subset, x, n):
1113 def parentspec(repo, subset, x, n):
1122 """``set^0``
1114 """``set^0``
1123 The set.
1115 The set.
1124 ``set^1`` (or ``set^``), ``set^2``
1116 ``set^1`` (or ``set^``), ``set^2``
1125 First or second parent, respectively, of all changesets in set.
1117 First or second parent, respectively, of all changesets in set.
1126 """
1118 """
1127 try:
1119 try:
1128 n = int(n[1])
1120 n = int(n[1])
1129 if n not in (0, 1, 2):
1121 if n not in (0, 1, 2):
1130 raise ValueError
1122 raise ValueError
1131 except (TypeError, ValueError):
1123 except (TypeError, ValueError):
1132 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1124 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1133 ps = set()
1125 ps = set()
1134 cl = repo.changelog
1126 cl = repo.changelog
1135 for r in getset(repo, subset, x):
1127 for r in getset(repo, subset, x):
1136 if n == 0:
1128 if n == 0:
1137 ps.add(r)
1129 ps.add(r)
1138 elif n == 1:
1130 elif n == 1:
1139 ps.add(cl.parentrevs(r)[0])
1131 ps.add(cl.parentrevs(r)[0])
1140 elif n == 2:
1132 elif n == 2:
1141 parents = cl.parentrevs(r)
1133 parents = cl.parentrevs(r)
1142 if len(parents) > 1:
1134 if len(parents) > 1:
1143 ps.add(parents[1])
1135 ps.add(parents[1])
1144 return [r for r in subset if r in ps]
1136 return [r for r in subset if r in ps]
1145
1137
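# Editor's note: examples of the ^ operator handled above (tip used as a
# convenient revision):
#   hg log -r 'tip^'     # first parent, same as tip^1
#   hg log -r 'tip^2'    # second parent; empty if tip is not a merge
#   hg log -r 'tip^0'    # tip itself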
1146 def present(repo, subset, x):
1138 def present(repo, subset, x):
1147 """``present(set)``
1139 """``present(set)``
1148 An empty set, if any revision in set isn't found; otherwise,
1140 An empty set, if any revision in set isn't found; otherwise,
1149 all revisions in set.
1141 all revisions in set.
1150
1142
1151 If any of the specified revisions is not present in the local repository,
1143 If any of the specified revisions is not present in the local repository,
1152 the query is normally aborted. But this predicate allows the query
1144 the query is normally aborted. But this predicate allows the query
1153 to continue even in such cases.
1145 to continue even in such cases.
1154 """
1146 """
1155 try:
1147 try:
1156 return getset(repo, subset, x)
1148 return getset(repo, subset, x)
1157 except error.RepoLookupError:
1149 except error.RepoLookupError:
1158 return []
1150 return []
1159
1151
1160 def public(repo, subset, x):
1152 def public(repo, subset, x):
1161 """``public()``
1153 """``public()``
1162 Changeset in public phase."""
1154 Changeset in public phase."""
1163 # i18n: "public" is a keyword
1155 # i18n: "public" is a keyword
1164 getargs(x, 0, 0, _("public takes no arguments"))
1156 getargs(x, 0, 0, _("public takes no arguments"))
1165 pc = repo._phasecache
1157 pc = repo._phasecache
1166 return [r for r in subset if pc.phase(repo, r) == phases.public]
1158 return [r for r in subset if pc.phase(repo, r) == phases.public]
1167
1159
1168 def remote(repo, subset, x):
1160 def remote(repo, subset, x):
1169 """``remote([id [,path]])``
1161 """``remote([id [,path]])``
1170 Local revision that corresponds to the given identifier in a
1162 Local revision that corresponds to the given identifier in a
1171 remote repository, if present. Here, the '.' identifier is a
1163 remote repository, if present. Here, the '.' identifier is a
1172 synonym for the current local branch.
1164 synonym for the current local branch.
1173 """
1165 """
1174
1166
1175 import hg # avoid start-up nasties
1167 import hg # avoid start-up nasties
1176 # i18n: "remote" is a keyword
1168 # i18n: "remote" is a keyword
1177 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1169 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1178
1170
1179 q = '.'
1171 q = '.'
1180 if len(l) > 0:
1172 if len(l) > 0:
1181 # i18n: "remote" is a keyword
1173 # i18n: "remote" is a keyword
1182 q = getstring(l[0], _("remote requires a string id"))
1174 q = getstring(l[0], _("remote requires a string id"))
1183 if q == '.':
1175 if q == '.':
1184 q = repo['.'].branch()
1176 q = repo['.'].branch()
1185
1177
1186 dest = ''
1178 dest = ''
1187 if len(l) > 1:
1179 if len(l) > 1:
1188 # i18n: "remote" is a keyword
1180 # i18n: "remote" is a keyword
1189 dest = getstring(l[1], _("remote requires a repository path"))
1181 dest = getstring(l[1], _("remote requires a repository path"))
1190 dest = repo.ui.expandpath(dest or 'default')
1182 dest = repo.ui.expandpath(dest or 'default')
1191 dest, branches = hg.parseurl(dest)
1183 dest, branches = hg.parseurl(dest)
1192 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1184 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1193 if revs:
1185 if revs:
1194 revs = [repo.lookup(rev) for rev in revs]
1186 revs = [repo.lookup(rev) for rev in revs]
1195 other = hg.peer(repo, {}, dest)
1187 other = hg.peer(repo, {}, dest)
1196 n = other.lookup(q)
1188 n = other.lookup(q)
1197 if n in repo:
1189 if n in repo:
1198 r = repo[n].rev()
1190 r = repo[n].rev()
1199 if r in subset:
1191 if r in subset:
1200 return [r]
1192 return [r]
1201 return []
1193 return []
1202
1194
1203 def removes(repo, subset, x):
1195 def removes(repo, subset, x):
1204 """``removes(pattern)``
1196 """``removes(pattern)``
1205 Changesets which remove files matching pattern.
1197 Changesets which remove files matching pattern.
1206 """
1198 """
1207 # i18n: "removes" is a keyword
1199 # i18n: "removes" is a keyword
1208 pat = getstring(x, _("removes requires a pattern"))
1200 pat = getstring(x, _("removes requires a pattern"))
1209 return checkstatus(repo, subset, pat, 2)
1201 return checkstatus(repo, subset, pat, 2)
1210
1202
1211 def rev(repo, subset, x):
1203 def rev(repo, subset, x):
1212 """``rev(number)``
1204 """``rev(number)``
1213 Revision with the given numeric identifier.
1205 Revision with the given numeric identifier.
1214 """
1206 """
1215 # i18n: "rev" is a keyword
1207 # i18n: "rev" is a keyword
1216 l = getargs(x, 1, 1, _("rev requires one argument"))
1208 l = getargs(x, 1, 1, _("rev requires one argument"))
1217 try:
1209 try:
1218 # i18n: "rev" is a keyword
1210 # i18n: "rev" is a keyword
1219 l = int(getstring(l[0], _("rev requires a number")))
1211 l = int(getstring(l[0], _("rev requires a number")))
1220 except (TypeError, ValueError):
1212 except (TypeError, ValueError):
1221 # i18n: "rev" is a keyword
1213 # i18n: "rev" is a keyword
1222 raise error.ParseError(_("rev expects a number"))
1214 raise error.ParseError(_("rev expects a number"))
1223 return [r for r in subset if r == l]
1215 return [r for r in subset if r == l]
1224
1216
1225 def matching(repo, subset, x):
1217 def matching(repo, subset, x):
1226 """``matching(revision [, field])``
1218 """``matching(revision [, field])``
1227 Changesets in which a given set of fields match the set of fields in the
1219 Changesets in which a given set of fields match the set of fields in the
1228 selected revision or set.
1220 selected revision or set.
1229
1221
1230 To match more than one field, pass the list of fields to match separated
1222 To match more than one field, pass the list of fields to match separated
1231 by spaces (e.g. ``author description``).
1223 by spaces (e.g. ``author description``).
1232
1224
1233 Valid fields are most regular revision fields and some special fields.
1225 Valid fields are most regular revision fields and some special fields.
1234
1226
1235 Regular revision fields are ``description``, ``author``, ``branch``,
1227 Regular revision fields are ``description``, ``author``, ``branch``,
1236 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1228 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1237 and ``diff``.
1229 and ``diff``.
1238 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1230 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1239 contents of the revision. Two revisions matching their ``diff`` will
1231 contents of the revision. Two revisions matching their ``diff`` will
1240 also match their ``files``.
1232 also match their ``files``.
1241
1233
1242 Special fields are ``summary`` and ``metadata``:
1234 Special fields are ``summary`` and ``metadata``:
1243 ``summary`` matches the first line of the description.
1235 ``summary`` matches the first line of the description.
1244 ``metadata`` is equivalent to matching ``description user date``
1236 ``metadata`` is equivalent to matching ``description user date``
1245 (i.e. it matches the main metadata fields).
1237 (i.e. it matches the main metadata fields).
1246
1238
1247 ``metadata`` is the default field which is used when no fields are
1239 ``metadata`` is the default field which is used when no fields are
1248 specified. You can match more than one field at a time.
1240 specified. You can match more than one field at a time.
1249 """
1241 """
1250 # i18n: "matching" is a keyword
1242 # i18n: "matching" is a keyword
1251 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1243 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1252
1244
1253 revs = getset(repo, repo.changelog, l[0])
1245 revs = getset(repo, repo.changelog, l[0])
1254
1246
1255 fieldlist = ['metadata']
1247 fieldlist = ['metadata']
1256 if len(l) > 1:
1248 if len(l) > 1:
1257 fieldlist = getstring(l[1],
1249 fieldlist = getstring(l[1],
1258 # i18n: "matching" is a keyword
1250 # i18n: "matching" is a keyword
1259 _("matching requires a string "
1251 _("matching requires a string "
1260 "as its second argument")).split()
1252 "as its second argument")).split()
1261
1253
1262 # Make sure that there are no repeated fields,
1254 # Make sure that there are no repeated fields,
1263 # expand the 'special' 'metadata' field type
1255 # expand the 'special' 'metadata' field type
1264 # and check the 'files' whenever we check the 'diff'
1256 # and check the 'files' whenever we check the 'diff'
1265 fields = []
1257 fields = []
1266 for field in fieldlist:
1258 for field in fieldlist:
1267 if field == 'metadata':
1259 if field == 'metadata':
1268 fields += ['user', 'description', 'date']
1260 fields += ['user', 'description', 'date']
1269 elif field == 'diff':
1261 elif field == 'diff':
1270 # a revision matching the diff must also match the files
1262 # a revision matching the diff must also match the files
1271 # since matching the diff is very costly, make sure to
1263 # since matching the diff is very costly, make sure to
1272 # also match the files first
1264 # also match the files first
1273 fields += ['files', 'diff']
1265 fields += ['files', 'diff']
1274 else:
1266 else:
1275 if field == 'author':
1267 if field == 'author':
1276 field = 'user'
1268 field = 'user'
1277 fields.append(field)
1269 fields.append(field)
1278 fields = set(fields)
1270 fields = set(fields)
1279 if 'summary' in fields and 'description' in fields:
1271 if 'summary' in fields and 'description' in fields:
1280 # If a revision matches its description it also matches its summary
1272 # If a revision matches its description it also matches its summary
1281 fields.discard('summary')
1273 fields.discard('summary')
1282
1274
1283 # We may want to match more than one field
1275 # We may want to match more than one field
1284 # Not all fields take the same amount of time to be matched
1276 # Not all fields take the same amount of time to be matched
1285 # Sort the selected fields in order of increasing matching cost
1277 # Sort the selected fields in order of increasing matching cost
1286 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1278 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1287 'files', 'description', 'substate', 'diff']
1279 'files', 'description', 'substate', 'diff']
1288 def fieldkeyfunc(f):
1280 def fieldkeyfunc(f):
1289 try:
1281 try:
1290 return fieldorder.index(f)
1282 return fieldorder.index(f)
1291 except ValueError:
1283 except ValueError:
1292 # assume an unknown field is very costly
1284 # assume an unknown field is very costly
1293 return len(fieldorder)
1285 return len(fieldorder)
1294 fields = list(fields)
1286 fields = list(fields)
1295 fields.sort(key=fieldkeyfunc)
1287 fields.sort(key=fieldkeyfunc)
1296
1288
1297 # Each field will be matched with its own "getfield" function
1289 # Each field will be matched with its own "getfield" function
1298 # which will be added to the getfieldfuncs array of functions
1290 # which will be added to the getfieldfuncs array of functions
1299 getfieldfuncs = []
1291 getfieldfuncs = []
1300 _funcs = {
1292 _funcs = {
1301 'user': lambda r: repo[r].user(),
1293 'user': lambda r: repo[r].user(),
1302 'branch': lambda r: repo[r].branch(),
1294 'branch': lambda r: repo[r].branch(),
1303 'date': lambda r: repo[r].date(),
1295 'date': lambda r: repo[r].date(),
1304 'description': lambda r: repo[r].description(),
1296 'description': lambda r: repo[r].description(),
1305 'files': lambda r: repo[r].files(),
1297 'files': lambda r: repo[r].files(),
1306 'parents': lambda r: repo[r].parents(),
1298 'parents': lambda r: repo[r].parents(),
1307 'phase': lambda r: repo[r].phase(),
1299 'phase': lambda r: repo[r].phase(),
1308 'substate': lambda r: repo[r].substate,
1300 'substate': lambda r: repo[r].substate,
1309 'summary': lambda r: repo[r].description().splitlines()[0],
1301 'summary': lambda r: repo[r].description().splitlines()[0],
1310 'diff': lambda r: list(repo[r].diff(git=True),)
1302 'diff': lambda r: list(repo[r].diff(git=True),)
1311 }
1303 }
1312 for info in fields:
1304 for info in fields:
1313 getfield = _funcs.get(info, None)
1305 getfield = _funcs.get(info, None)
1314 if getfield is None:
1306 if getfield is None:
1315 raise error.ParseError(
1307 raise error.ParseError(
1316 # i18n: "matching" is a keyword
1308 # i18n: "matching" is a keyword
1317 _("unexpected field name passed to matching: %s") % info)
1309 _("unexpected field name passed to matching: %s") % info)
1318 getfieldfuncs.append(getfield)
1310 getfieldfuncs.append(getfield)
1319 # convert the getfield array of functions into a "getinfo" function
1311 # convert the getfield array of functions into a "getinfo" function
1320 # which returns an array of field values (or a single value if there
1312 # which returns an array of field values (or a single value if there
1321 # is only one field to match)
1313 # is only one field to match)
1322 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1314 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1323
1315
1324 matches = set()
1316 matches = set()
1325 for rev in revs:
1317 for rev in revs:
1326 target = getinfo(rev)
1318 target = getinfo(rev)
1327 for r in subset:
1319 for r in subset:
1328 match = True
1320 match = True
1329 for n, f in enumerate(getfieldfuncs):
1321 for n, f in enumerate(getfieldfuncs):
1330 if target[n] != f(r):
1322 if target[n] != f(r):
1331 match = False
1323 match = False
1332 break
1324 break
1333 if match:
1325 if match:
1334 matches.add(r)
1326 matches.add(r)
1335 return [r for r in subset if r in matches]
1327 return [r for r in subset if r in matches]
1336
1328
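# Editor's sketch (not part of revset.py): the cost ordering used above,
# shown standalone. Fields are compared cheapest-first, and an unknown
# field sorts last because it is assumed to be the most expensive. From the
# command line the predicate is used as e.g. (illustrative only):
#   hg log -r "matching(tip, 'author date')"
_order = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
          'files', 'description', 'substate', 'diff']
def _cost(f):
    try:
        return _order.index(f)
    except ValueError:
        return len(_order)      # unknown fields are assumed very costly
assert sorted(['diff', 'user', 'files'], key=_cost) == ['user', 'files', 'diff']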
1337 def reverse(repo, subset, x):
1329 def reverse(repo, subset, x):
1338 """``reverse(set)``
1330 """``reverse(set)``
1339 Reverse order of set.
1331 Reverse order of set.
1340 """
1332 """
1341 l = getset(repo, subset, x)
1333 l = getset(repo, subset, x)
1342 if not isinstance(l, list):
1334 if not isinstance(l, list):
1343 l = list(l)
1335 l = list(l)
1344 l.reverse()
1336 l.reverse()
1345 return l
1337 return l
1346
1338
1347 def roots(repo, subset, x):
1339 def roots(repo, subset, x):
1348 """``roots(set)``
1340 """``roots(set)``
1349 Changesets in set with no parent changeset in set.
1341 Changesets in set with no parent changeset in set.
1350 """
1342 """
1351 s = set(getset(repo, repo.changelog, x))
1343 s = set(getset(repo, repo.changelog, x))
1352 if len(subset) == len(repo):
1353 subset = s
1354 else:
1355 subset = [r for r in subset if r in s]
1344 subset = [r for r in subset if r in s]
1356 cs = _children(repo, subset, s)
1345 cs = _children(repo, subset, s)
1357 return [r for r in subset if r not in cs]
1346 return [r for r in subset if r not in cs]
1358
1347
1359 def secret(repo, subset, x):
1348 def secret(repo, subset, x):
1360 """``secret()``
1349 """``secret()``
1361 Changeset in secret phase."""
1361 Changesets in the secret phase."""
1350 Changesets in the secret phase."""
1351 # i18n: "secret" is a keyword
1363 getargs(x, 0, 0, _("secret takes no arguments"))
1352 getargs(x, 0, 0, _("secret takes no arguments"))
1364 pc = repo._phasecache
1353 pc = repo._phasecache
1365 return [r for r in subset if pc.phase(repo, r) == phases.secret]
1354 return [r for r in subset if pc.phase(repo, r) == phases.secret]
1366
1355
1367 def sort(repo, subset, x):
1356 def sort(repo, subset, x):
1368 """``sort(set[, [-]key...])``
1357 """``sort(set[, [-]key...])``
1369 Sort set by keys. The default sort order is ascending; specify a key
1358 Sort set by keys. The default sort order is ascending; specify a key
1370 as ``-key`` to sort in descending order.
1359 as ``-key`` to sort in descending order.
1371
1360
1372 The keys can be:
1361 The keys can be:
1373
1362
1374 - ``rev`` for the revision number,
1363 - ``rev`` for the revision number,
1375 - ``branch`` for the branch name,
1364 - ``branch`` for the branch name,
1376 - ``desc`` for the commit message (description),
1365 - ``desc`` for the commit message (description),
1377 - ``user`` for user name (``author`` can be used as an alias),
1366 - ``user`` for user name (``author`` can be used as an alias),
1378 - ``date`` for the commit date
1367 - ``date`` for the commit date
1379 """
1368 """
1380 # i18n: "sort" is a keyword
1369 # i18n: "sort" is a keyword
1381 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1370 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1382 keys = "rev"
1371 keys = "rev"
1383 if len(l) == 2:
1372 if len(l) == 2:
1384 # i18n: "sort" is a keyword
1373 # i18n: "sort" is a keyword
1385 keys = getstring(l[1], _("sort spec must be a string"))
1374 keys = getstring(l[1], _("sort spec must be a string"))
1386
1375
1387 s = l[0]
1376 s = l[0]
1388 keys = keys.split()
1377 keys = keys.split()
1389 l = []
1378 l = []
1390 def invert(s):
1379 def invert(s):
1391 return "".join(chr(255 - ord(c)) for c in s)
1380 return "".join(chr(255 - ord(c)) for c in s)
1392 for r in getset(repo, subset, s):
1381 for r in getset(repo, subset, s):
1393 c = repo[r]
1382 c = repo[r]
1394 e = []
1383 e = []
1395 for k in keys:
1384 for k in keys:
1396 if k == 'rev':
1385 if k == 'rev':
1397 e.append(r)
1386 e.append(r)
1398 elif k == '-rev':
1387 elif k == '-rev':
1399 e.append(-r)
1388 e.append(-r)
1400 elif k == 'branch':
1389 elif k == 'branch':
1401 e.append(c.branch())
1390 e.append(c.branch())
1402 elif k == '-branch':
1391 elif k == '-branch':
1403 e.append(invert(c.branch()))
1392 e.append(invert(c.branch()))
1404 elif k == 'desc':
1393 elif k == 'desc':
1405 e.append(c.description())
1394 e.append(c.description())
1406 elif k == '-desc':
1395 elif k == '-desc':
1407 e.append(invert(c.description()))
1396 e.append(invert(c.description()))
1408 elif k in 'user author':
1397 elif k in 'user author':
1409 e.append(c.user())
1398 e.append(c.user())
1410 elif k in '-user -author':
1399 elif k in '-user -author':
1411 e.append(invert(c.user()))
1400 e.append(invert(c.user()))
1412 elif k == 'date':
1401 elif k == 'date':
1413 e.append(c.date()[0])
1402 e.append(c.date()[0])
1414 elif k == '-date':
1403 elif k == '-date':
1415 e.append(-c.date()[0])
1404 e.append(-c.date()[0])
1416 else:
1405 else:
1417 raise error.ParseError(_("unknown sort key %r") % k)
1406 raise error.ParseError(_("unknown sort key %r") % k)
1418 e.append(r)
1407 e.append(r)
1419 l.append(e)
1408 l.append(e)
1420 l.sort()
1409 l.sort()
1421 return [e[-1] for e in l]
1410 return [e[-1] for e in l]
1422
1411
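# Editor's sketch (not part of revset.py): invert() above flips every byte,
# so an ascending sort on the inverted key orders the original strings
# descending (the comparison is reversed at the first differing byte). That
# is how '-branch', '-desc' and '-user' avoid a custom comparator.
def _invert(s):
    return "".join(chr(255 - ord(c)) for c in s)
_names = ['stable', 'default', 'release']
assert [n for _k, n in sorted((_invert(n), n) for n in _names)] == \
       sorted(_names, reverse=True)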
1423 def _stringmatcher(pattern):
1412 def _stringmatcher(pattern):
1424 """
1413 """
1425 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1414 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1426 returns the matcher name, pattern, and matcher function.
1415 returns the matcher name, pattern, and matcher function.
1427 missing or unknown prefixes are treated as literal matches.
1416 missing or unknown prefixes are treated as literal matches.
1428
1417
1429 helper for tests:
1418 helper for tests:
1430 >>> def test(pattern, *tests):
1419 >>> def test(pattern, *tests):
1431 ... kind, pattern, matcher = _stringmatcher(pattern)
1420 ... kind, pattern, matcher = _stringmatcher(pattern)
1432 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1421 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1433
1422
1434 exact matching (no prefix):
1423 exact matching (no prefix):
1435 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1424 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1436 ('literal', 'abcdefg', [False, False, True])
1425 ('literal', 'abcdefg', [False, False, True])
1437
1426
1438 regex matching ('re:' prefix)
1427 regex matching ('re:' prefix)
1439 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1428 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1440 ('re', 'a.+b', [False, False, True])
1429 ('re', 'a.+b', [False, False, True])
1441
1430
1442 force exact matches ('literal:' prefix)
1431 force exact matches ('literal:' prefix)
1443 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1432 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1444 ('literal', 're:foobar', [False, True])
1433 ('literal', 're:foobar', [False, True])
1445
1434
1446 unknown prefixes are ignored and treated as literals
1435 unknown prefixes are ignored and treated as literals
1447 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1436 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1448 ('literal', 'foo:bar', [False, False, True])
1437 ('literal', 'foo:bar', [False, False, True])
1449 """
1438 """
1450 if pattern.startswith('re:'):
1439 if pattern.startswith('re:'):
1451 pattern = pattern[3:]
1440 pattern = pattern[3:]
1452 try:
1441 try:
1453 regex = re.compile(pattern)
1442 regex = re.compile(pattern)
1454 except re.error, e:
1443 except re.error, e:
1455 raise error.ParseError(_('invalid regular expression: %s')
1444 raise error.ParseError(_('invalid regular expression: %s')
1456 % e)
1445 % e)
1457 return 're', pattern, regex.search
1446 return 're', pattern, regex.search
1458 elif pattern.startswith('literal:'):
1447 elif pattern.startswith('literal:'):
1459 pattern = pattern[8:]
1448 pattern = pattern[8:]
1460 return 'literal', pattern, pattern.__eq__
1449 return 'literal', pattern, pattern.__eq__
1461
1450
1462 def _substringmatcher(pattern):
1451 def _substringmatcher(pattern):
1463 kind, pattern, matcher = _stringmatcher(pattern)
1452 kind, pattern, matcher = _stringmatcher(pattern)
1464 if kind == 'literal':
1453 if kind == 'literal':
1465 matcher = lambda s: pattern in s
1454 matcher = lambda s: pattern in s
1466 return kind, pattern, matcher
1455 return kind, pattern, matcher
1467
1456
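# Editor's example (assumes _stringmatcher/_substringmatcher above): with no
# prefix the pattern is matched as a substring -- this is what user() and
# author() rely on -- while a 're:' prefix keeps full regex search.
_kind, _pat, _match = _substringmatcher('mpm')
assert (_kind, _match('user <mpm@example.com>'), _match('someone else')) == \
       ('literal', True, False)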
1468 def tag(repo, subset, x):
1457 def tag(repo, subset, x):
1469 """``tag([name])``
1458 """``tag([name])``
1470 The specified tag by name, or all tagged revisions if no name is given.
1459 The specified tag by name, or all tagged revisions if no name is given.
1471 """
1460 """
1472 # i18n: "tag" is a keyword
1461 # i18n: "tag" is a keyword
1473 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1462 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1474 cl = repo.changelog
1463 cl = repo.changelog
1475 if args:
1464 if args:
1476 pattern = getstring(args[0],
1465 pattern = getstring(args[0],
1477 # i18n: "tag" is a keyword
1466 # i18n: "tag" is a keyword
1478 _('the argument to tag must be a string'))
1467 _('the argument to tag must be a string'))
1479 kind, pattern, matcher = _stringmatcher(pattern)
1468 kind, pattern, matcher = _stringmatcher(pattern)
1480 if kind == 'literal':
1469 if kind == 'literal':
1481 # avoid resolving all tags
1470 # avoid resolving all tags
1482 tn = repo._tagscache.tags.get(pattern, None)
1471 tn = repo._tagscache.tags.get(pattern, None)
1483 if tn is None:
1472 if tn is None:
1484 raise util.Abort(_("tag '%s' does not exist") % pattern)
1473 raise util.Abort(_("tag '%s' does not exist") % pattern)
1485 s = set([repo[tn].rev()])
1474 s = set([repo[tn].rev()])
1486 else:
1475 else:
1487 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1476 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1488 if not s:
1477 if not s:
1489 raise util.Abort(_("no tags exist that match '%s'") % pattern)
1478 raise util.Abort(_("no tags exist that match '%s'") % pattern)
1490 else:
1479 else:
1491 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1480 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1492 return [r for r in subset if r in s]
1481 return [r for r in subset if r in s]
1493
1482
1494 def tagged(repo, subset, x):
1483 def tagged(repo, subset, x):
1495 return tag(repo, subset, x)
1484 return tag(repo, subset, x)
1496
1485
1497 def unstable(repo, subset, x):
1486 def unstable(repo, subset, x):
1498 """``unstable()``
1487 """``unstable()``
1499 Non-obsolete changesets with obsolete ancestors.
1488 Non-obsolete changesets with obsolete ancestors.
1500 """
1489 """
1501 # i18n: "unstable" is a keyword
1490 # i18n: "unstable" is a keyword
1502 getargs(x, 0, 0, _("unstable takes no arguments"))
1491 getargs(x, 0, 0, _("unstable takes no arguments"))
1503 unstables = obsmod.getrevs(repo, 'unstable')
1492 unstables = obsmod.getrevs(repo, 'unstable')
1504 return [r for r in subset if r in unstables]
1493 return [r for r in subset if r in unstables]
1505
1494
1506
1495
1507 def user(repo, subset, x):
1496 def user(repo, subset, x):
1508 """``user(string)``
1497 """``user(string)``
1509 User name contains string. The match is case-insensitive.
1498 User name contains string. The match is case-insensitive.
1510
1499
1511 If `string` starts with `re:`, the remainder of the string is treated as
1500 If `string` starts with `re:`, the remainder of the string is treated as
1512 a regular expression. To match a user that actually contains `re:`, use
1501 a regular expression. To match a user that actually contains `re:`, use
1513 the prefix `literal:`.
1502 the prefix `literal:`.
1514 """
1503 """
1515 return author(repo, subset, x)
1504 return author(repo, subset, x)
1516
1505
1517 # for internal use
1506 # for internal use
1518 def _list(repo, subset, x):
1507 def _list(repo, subset, x):
1519 s = getstring(x, "internal error")
1508 s = getstring(x, "internal error")
1520 if not s:
1509 if not s:
1521 return []
1510 return []
1522 if not isinstance(subset, set):
1511 if not isinstance(subset, set):
1523 subset = set(subset)
1512 subset = set(subset)
1524 ls = [repo[r].rev() for r in s.split('\0')]
1513 ls = [repo[r].rev() for r in s.split('\0')]
1525 return [r for r in ls if r in subset]
1514 return [r for r in ls if r in subset]
1526
1515
1527 symbols = {
1516 symbols = {
1528 "adds": adds,
1517 "adds": adds,
1529 "all": getall,
1518 "all": getall,
1530 "ancestor": ancestor,
1519 "ancestor": ancestor,
1531 "ancestors": ancestors,
1520 "ancestors": ancestors,
1532 "_firstancestors": _firstancestors,
1521 "_firstancestors": _firstancestors,
1533 "author": author,
1522 "author": author,
1534 "bisect": bisect,
1523 "bisect": bisect,
1535 "bisected": bisected,
1524 "bisected": bisected,
1536 "bookmark": bookmark,
1525 "bookmark": bookmark,
1537 "branch": branch,
1526 "branch": branch,
1538 "branchpoint": branchpoint,
1527 "branchpoint": branchpoint,
1539 "bumped": bumped,
1528 "bumped": bumped,
1540 "bundle": bundle,
1529 "bundle": bundle,
1541 "children": children,
1530 "children": children,
1542 "closed": closed,
1531 "closed": closed,
1543 "contains": contains,
1532 "contains": contains,
1544 "converted": converted,
1533 "converted": converted,
1545 "date": date,
1534 "date": date,
1546 "desc": desc,
1535 "desc": desc,
1547 "descendants": descendants,
1536 "descendants": descendants,
1548 "_firstdescendants": _firstdescendants,
1537 "_firstdescendants": _firstdescendants,
1549 "destination": destination,
1538 "destination": destination,
1550 "draft": draft,
1539 "draft": draft,
1551 "extinct": extinct,
1540 "extinct": extinct,
1552 "extra": extra,
1541 "extra": extra,
1553 "file": hasfile,
1542 "file": hasfile,
1554 "filelog": filelog,
1543 "filelog": filelog,
1555 "first": first,
1544 "first": first,
1556 "follow": follow,
1545 "follow": follow,
1557 "_followfirst": _followfirst,
1546 "_followfirst": _followfirst,
1558 "grep": grep,
1547 "grep": grep,
1559 "head": head,
1548 "head": head,
1560 "heads": heads,
1549 "heads": heads,
1561 "hidden": hidden,
1550 "hidden": hidden,
1562 "id": node_,
1551 "id": node_,
1563 "keyword": keyword,
1552 "keyword": keyword,
1564 "last": last,
1553 "last": last,
1565 "limit": limit,
1554 "limit": limit,
1566 "_matchfiles": _matchfiles,
1555 "_matchfiles": _matchfiles,
1567 "max": maxrev,
1556 "max": maxrev,
1568 "merge": merge,
1557 "merge": merge,
1569 "min": minrev,
1558 "min": minrev,
1570 "modifies": modifies,
1559 "modifies": modifies,
1571 "obsolete": obsolete,
1560 "obsolete": obsolete,
1572 "origin": origin,
1561 "origin": origin,
1573 "outgoing": outgoing,
1562 "outgoing": outgoing,
1574 "p1": p1,
1563 "p1": p1,
1575 "p2": p2,
1564 "p2": p2,
1576 "parents": parents,
1565 "parents": parents,
1577 "present": present,
1566 "present": present,
1578 "public": public,
1567 "public": public,
1579 "remote": remote,
1568 "remote": remote,
1580 "removes": removes,
1569 "removes": removes,
1581 "rev": rev,
1570 "rev": rev,
1582 "reverse": reverse,
1571 "reverse": reverse,
1583 "roots": roots,
1572 "roots": roots,
1584 "sort": sort,
1573 "sort": sort,
1585 "secret": secret,
1574 "secret": secret,
1586 "matching": matching,
1575 "matching": matching,
1587 "tag": tag,
1576 "tag": tag,
1588 "tagged": tagged,
1577 "tagged": tagged,
1589 "user": user,
1578 "user": user,
1590 "unstable": unstable,
1579 "unstable": unstable,
1591 "_list": _list,
1580 "_list": _list,
1592 }
1581 }
1593
1582
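# Editor's sketch (illustrative, not part of revset.py): a predicate is just
# a function(repo, subset, x) returning the matching revisions, and an
# extension can add entries to this table in the same way. 'nonempty' is a
# made-up name used only for this example.
def nonempty(repo, subset, x):
    """``nonempty()`` changesets touching at least one file (toy example)"""
    getargs(x, 0, 0, _("nonempty takes no arguments"))
    return [r for r in subset if repo[r].files()]
symbols['nonempty'] = nonempty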
1594 methods = {
1583 methods = {
1595 "range": rangeset,
1584 "range": rangeset,
1596 "dagrange": dagrange,
1585 "dagrange": dagrange,
1597 "string": stringset,
1586 "string": stringset,
1598 "symbol": symbolset,
1587 "symbol": symbolset,
1599 "and": andset,
1588 "and": andset,
1600 "or": orset,
1589 "or": orset,
1601 "not": notset,
1590 "not": notset,
1602 "list": listset,
1591 "list": listset,
1603 "func": func,
1592 "func": func,
1604 "ancestor": ancestorspec,
1593 "ancestor": ancestorspec,
1605 "parent": parentspec,
1594 "parent": parentspec,
1606 "parentpost": p1,
1595 "parentpost": p1,
1607 }
1596 }
1608
1597
1609 def optimize(x, small):
1598 def optimize(x, small):
1610 if x is None:
1599 if x is None:
1611 return 0, x
1600 return 0, x
1612
1601
1613 smallbonus = 1
1602 smallbonus = 1
1614 if small:
1603 if small:
1615 smallbonus = .5
1604 smallbonus = .5
1616
1605
1617 op = x[0]
1606 op = x[0]
1618 if op == 'minus':
1607 if op == 'minus':
1619 return optimize(('and', x[1], ('not', x[2])), small)
1608 return optimize(('and', x[1], ('not', x[2])), small)
1620 elif op == 'dagrangepre':
1609 elif op == 'dagrangepre':
1621 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
1610 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
1622 elif op == 'dagrangepost':
1611 elif op == 'dagrangepost':
1623 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
1612 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
1624 elif op == 'rangepre':
1613 elif op == 'rangepre':
1625 return optimize(('range', ('string', '0'), x[1]), small)
1614 return optimize(('range', ('string', '0'), x[1]), small)
1626 elif op == 'rangepost':
1615 elif op == 'rangepost':
1627 return optimize(('range', x[1], ('string', 'tip')), small)
1616 return optimize(('range', x[1], ('string', 'tip')), small)
1628 elif op == 'negate':
1617 elif op == 'negate':
1629 return optimize(('string',
1618 return optimize(('string',
1630 '-' + getstring(x[1], _("can't negate that"))), small)
1619 '-' + getstring(x[1], _("can't negate that"))), small)
1631 elif op in 'string symbol negate':
1620 elif op in 'string symbol negate':
1632 return smallbonus, x # single revisions are small
1621 return smallbonus, x # single revisions are small
1633 elif op == 'and':
1622 elif op == 'and':
1634 wa, ta = optimize(x[1], True)
1623 wa, ta = optimize(x[1], True)
1635 wb, tb = optimize(x[2], True)
1624 wb, tb = optimize(x[2], True)
1636 w = min(wa, wb)
1625 w = min(wa, wb)
1637 if wa > wb:
1626 if wa > wb:
1638 return w, (op, tb, ta)
1627 return w, (op, tb, ta)
1639 return w, (op, ta, tb)
1628 return w, (op, ta, tb)
1640 elif op == 'or':
1629 elif op == 'or':
1641 wa, ta = optimize(x[1], False)
1630 wa, ta = optimize(x[1], False)
1642 wb, tb = optimize(x[2], False)
1631 wb, tb = optimize(x[2], False)
1643 if wb < wa:
1632 if wb < wa:
1644 wb, wa = wa, wb
1633 wb, wa = wa, wb
1645 return max(wa, wb), (op, ta, tb)
1634 return max(wa, wb), (op, ta, tb)
1646 elif op == 'not':
1635 elif op == 'not':
1647 o = optimize(x[1], not small)
1636 o = optimize(x[1], not small)
1648 return o[0], (op, o[1])
1637 return o[0], (op, o[1])
1649 elif op == 'parentpost':
1638 elif op == 'parentpost':
1650 o = optimize(x[1], small)
1639 o = optimize(x[1], small)
1651 return o[0], (op, o[1])
1640 return o[0], (op, o[1])
1652 elif op == 'group':
1641 elif op == 'group':
1653 return optimize(x[1], small)
1642 return optimize(x[1], small)
1654 elif op in 'dagrange range list parent ancestorspec':
1643 elif op in 'dagrange range list parent ancestorspec':
1655 if op == 'parent':
1644 if op == 'parent':
1656 # x^:y means (x^) : y, not x ^ (:y)
1645 # x^:y means (x^) : y, not x ^ (:y)
1657 post = ('parentpost', x[1])
1646 post = ('parentpost', x[1])
1658 if x[2][0] == 'dagrangepre':
1647 if x[2][0] == 'dagrangepre':
1659 return optimize(('dagrange', post, x[2][1]), small)
1648 return optimize(('dagrange', post, x[2][1]), small)
1660 elif x[2][0] == 'rangepre':
1649 elif x[2][0] == 'rangepre':
1661 return optimize(('range', post, x[2][1]), small)
1650 return optimize(('range', post, x[2][1]), small)
1662
1651
1663 wa, ta = optimize(x[1], small)
1652 wa, ta = optimize(x[1], small)
1664 wb, tb = optimize(x[2], small)
1653 wb, tb = optimize(x[2], small)
1665 return wa + wb, (op, ta, tb)
1654 return wa + wb, (op, ta, tb)
1666 elif op == 'func':
1655 elif op == 'func':
1667 f = getstring(x[1], _("not a symbol"))
1656 f = getstring(x[1], _("not a symbol"))
1668 wa, ta = optimize(x[2], small)
1657 wa, ta = optimize(x[2], small)
1669 if f in ("author branch closed date desc file grep keyword "
1658 if f in ("author branch closed date desc file grep keyword "
1670 "outgoing user"):
1659 "outgoing user"):
1671 w = 10 # slow
1660 w = 10 # slow
1672 elif f in "modifies adds removes":
1661 elif f in "modifies adds removes":
1673 w = 30 # slower
1662 w = 30 # slower
1674 elif f == "contains":
1663 elif f == "contains":
1675 w = 100 # very slow
1664 w = 100 # very slow
1676 elif f == "ancestor":
1665 elif f == "ancestor":
1677 w = 1 * smallbonus
1666 w = 1 * smallbonus
1678 elif f in "reverse limit first":
1667 elif f in "reverse limit first":
1679 w = 0
1668 w = 0
1680 elif f in "sort":
1669 elif f in "sort":
1681 w = 10 # assume most sorts look at changelog
1670 w = 10 # assume most sorts look at changelog
1682 else:
1671 else:
1683 w = 1
1672 w = 1
1684 return w + wa, (op, x[1], ta)
1673 return w + wa, (op, x[1], ta)
1685 return 1, x
1674 return 1, x
1686
1675
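# Editor's example (assumes optimize() above and the weights hard-coded in
# it): for an 'and' node the cheaper operand is moved in front, so a very
# slow predicate such as contains() only sees an already-narrowed subset.
_tree = ('and',
         ('func', ('symbol', 'contains'), ('string', 'README')),  # weight 100
         ('func', ('symbol', 'branch'), ('string', 'default')))   # weight 10
_w, _opt = optimize(_tree, True)
assert _opt[1][1] == ('symbol', 'branch')  # branch() is now evaluated first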
1687 _aliasarg = ('func', ('symbol', '_aliasarg'))
1676 _aliasarg = ('func', ('symbol', '_aliasarg'))
1688 def _getaliasarg(tree):
1677 def _getaliasarg(tree):
1689 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
1678 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
1690 return X; otherwise return None.
1679 return X; otherwise return None.
1691 """
1680 """
1692 if (len(tree) == 3 and tree[:2] == _aliasarg
1681 if (len(tree) == 3 and tree[:2] == _aliasarg
1693 and tree[2][0] == 'string'):
1682 and tree[2][0] == 'string'):
1694 return tree[2][1]
1683 return tree[2][1]
1695 return None
1684 return None
1696
1685
1697 def _checkaliasarg(tree, known=None):
1686 def _checkaliasarg(tree, known=None):
1698 """Check tree contains no _aliasarg construct or only ones which
1687 """Check tree contains no _aliasarg construct or only ones which
1699 value is in known. Used to avoid alias placeholders injection.
1688 value is in known. Used to avoid alias placeholders injection.
1700 """
1689 """
1701 if isinstance(tree, tuple):
1690 if isinstance(tree, tuple):
1702 arg = _getaliasarg(tree)
1691 arg = _getaliasarg(tree)
1703 if arg is not None and (not known or arg not in known):
1692 if arg is not None and (not known or arg not in known):
1704 raise error.ParseError(_("not a function: %s") % '_aliasarg')
1693 raise error.ParseError(_("not a function: %s") % '_aliasarg')
1705 for t in tree:
1694 for t in tree:
1706 _checkaliasarg(t, known)
1695 _checkaliasarg(t, known)
1707
1696
1708 class revsetalias(object):
1697 class revsetalias(object):
1709 funcre = re.compile('^([^(]+)\(([^)]+)\)$')
1698 funcre = re.compile('^([^(]+)\(([^)]+)\)$')
1710 args = None
1699 args = None
1711
1700
1712 def __init__(self, name, value):
1701 def __init__(self, name, value):
1713 '''Aliases like:
1702 '''Aliases like:
1714
1703
1715 h = heads(default)
1704 h = heads(default)
1716 b($1) = ancestors($1) - ancestors(default)
1705 b($1) = ancestors($1) - ancestors(default)
1717 '''
1706 '''
1718 m = self.funcre.search(name)
1707 m = self.funcre.search(name)
1719 if m:
1708 if m:
1720 self.name = m.group(1)
1709 self.name = m.group(1)
1721 self.tree = ('func', ('symbol', m.group(1)))
1710 self.tree = ('func', ('symbol', m.group(1)))
1722 self.args = [x.strip() for x in m.group(2).split(',')]
1711 self.args = [x.strip() for x in m.group(2).split(',')]
1723 for arg in self.args:
1712 for arg in self.args:
1724 # _aliasarg() is an unknown symbol only used to separate
1713 # _aliasarg() is an unknown symbol only used to separate
1725 # alias argument placeholders from regular strings.
1714 # alias argument placeholders from regular strings.
1726 value = value.replace(arg, '_aliasarg(%r)' % (arg,))
1715 value = value.replace(arg, '_aliasarg(%r)' % (arg,))
1727 else:
1716 else:
1728 self.name = name
1717 self.name = name
1729 self.tree = ('symbol', name)
1718 self.tree = ('symbol', name)
1730
1719
1731 self.replacement, pos = parse(value)
1720 self.replacement, pos = parse(value)
1732 if pos != len(value):
1721 if pos != len(value):
1733 raise error.ParseError(_('invalid token'), pos)
1722 raise error.ParseError(_('invalid token'), pos)
1734 # Check for placeholder injection
1723 # Check for placeholder injection
1735 _checkaliasarg(self.replacement, self.args)
1724 _checkaliasarg(self.replacement, self.args)
1736
1725
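# Editor's note (illustrative; the alias forms come from the docstring
# above): these objects are built from the [revsetalias] section of a
# configuration file, e.g.
#
#   [revsetalias]
#   h = heads(default)
#   b($1) = ancestors($1) - ancestors(default)
#
# findaliases() below creates one revsetalias per entry and expands any use
# of 'h' or 'b(...)' in a query before it is optimized.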
1737 def _getalias(aliases, tree):
1726 def _getalias(aliases, tree):
1738 """If tree looks like an unexpanded alias, return it. Return None
1727 """If tree looks like an unexpanded alias, return it. Return None
1739 otherwise.
1728 otherwise.
1740 """
1729 """
1741 if isinstance(tree, tuple) and tree:
1730 if isinstance(tree, tuple) and tree:
1742 if tree[0] == 'symbol' and len(tree) == 2:
1731 if tree[0] == 'symbol' and len(tree) == 2:
1743 name = tree[1]
1732 name = tree[1]
1744 alias = aliases.get(name)
1733 alias = aliases.get(name)
1745 if alias and alias.args is None and alias.tree == tree:
1734 if alias and alias.args is None and alias.tree == tree:
1746 return alias
1735 return alias
1747 if tree[0] == 'func' and len(tree) > 1:
1736 if tree[0] == 'func' and len(tree) > 1:
1748 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
1737 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
1749 name = tree[1][1]
1738 name = tree[1][1]
1750 alias = aliases.get(name)
1739 alias = aliases.get(name)
1751 if alias and alias.args is not None and alias.tree == tree[:2]:
1740 if alias and alias.args is not None and alias.tree == tree[:2]:
1752 return alias
1741 return alias
1753 return None
1742 return None
1754
1743
1755 def _expandargs(tree, args):
1744 def _expandargs(tree, args):
1756 """Replace _aliasarg instances with the substitution value of the
1745 """Replace _aliasarg instances with the substitution value of the
1757 same name in args, recursively.
1746 same name in args, recursively.
1758 """
1747 """
1759 if not tree or not isinstance(tree, tuple):
1748 if not tree or not isinstance(tree, tuple):
1760 return tree
1749 return tree
1761 arg = _getaliasarg(tree)
1750 arg = _getaliasarg(tree)
1762 if arg is not None:
1751 if arg is not None:
1763 return args[arg]
1752 return args[arg]
1764 return tuple(_expandargs(t, args) for t in tree)
1753 return tuple(_expandargs(t, args) for t in tree)
1765
1754
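# Editor's example (assumes _aliasarg/_getaliasarg/_expandargs above): a
# protected placeholder is swapped for the caller's parsed argument tree.
_t = ('func', ('symbol', 'ancestors'),
      ('func', ('symbol', '_aliasarg'), ('string', '$1')))
assert _expandargs(_t, {'$1': ('symbol', 'tip')}) == \
       ('func', ('symbol', 'ancestors'), ('symbol', 'tip'))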
1766 def _expandaliases(aliases, tree, expanding, cache):
1755 def _expandaliases(aliases, tree, expanding, cache):
1767 """Expand aliases in tree, recursively.
1756 """Expand aliases in tree, recursively.
1768
1757
1769 'aliases' is a dictionary mapping user defined aliases to
1758 'aliases' is a dictionary mapping user defined aliases to
1770 revsetalias objects.
1759 revsetalias objects.
1771 """
1760 """
1772 if not isinstance(tree, tuple):
1761 if not isinstance(tree, tuple):
1773 # Do not expand raw strings
1762 # Do not expand raw strings
1774 return tree
1763 return tree
1775 alias = _getalias(aliases, tree)
1764 alias = _getalias(aliases, tree)
1776 if alias is not None:
1765 if alias is not None:
1777 if alias in expanding:
1766 if alias in expanding:
1778 raise error.ParseError(_('infinite expansion of revset alias "%s" '
1767 raise error.ParseError(_('infinite expansion of revset alias "%s" '
1779 'detected') % alias.name)
1768 'detected') % alias.name)
1780 expanding.append(alias)
1769 expanding.append(alias)
1781 if alias.name not in cache:
1770 if alias.name not in cache:
1782 cache[alias.name] = _expandaliases(aliases, alias.replacement,
1771 cache[alias.name] = _expandaliases(aliases, alias.replacement,
1783 expanding, cache)
1772 expanding, cache)
1784 result = cache[alias.name]
1773 result = cache[alias.name]
1785 expanding.pop()
1774 expanding.pop()
1786 if alias.args is not None:
1775 if alias.args is not None:
1787 l = getlist(tree[2])
1776 l = getlist(tree[2])
1788 if len(l) != len(alias.args):
1777 if len(l) != len(alias.args):
1789 raise error.ParseError(
1778 raise error.ParseError(
1790 _('invalid number of arguments: %s') % len(l))
1779 _('invalid number of arguments: %s') % len(l))
1791 l = [_expandaliases(aliases, a, [], cache) for a in l]
1780 l = [_expandaliases(aliases, a, [], cache) for a in l]
1792 result = _expandargs(result, dict(zip(alias.args, l)))
1781 result = _expandargs(result, dict(zip(alias.args, l)))
1793 else:
1782 else:
1794 result = tuple(_expandaliases(aliases, t, expanding, cache)
1783 result = tuple(_expandaliases(aliases, t, expanding, cache)
1795 for t in tree)
1784 for t in tree)
1796 return result
1785 return result
1797
1786
1798 def findaliases(ui, tree):
1787 def findaliases(ui, tree):
1799 _checkaliasarg(tree)
1788 _checkaliasarg(tree)
1800 aliases = {}
1789 aliases = {}
1801 for k, v in ui.configitems('revsetalias'):
1790 for k, v in ui.configitems('revsetalias'):
1802 alias = revsetalias(k, v)
1791 alias = revsetalias(k, v)
1803 aliases[alias.name] = alias
1792 aliases[alias.name] = alias
1804 return _expandaliases(aliases, tree, [], {})
1793 return _expandaliases(aliases, tree, [], {})
1805
1794
1806 parse = parser.parser(tokenize, elements).parse
1795 parse = parser.parser(tokenize, elements).parse
1807
1796
1808 def match(ui, spec):
1797 def match(ui, spec):
1809 if not spec:
1798 if not spec:
1810 raise error.ParseError(_("empty query"))
1799 raise error.ParseError(_("empty query"))
1811 tree, pos = parse(spec)
1800 tree, pos = parse(spec)
1812 if (pos != len(spec)):
1801 if (pos != len(spec)):
1813 raise error.ParseError(_("invalid token"), pos)
1802 raise error.ParseError(_("invalid token"), pos)
1814 if ui:
1803 if ui:
1815 tree = findaliases(ui, tree)
1804 tree = findaliases(ui, tree)
1816 weight, tree = optimize(tree, True)
1805 weight, tree = optimize(tree, True)
1817 def mfunc(repo, subset):
1806 def mfunc(repo, subset):
1818 return getset(repo, subset, tree)
1807 return getset(repo, subset, tree)
1819 return mfunc
1808 return mfunc
1820
1809
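# Editor's usage sketch (not runnable standalone -- it assumes a ui and a
# localrepository object supplied by the caller): match() is the entry point
# the rest of Mercurial uses to evaluate a revset.
#
#   m = match(repo.ui, 'heads(default) and not closed()')
#   revs = m(repo, range(len(repo)))   # revision numbers matching the spec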
1821 def formatspec(expr, *args):
1810 def formatspec(expr, *args):
1822 '''
1811 '''
1823 This is a convenience function for using revsets internally, and
1812 This is a convenience function for using revsets internally, and
1824 escapes arguments appropriately. Aliases are intentionally ignored
1813 escapes arguments appropriately. Aliases are intentionally ignored
1825 so that intended expression behavior isn't accidentally subverted.
1814 so that intended expression behavior isn't accidentally subverted.
1826
1815
1827 Supported arguments:
1816 Supported arguments:
1828
1817
1829 %r = revset expression, parenthesized
1818 %r = revset expression, parenthesized
1830 %d = int(arg), no quoting
1819 %d = int(arg), no quoting
1831 %s = string(arg), escaped and single-quoted
1820 %s = string(arg), escaped and single-quoted
1832 %b = arg.branch(), escaped and single-quoted
1821 %b = arg.branch(), escaped and single-quoted
1833 %n = hex(arg), single-quoted
1822 %n = hex(arg), single-quoted
1834 %% = a literal '%'
1823 %% = a literal '%'
1835
1824
1836 Prefixing the type with 'l' specifies a parenthesized list of that type.
1825 Prefixing the type with 'l' specifies a parenthesized list of that type.
1837
1826
1838 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
1827 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
1839 '(10 or 11):: and ((this()) or (that()))'
1828 '(10 or 11):: and ((this()) or (that()))'
1840 >>> formatspec('%d:: and not %d::', 10, 20)
1829 >>> formatspec('%d:: and not %d::', 10, 20)
1841 '10:: and not 20::'
1830 '10:: and not 20::'
1842 >>> formatspec('%ld or %ld', [], [1])
1831 >>> formatspec('%ld or %ld', [], [1])
1843 "_list('') or 1"
1832 "_list('') or 1"
1844 >>> formatspec('keyword(%s)', 'foo\\xe9')
1833 >>> formatspec('keyword(%s)', 'foo\\xe9')
1845 "keyword('foo\\\\xe9')"
1834 "keyword('foo\\\\xe9')"
1846 >>> b = lambda: 'default'
1835 >>> b = lambda: 'default'
1847 >>> b.branch = b
1836 >>> b.branch = b
1848 >>> formatspec('branch(%b)', b)
1837 >>> formatspec('branch(%b)', b)
1849 "branch('default')"
1838 "branch('default')"
1850 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
1839 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
1851 "root(_list('a\\x00b\\x00c\\x00d'))"
1840 "root(_list('a\\x00b\\x00c\\x00d'))"
1852 '''
1841 '''
1853
1842
1854 def quote(s):
1843 def quote(s):
1855 return repr(str(s))
1844 return repr(str(s))
1856
1845
1857 def argtype(c, arg):
1846 def argtype(c, arg):
1858 if c == 'd':
1847 if c == 'd':
1859 return str(int(arg))
1848 return str(int(arg))
1860 elif c == 's':
1849 elif c == 's':
1861 return quote(arg)
1850 return quote(arg)
1862 elif c == 'r':
1851 elif c == 'r':
1863 parse(arg) # make sure syntax errors are confined
1852 parse(arg) # make sure syntax errors are confined
1864 return '(%s)' % arg
1853 return '(%s)' % arg
1865 elif c == 'n':
1854 elif c == 'n':
1866 return quote(node.hex(arg))
1855 return quote(node.hex(arg))
1867 elif c == 'b':
1856 elif c == 'b':
1868 return quote(arg.branch())
1857 return quote(arg.branch())
1869
1858
1870 def listexp(s, t):
1859 def listexp(s, t):
1871 l = len(s)
1860 l = len(s)
1872 if l == 0:
1861 if l == 0:
1873 return "_list('')"
1862 return "_list('')"
1874 elif l == 1:
1863 elif l == 1:
1875 return argtype(t, s[0])
1864 return argtype(t, s[0])
1876 elif t == 'd':
1865 elif t == 'd':
1877 return "_list('%s')" % "\0".join(str(int(a)) for a in s)
1866 return "_list('%s')" % "\0".join(str(int(a)) for a in s)
1878 elif t == 's':
1867 elif t == 's':
1879 return "_list('%s')" % "\0".join(s)
1868 return "_list('%s')" % "\0".join(s)
1880 elif t == 'n':
1869 elif t == 'n':
1881 return "_list('%s')" % "\0".join(node.hex(a) for a in s)
1870 return "_list('%s')" % "\0".join(node.hex(a) for a in s)
1882 elif t == 'b':
1871 elif t == 'b':
1883 return "_list('%s')" % "\0".join(a.branch() for a in s)
1872 return "_list('%s')" % "\0".join(a.branch() for a in s)
1884
1873
1885 m = l // 2
1874 m = l // 2
1886 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
1875 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
1887
1876
1888 ret = ''
1877 ret = ''
1889 pos = 0
1878 pos = 0
1890 arg = 0
1879 arg = 0
1891 while pos < len(expr):
1880 while pos < len(expr):
1892 c = expr[pos]
1881 c = expr[pos]
1893 if c == '%':
1882 if c == '%':
1894 pos += 1
1883 pos += 1
1895 d = expr[pos]
1884 d = expr[pos]
1896 if d == '%':
1885 if d == '%':
1897 ret += d
1886 ret += d
1898 elif d in 'dsnbr':
1887 elif d in 'dsnbr':
1899 ret += argtype(d, args[arg])
1888 ret += argtype(d, args[arg])
1900 arg += 1
1889 arg += 1
1901 elif d == 'l':
1890 elif d == 'l':
1902 # a list of some type
1891 # a list of some type
1903 pos += 1
1892 pos += 1
1904 d = expr[pos]
1893 d = expr[pos]
1905 ret += listexp(list(args[arg]), d)
1894 ret += listexp(list(args[arg]), d)
1906 arg += 1
1895 arg += 1
1907 else:
1896 else:
1908 raise util.Abort('unexpected revspec format character %s' % d)
1897 raise util.Abort('unexpected revspec format character %s' % d)
1909 else:
1898 else:
1910 ret += c
1899 ret += c
1911 pos += 1
1900 pos += 1
1912
1901
1913 return ret
1902 return ret
1914
1903
1915 def prettyformat(tree):
1904 def prettyformat(tree):
1916 def _prettyformat(tree, level, lines):
1905 def _prettyformat(tree, level, lines):
1917 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
1906 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
1918 lines.append((level, str(tree)))
1907 lines.append((level, str(tree)))
1919 else:
1908 else:
1920 lines.append((level, '(%s' % tree[0]))
1909 lines.append((level, '(%s' % tree[0]))
1921 for s in tree[1:]:
1910 for s in tree[1:]:
1922 _prettyformat(s, level + 1, lines)
1911 _prettyformat(s, level + 1, lines)
1923 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
1912 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
1924
1913
1925 lines = []
1914 lines = []
1926 _prettyformat(tree, 0, lines)
1915 _prettyformat(tree, 0, lines)
1927 output = '\n'.join((' '*l + s) for l, s in lines)
1916 output = '\n'.join((' '*l + s) for l, s in lines)
1928 return output
1917 return output
1929
1918
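# Editor's example (assumes parse/prettyformat above; roughly what
# 'hg debugrevspec' shows): the parse tree is rendered one node per line,
# indented by nesting depth.
_t, _pos = parse('p1(tip)')
print prettyformat(_t)
# (func
#   ('symbol', 'p1')
#   ('symbol', 'tip'))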
1930 # tell hggettext to extract docstrings from these functions:
1919 # tell hggettext to extract docstrings from these functions:
1931 i18nfunctions = symbols.values()
1920 i18nfunctions = symbols.values()
@@ -1,1805 +1,1805 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from i18n import _
16 from i18n import _
17 import error, osutil, encoding, collections
17 import error, osutil, encoding, collections
18 import errno, re, shutil, sys, tempfile, traceback
18 import errno, re, shutil, sys, tempfile, traceback
19 import os, time, datetime, calendar, textwrap, signal
19 import os, time, datetime, calendar, textwrap, signal
20 import imp, socket, urllib
20 import imp, socket, urllib
21
21
22 if os.name == 'nt':
22 if os.name == 'nt':
23 import windows as platform
23 import windows as platform
24 else:
24 else:
25 import posix as platform
25 import posix as platform
26
26
27 cachestat = platform.cachestat
27 cachestat = platform.cachestat
28 checkexec = platform.checkexec
28 checkexec = platform.checkexec
29 checklink = platform.checklink
29 checklink = platform.checklink
30 copymode = platform.copymode
30 copymode = platform.copymode
31 executablepath = platform.executablepath
31 executablepath = platform.executablepath
32 expandglobs = platform.expandglobs
32 expandglobs = platform.expandglobs
33 explainexit = platform.explainexit
33 explainexit = platform.explainexit
34 findexe = platform.findexe
34 findexe = platform.findexe
35 gethgcmd = platform.gethgcmd
35 gethgcmd = platform.gethgcmd
36 getuser = platform.getuser
36 getuser = platform.getuser
37 groupmembers = platform.groupmembers
37 groupmembers = platform.groupmembers
38 groupname = platform.groupname
38 groupname = platform.groupname
39 hidewindow = platform.hidewindow
39 hidewindow = platform.hidewindow
40 isexec = platform.isexec
40 isexec = platform.isexec
41 isowner = platform.isowner
41 isowner = platform.isowner
42 localpath = platform.localpath
42 localpath = platform.localpath
43 lookupreg = platform.lookupreg
43 lookupreg = platform.lookupreg
44 makedir = platform.makedir
44 makedir = platform.makedir
45 nlinks = platform.nlinks
45 nlinks = platform.nlinks
46 normpath = platform.normpath
46 normpath = platform.normpath
47 normcase = platform.normcase
47 normcase = platform.normcase
48 openhardlinks = platform.openhardlinks
48 openhardlinks = platform.openhardlinks
49 oslink = platform.oslink
49 oslink = platform.oslink
50 parsepatchoutput = platform.parsepatchoutput
50 parsepatchoutput = platform.parsepatchoutput
51 pconvert = platform.pconvert
51 pconvert = platform.pconvert
52 popen = platform.popen
52 popen = platform.popen
53 posixfile = platform.posixfile
53 posixfile = platform.posixfile
54 quotecommand = platform.quotecommand
54 quotecommand = platform.quotecommand
55 realpath = platform.realpath
55 realpath = platform.realpath
56 rename = platform.rename
56 rename = platform.rename
57 samedevice = platform.samedevice
57 samedevice = platform.samedevice
58 samefile = platform.samefile
58 samefile = platform.samefile
59 samestat = platform.samestat
59 samestat = platform.samestat
60 setbinary = platform.setbinary
60 setbinary = platform.setbinary
61 setflags = platform.setflags
61 setflags = platform.setflags
62 setsignalhandler = platform.setsignalhandler
62 setsignalhandler = platform.setsignalhandler
63 shellquote = platform.shellquote
63 shellquote = platform.shellquote
64 spawndetached = platform.spawndetached
64 spawndetached = platform.spawndetached
65 split = platform.split
65 split = platform.split
66 sshargs = platform.sshargs
66 sshargs = platform.sshargs
67 statfiles = platform.statfiles
67 statfiles = platform.statfiles
68 termwidth = platform.termwidth
68 termwidth = platform.termwidth
69 testpid = platform.testpid
69 testpid = platform.testpid
70 umask = platform.umask
70 umask = platform.umask
71 unlink = platform.unlink
71 unlink = platform.unlink
72 unlinkpath = platform.unlinkpath
72 unlinkpath = platform.unlinkpath
73 username = platform.username
73 username = platform.username
74
74
75 # Python compatibility
75 # Python compatibility
76
76
77 _notset = object()
77 _notset = object()
78
78
79 def safehasattr(thing, attr):
79 def safehasattr(thing, attr):
80 return getattr(thing, attr, _notset) is not _notset
80 return getattr(thing, attr, _notset) is not _notset
81
81
82 def sha1(s=''):
82 def sha1(s=''):
83 '''
83 '''
84 Low-overhead wrapper around Python's SHA support
84 Low-overhead wrapper around Python's SHA support
85
85
86 >>> f = _fastsha1
86 >>> f = _fastsha1
87 >>> a = sha1()
87 >>> a = sha1()
88 >>> a = f()
88 >>> a = f()
89 >>> a.hexdigest()
89 >>> a.hexdigest()
90 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
90 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
91 '''
91 '''
92
92
93 return _fastsha1(s)
93 return _fastsha1(s)
94
94
95 def _fastsha1(s=''):
95 def _fastsha1(s=''):
96 # This function will import sha1 from hashlib or sha (whichever is
96 # This function will import sha1 from hashlib or sha (whichever is
97 # available) and overwrite itself with it on the first call.
97 # available) and overwrite itself with it on the first call.
98 # Subsequent calls will go directly to the imported function.
98 # Subsequent calls will go directly to the imported function.
99 if sys.version_info >= (2, 5):
99 if sys.version_info >= (2, 5):
100 from hashlib import sha1 as _sha1
100 from hashlib import sha1 as _sha1
101 else:
101 else:
102 from sha import sha as _sha1
102 from sha import sha as _sha1
103 global _fastsha1, sha1
103 global _fastsha1, sha1
104 _fastsha1 = sha1 = _sha1
104 _fastsha1 = sha1 = _sha1
105 return _sha1(s)
105 return _sha1(s)
106
106
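# Editor's sketch (not Mercurial code): the same "replace yourself on the
# first call" trick _fastsha1 uses above, shown with a stand-in module so it
# is self-contained. 'demo_dumps' is a made-up name.
def demo_dumps(obj):
    import json                  # resolved only once, on the first call
    global demo_dumps
    demo_dumps = json.dumps      # later calls bypass this wrapper entirely
    return json.dumps(obj)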
107 try:
107 try:
108 buffer = buffer
108 buffer = buffer
109 except NameError:
109 except NameError:
110 if sys.version_info[0] < 3:
110 if sys.version_info[0] < 3:
111 def buffer(sliceable, offset=0):
111 def buffer(sliceable, offset=0):
112 return sliceable[offset:]
112 return sliceable[offset:]
113 else:
113 else:
114 def buffer(sliceable, offset=0):
114 def buffer(sliceable, offset=0):
115 return memoryview(sliceable)[offset:]
115 return memoryview(sliceable)[offset:]
116
116
117 import subprocess
117 import subprocess
118 closefds = os.name == 'posix'
118 closefds = os.name == 'posix'
119
119
120 def popen2(cmd, env=None, newlines=False):
120 def popen2(cmd, env=None, newlines=False):
121 # Setting bufsize to -1 lets the system decide the buffer size.
121 # Setting bufsize to -1 lets the system decide the buffer size.
122 # The default for bufsize is 0, meaning unbuffered. This leads to
122 # The default for bufsize is 0, meaning unbuffered. This leads to
123 # poor performance on Mac OS X: http://bugs.python.org/issue4194
123 # poor performance on Mac OS X: http://bugs.python.org/issue4194
124 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
124 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
125 close_fds=closefds,
125 close_fds=closefds,
126 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
126 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
127 universal_newlines=newlines,
127 universal_newlines=newlines,
128 env=env)
128 env=env)
129 return p.stdin, p.stdout
129 return p.stdin, p.stdout
130
130
131 def popen3(cmd, env=None, newlines=False):
131 def popen3(cmd, env=None, newlines=False):
132 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
132 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
133 close_fds=closefds,
133 close_fds=closefds,
134 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
134 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
135 stderr=subprocess.PIPE,
135 stderr=subprocess.PIPE,
136 universal_newlines=newlines,
136 universal_newlines=newlines,
137 env=env)
137 env=env)
138 return p.stdin, p.stdout, p.stderr
138 return p.stdin, p.stdout, p.stderr
139
139
140 def version():
140 def version():
141 """Return version information if available."""
141 """Return version information if available."""
142 try:
142 try:
143 import __version__
143 import __version__
144 return __version__.version
144 return __version__.version
145 except ImportError:
145 except ImportError:
146 return 'unknown'
146 return 'unknown'
147
147
148 # used by parsedate
148 # used by parsedate
149 defaultdateformats = (
149 defaultdateformats = (
150 '%Y-%m-%d %H:%M:%S',
150 '%Y-%m-%d %H:%M:%S',
151 '%Y-%m-%d %I:%M:%S%p',
151 '%Y-%m-%d %I:%M:%S%p',
152 '%Y-%m-%d %H:%M',
152 '%Y-%m-%d %H:%M',
153 '%Y-%m-%d %I:%M%p',
153 '%Y-%m-%d %I:%M%p',
154 '%Y-%m-%d',
154 '%Y-%m-%d',
155 '%m-%d',
155 '%m-%d',
156 '%m/%d',
156 '%m/%d',
157 '%m/%d/%y',
157 '%m/%d/%y',
158 '%m/%d/%Y',
158 '%m/%d/%Y',
159 '%a %b %d %H:%M:%S %Y',
159 '%a %b %d %H:%M:%S %Y',
160 '%a %b %d %I:%M:%S%p %Y',
160 '%a %b %d %I:%M:%S%p %Y',
161 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
161 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
162 '%b %d %H:%M:%S %Y',
162 '%b %d %H:%M:%S %Y',
163 '%b %d %I:%M:%S%p %Y',
163 '%b %d %I:%M:%S%p %Y',
164 '%b %d %H:%M:%S',
164 '%b %d %H:%M:%S',
165 '%b %d %I:%M:%S%p',
165 '%b %d %I:%M:%S%p',
166 '%b %d %H:%M',
166 '%b %d %H:%M',
167 '%b %d %I:%M%p',
167 '%b %d %I:%M%p',
168 '%b %d %Y',
168 '%b %d %Y',
169 '%b %d',
169 '%b %d',
170 '%H:%M:%S',
170 '%H:%M:%S',
171 '%I:%M:%S%p',
171 '%I:%M:%S%p',
172 '%H:%M',
172 '%H:%M',
173 '%I:%M%p',
173 '%I:%M%p',
174 )
174 )
175
175
176 extendeddateformats = defaultdateformats + (
176 extendeddateformats = defaultdateformats + (
177 "%Y",
177 "%Y",
178 "%Y-%m",
178 "%Y-%m",
179 "%b",
179 "%b",
180 "%b %Y",
180 "%b %Y",
181 )
181 )
182
182
183 def cachefunc(func):
183 def cachefunc(func):
184 '''cache the result of function calls'''
184 '''cache the result of function calls'''
185 # XXX doesn't handle keyword args
185 # XXX doesn't handle keyword args
186 cache = {}
186 cache = {}
187 if func.func_code.co_argcount == 1:
187 if func.func_code.co_argcount == 1:
188 # we gain a small amount of time because
188 # we gain a small amount of time because
189 # we don't need to pack/unpack the list
189 # we don't need to pack/unpack the list
190 def f(arg):
190 def f(arg):
191 if arg not in cache:
191 if arg not in cache:
192 cache[arg] = func(arg)
192 cache[arg] = func(arg)
193 return cache[arg]
193 return cache[arg]
194 else:
194 else:
195 def f(*args):
195 def f(*args):
196 if args not in cache:
196 if args not in cache:
197 cache[args] = func(*args)
197 cache[args] = func(*args)
198 return cache[args]
198 return cache[args]
199
199
200 return f
200 return f
201
201
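# Editor's example (assumes cachefunc above): results are memoized by
# positional arguments only; keyword arguments are not handled, as the XXX
# note says.
_calls = []
def _double(x):
    _calls.append(x)
    return x * 2
_double = cachefunc(_double)
assert _double(3) == 6 and _double(3) == 6 and _calls == [3]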
202 try:
202 try:
203 collections.deque.remove
203 collections.deque.remove
204 deque = collections.deque
204 deque = collections.deque
205 except AttributeError:
205 except AttributeError:
206 # python 2.4 lacks deque.remove
206 # python 2.4 lacks deque.remove
207 class deque(collections.deque):
207 class deque(collections.deque):
208 def remove(self, val):
208 def remove(self, val):
209 for i, v in enumerate(self):
209 for i, v in enumerate(self):
210 if v == val:
210 if v == val:
211 del self[i]
211 del self[i]
212 break
212 break
213
213
214 def lrucachefunc(func):
214 def lrucachefunc(func):
215 '''cache most recent results of function calls'''
215 '''cache most recent results of function calls'''
216 cache = {}
216 cache = {}
217 order = deque()
217 order = deque()
218 if func.func_code.co_argcount == 1:
218 if func.func_code.co_argcount == 1:
219 def f(arg):
219 def f(arg):
220 if arg not in cache:
220 if arg not in cache:
221 if len(cache) > 20:
221 if len(cache) > 20:
222 del cache[order.popleft()]
222 del cache[order.popleft()]
223 cache[arg] = func(arg)
223 cache[arg] = func(arg)
224 else:
224 else:
225 order.remove(arg)
225 order.remove(arg)
226 order.append(arg)
226 order.append(arg)
227 return cache[arg]
227 return cache[arg]
228 else:
228 else:
229 def f(*args):
229 def f(*args):
230 if args not in cache:
230 if args not in cache:
231 if len(cache) > 20:
231 if len(cache) > 20:
232 del cache[order.popleft()]
232 del cache[order.popleft()]
233 cache[args] = func(*args)
233 cache[args] = func(*args)
234 else:
234 else:
235 order.remove(args)
235 order.remove(args)
236 order.append(args)
236 order.append(args)
237 return cache[args]
237 return cache[args]
238
238
239 return f
239 return f
240
240
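# Editor's example (assumes lrucachefunc above): like cachefunc, but only
# the ~20 most recently used results are kept, so the cache cannot grow
# without bound.
_seen = []
_square = lrucachefunc(lambda n: _seen.append(n) or n * n)
_square(4); _square(4)
assert _seen == [4]   # the second call was served from the cache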
241 class propertycache(object):
241 class propertycache(object):
242 def __init__(self, func):
242 def __init__(self, func):
243 self.func = func
243 self.func = func
244 self.name = func.__name__
244 self.name = func.__name__
245 def __get__(self, obj, type=None):
245 def __get__(self, obj, type=None):
246 result = self.func(obj)
246 result = self.func(obj)
247 setattr(obj, self.name, result)
247 setattr(obj, self.name, result)
248 return result
248 return result
249
249
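# Editor's example (assumes propertycache above): the wrapped method runs
# once; its result then shadows the descriptor as an instance attribute, so
# later reads are plain attribute lookups.
class _demo(object):
    computed = []
    @propertycache
    def answer(self):
        self.computed.append(1)
        return 42
_d = _demo()
assert _d.answer == 42 and _d.answer == 42 and _demo.computed == [1]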
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    return pout

def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass

filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }

def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)

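# Illustrative sketch (shell commands are made up): filter() dispatches on
# the prefix registered in filtertable, otherwise falls back to a pipe.
#
#   filter('b\na\n', 'pipe: sort')                    # explicit pipe filter
#   filter('b\na\n', 'sort')                          # same thing, default path
#   filter(data, 'tempfile: sort INFILE > OUTFILE')   # via temporary files
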
def binary(s):
    """return true if a string is binary data"""
    return bool(s and '\0' in s)

def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield ''.join(buf)

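# Illustrative sketch (sizes assumed for the example): increasingchunks()
# rechunks a stream so early chunks are small and later ones grow toward
# max, which keeps progress responsive without paying per-chunk overhead
# for the whole transfer.
#
#   for chunk in increasingchunks(filechunkiter(fp, size=512)):
#       pass  # early yields are around 1 KB; later yields grow toward
#             # 64 KB (the final chunk may be smaller)
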
Abort = error.Abort

def always(fn):
    return True

def never(fn):
    return False

def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b) or '.'

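# Illustrative sketch (hypothetical paths, POSIX os.sep assumed):
#
#   pathto('/repo', 'a/b', 'a/c/x')   # -> '../c/x'
#   pathto('/repo', '', 'a/c/x')      # -> 'a/c/x' (just localpath of n2)
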
_hgexecutable = None

def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable

def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path

def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9':
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or out == sys.__stdout__:
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            for line in proc.stdout:
                out.write(line)
            proc.wait()
            rc = proc.returncode
        if sys.platform == 'OpenVMS' and rc & 1:
            rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        try:
            onerr.warn(errmsg + '\n')
        except AttributeError:
            raise onerr(errmsg)
    return rc

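# Illustrative sketch (command, paths and buffer object are assumptions):
# with onerr set to an exception class such as Abort a failing command
# raises; with a ui-like object that has warn() it only prints and returns
# the status. out captures the child's stdout and stderr.
#
#   rc = system('make doc', environ={'LANG': 'C'}, cwd='/tmp/build',
#               onerr=Abort, errprefix='doc build failed')
#   buf = StringIO()          # assumes an in-memory file-like object
#   system('hg version', out=buf)
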
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check

def copyfile(src, dest):
    "copy a file, preserving mode and atime/mtime"
    if os.path.islink(src):
        try:
            os.unlink(dest)
        except OSError:
            pass
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copymode(src, dest)
        except shutil.Error, inst:
            raise Abort(str(inst))

def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible"""

    if hardlink is None:
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    num = 0
    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            hardlink, n = copyfiles(srcname, dstname, hardlink)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1

    return hardlink, num

_winreservednames = '''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    '''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    '''
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t

if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename

def makelock(info, pathname):
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)

def readlock(pathname):
    try:
        return os.readlink(pathname)
    except OSError, why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r

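# Illustrative sketch (the lock path and holder string are hypothetical):
# makelock() stores the holder description in a symlink target where
# symlinks exist, falling back to an exclusively created regular file;
# readlock() mirrors that fallback when reading the value back.
#
#   makelock('somehost:12345', '/repo/.hg/store/lock')
#   readlock('/repo/.hg/store/lock')   # -> 'somehost:12345'
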
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)

# File system features

def checkcase(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
        if b == b2:
            return True # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.stat(p2)
        if s2 == s1:
            return False
        return True
    except OSError:
        return True

try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False

def compilere(pat):
    '''Compile a regular expression, using re2 if possible

    For best performance, use only re2-compatible regexp features.'''
    global _re2
    if _re2 is None:
        try:
            re2.compile
            _re2 = True
        except ImportError:
            _re2 = False
    if _re2:
        try:
            return re2.compile(pat)
        except re2.error:
            pass
    return re.compile(pat)

_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def find(p, contents):
        for n in contents:
            if normcase(n) == p:
                return n
        return None

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # (str.replace returns a new string, so the result must be reassigned
    # for the escaping to take effect in the character classes below)
    seps = seps.replace('\\', '\\\\')
    pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = os.listdir(dir)
        contents = _fspathcache[dir]

        found = find(part, contents)
        if not found:
            # retry "once per directory" per "dirstate.walk", which
            # may take place for each patch of "hg qpush", for example
            contents = os.listdir(dir)
            _fspathcache[dir] = contents
            found = find(part, contents)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)

def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        try:
            oslink(f1, f2)
        except OSError:
            return False

        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    finally:
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass

    return False

def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)

def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if needed.'''
    return path.split(os.sep)

def gui():
    '''Are we running in a GUI?'''
    if sys.platform == 'darwin':
        if 'SSH_CONNECTION' in os.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        return os.name == "nt" or os.environ.get("DISPLAY")

def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp

class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

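# Illustrative sketch (the target file name is hypothetical): typical
# atomictempfile usage is write-then-close; anything short of close()
# leaves the original file untouched.
#
#   f = atomictempfile('data.txt')
#   f.write('new contents\n')
#   f.close()      # the rename makes the update visible atomically
#   # f.discard() (or dropping the object) would throw the writes away
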
def makedirs(name, mode=None):
    """recursive directory creation with parent mode inheritance"""
    try:
        os.mkdir(name)
    except OSError, err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode)
        os.mkdir(name)
    if mode is not None:
        os.chmod(name, mode)

def readfile(path):
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()

def writefile(path, text):
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()

def appendfile(path, text):
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()

class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = deque()

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            chunk = queue.popleft()
            left -= len(chunk)
            if left < 0:
                queue.appendleft(chunk[left:])
                buf.append(chunk[:left])
            else:
                buf.append(chunk)

        return ''.join(buf)

def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s

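# Illustrative sketch (sizes assumed): the two helpers are commonly
# combined - filechunkiter() turns a file into fixed-size chunks and
# chunkbuffer() lets callers read arbitrary byte counts back out.
#
#   cb = chunkbuffer(filechunkiter(fp, size=4096))
#   header = cb.read(12)      # exactly 12 bytes, unless the input ran dry
#   rest = cb.read(2 ** 20)   # may be shorter at end of input
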
def makedate():
    ct = time.time()
    if ct < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % ct, hint=hint)
    delta = (datetime.datetime.utcfromtimestamp(ct) -
             datetime.datetime.fromtimestamp(ct))
    tz = delta.days * 86400 + delta.seconds
    return ct, tz

def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format:
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    try:
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        t = time.gmtime(sys.maxint)
    s = time.strftime(format, t)
    return s

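# Illustrative sketch (the timestamp and offset are made-up values):
# makedate() pairs a timestamp with the local UTC offset in seconds, and
# datestr() renders such a pair, with %1/%2 expanding to the signed hour
# and minute parts of the offset.
#
#   when = (1350000000, -7200)             # two hours east of UTC
#   datestr(when)                          # 'Fri Oct 12 02:00:00 2012 +0200'
#   datestr(when, format='%Y-%m-%d %1%2')  # '2012-10-12 +0200'
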
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into ISO 8601 date."""
    return datestr(date, format='%Y-%m-%d')

def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    def timezone(string):
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            sign = (tz[0] == "+") and 1 or -1
            hours = int(tz[1:3])
            minutes = int(tz[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tz == "GMT" or tz == "UTC":
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset

def parsedate(date, formats=None, bias={}):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()
    try:
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset

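# Illustrative sketch (dates made up; the second form assumes a matching
# entry in defaultdateformats): parsedate() accepts the raw
# "unixtime offset" form as well as human-readable strings, filling
# unspecified fields from bias and today's date.
#
#   parsedate('1350000000 -7200')         # -> (1350000000, -7200)
#   parsedate('2012-10-12 02:00 +0200')   # parsed via strdate()
#   parsedate((1350000000, -7200))        # tuples pass straight through
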
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        d = dict(mb="1", d="1")
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop

def shortuser(user):
    """Return a short representation of a user name or email address."""
    f = user.find('@')
    if f >= 0:
        user = user[:f]
    f = user.find('<')
    if f >= 0:
        user = user[f + 1:]
    f = user.find(' ')
    if f >= 0:
        user = user[:f]
    f = user.find('.')
    if f >= 0:
        user = user[:f]
    return user

def emailuser(user):
    """Return the user portion of an email address."""
    f = user.find('@')
    if f >= 0:
        user = user[:f]
    f = user.find('<')
    if f >= 0:
        user = user[f + 1:]
    return user

def email(author):
    '''get email of author.'''
    r = author.find('>')
    if r == -1:
        r = None
    return author[author.find('<') + 1:r]

def _ellipsis(text, maxlength):
    if len(text) <= maxlength:
        return text, False
    else:
        return "%s..." % (text[:maxlength - 3]), True

def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    try:
        # use unicode not to split at intermediate multi-byte sequence
        utext, truncated = _ellipsis(text.decode(encoding.encoding),
                                     maxlength)
        if not truncated:
            return text
        return utext.encode(encoding.encoding)
    except (UnicodeDecodeError, UnicodeEncodeError):
        return _ellipsis(text, maxlength)[0]

_byteunits = (
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )

def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    for multiplier, divisor, format in _byteunits:
        if nbytes >= divisor * multiplier:
            return format % (nbytes / float(divisor))
    return _byteunits[-1][2] % nbytes

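# Illustrative sketch: the table above is ordered so the first matching
# (multiplier, divisor) pair picks both the unit and the precision.
#
#   bytecount(500)          # '500 bytes'
#   bytecount(100 * 1024)   # '100 KB'   (%.0f row: 100 * 1 KB matched)
#   bytecount(1536)         # '1.50 KB'  (%.2f row: only 1 * 1 KB matched)
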
1226 def uirepr(s):
1226 def uirepr(s):
1227 # Avoid double backslash in Windows path repr()
1227 # Avoid double backslash in Windows path repr()
1228 return repr(s).replace('\\\\', '\\')
1228 return repr(s).replace('\\\\', '\\')
1229
1229
1230 # delay import of textwrap
1230 # delay import of textwrap
1231 def MBTextWrapper(**kwargs):
1231 def MBTextWrapper(**kwargs):
1232 class tw(textwrap.TextWrapper):
1232 class tw(textwrap.TextWrapper):
1233 """
1233 """
1234 Extend TextWrapper for width-awareness.
1234 Extend TextWrapper for width-awareness.
1235
1235
1236 Neither number of 'bytes' in any encoding nor 'characters' is
1236 Neither number of 'bytes' in any encoding nor 'characters' is
1237 appropriate to calculate terminal columns for specified string.
1237 appropriate to calculate terminal columns for specified string.
1238
1238
1239 Original TextWrapper implementation uses built-in 'len()' directly,
1239 Original TextWrapper implementation uses built-in 'len()' directly,
1240 so overriding is needed to use width information of each characters.
1240 so overriding is needed to use width information of each characters.
1241
1241
1242 In addition, characters classified into 'ambiguous' width are
1242 In addition, characters classified into 'ambiguous' width are
1243 treated as wide in East Asian area, but as narrow in other.
1243 treated as wide in East Asian area, but as narrow in other.
1244
1244
1245 This requires use decision to determine width of such characters.
1245 This requires use decision to determine width of such characters.
1246 """
1246 """
1247 def __init__(self, **kwargs):
1247 def __init__(self, **kwargs):
1248 textwrap.TextWrapper.__init__(self, **kwargs)
1248 textwrap.TextWrapper.__init__(self, **kwargs)
1249
1249
1250 # for compatibility between 2.4 and 2.6
1250 # for compatibility between 2.4 and 2.6
1251 if getattr(self, 'drop_whitespace', None) is None:
1251 if getattr(self, 'drop_whitespace', None) is None:
1252 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1252 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1253
1253
1254 def _cutdown(self, ucstr, space_left):
1254 def _cutdown(self, ucstr, space_left):
1255 l = 0
1255 l = 0
1256 colwidth = encoding.ucolwidth
1256 colwidth = encoding.ucolwidth
1257 for i in xrange(len(ucstr)):
1257 for i in xrange(len(ucstr)):
1258 l += colwidth(ucstr[i])
1258 l += colwidth(ucstr[i])
1259 if space_left < l:
1259 if space_left < l:
1260 return (ucstr[:i], ucstr[i:])
1260 return (ucstr[:i], ucstr[i:])
1261 return ucstr, ''
1261 return ucstr, ''
1262
1262
1263 # overriding of base class
1263 # overriding of base class
1264 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1264 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1265 space_left = max(width - cur_len, 1)
1265 space_left = max(width - cur_len, 1)
1266
1266
1267 if self.break_long_words:
1267 if self.break_long_words:
1268 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1268 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1269 cur_line.append(cut)
1269 cur_line.append(cut)
1270 reversed_chunks[-1] = res
1270 reversed_chunks[-1] = res
1271 elif not cur_line:
1271 elif not cur_line:
1272 cur_line.append(reversed_chunks.pop())
1272 cur_line.append(reversed_chunks.pop())
1273
1273
1274 # this overriding code is imported from TextWrapper of python 2.6
1274 # this overriding code is imported from TextWrapper of python 2.6
1275 # to calculate columns of string by 'encoding.ucolwidth()'
1275 # to calculate columns of string by 'encoding.ucolwidth()'
1276 def _wrap_chunks(self, chunks):
1276 def _wrap_chunks(self, chunks):
1277 colwidth = encoding.ucolwidth
1277 colwidth = encoding.ucolwidth
1278
1278
1279 lines = []
1279 lines = []
1280 if self.width <= 0:
1280 if self.width <= 0:
1281 raise ValueError("invalid width %r (must be > 0)" % self.width)
1281 raise ValueError("invalid width %r (must be > 0)" % self.width)
1282
1282
1283 # Arrange in reverse order so items can be efficiently popped
1283 # Arrange in reverse order so items can be efficiently popped
1284 # from a stack of chunks.
1284 # from a stack of chunks.
1285 chunks.reverse()
1285 chunks.reverse()
1286
1286
1287 while chunks:
1287 while chunks:
1288
1288
1289 # Start the list of chunks that will make up the current line.
1289 # Start the list of chunks that will make up the current line.
1290 # cur_len is just the length of all the chunks in cur_line.
1290 # cur_len is just the length of all the chunks in cur_line.
1291 cur_line = []
1291 cur_line = []
1292 cur_len = 0
1292 cur_len = 0
1293
1293
1294 # Figure out which static string will prefix this line.
1294 # Figure out which static string will prefix this line.
1295 if lines:
1295 if lines:
1296 indent = self.subsequent_indent
1296 indent = self.subsequent_indent
1297 else:
1297 else:
1298 indent = self.initial_indent
1298 indent = self.initial_indent
1299
1299
1300 # Maximum width for this line.
1300 # Maximum width for this line.
1301 width = self.width - len(indent)
1301 width = self.width - len(indent)
1302
1302
1303 # First chunk on line is whitespace -- drop it, unless this
1303 # First chunk on line is whitespace -- drop it, unless this
1304 # is the very beginning of the text (i.e. no lines started yet).
1304 # is the very beginning of the text (i.e. no lines started yet).
1305 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1305 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1306 del chunks[-1]
1306 del chunks[-1]
1307
1307
1308 while chunks:
1308 while chunks:
1309 l = colwidth(chunks[-1])
1309 l = colwidth(chunks[-1])
1310
1310
1311 # Can at least squeeze this chunk onto the current line.
1311 # Can at least squeeze this chunk onto the current line.
1312 if cur_len + l <= width:
1312 if cur_len + l <= width:
1313 cur_line.append(chunks.pop())
1313 cur_line.append(chunks.pop())
1314 cur_len += l
1314 cur_len += l
1315
1315
1316 # Nope, this line is full.
1316 # Nope, this line is full.
1317 else:
1317 else:
1318 break
1318 break
1319
1319
1320 # The current line is full, and the next chunk is too big to
1320 # The current line is full, and the next chunk is too big to
1321 # fit on *any* line (not just this one).
1321 # fit on *any* line (not just this one).
1322 if chunks and colwidth(chunks[-1]) > width:
1322 if chunks and colwidth(chunks[-1]) > width:
1323 self._handle_long_word(chunks, cur_line, cur_len, width)
1323 self._handle_long_word(chunks, cur_line, cur_len, width)
1324
1324
1325 # If the last chunk on this line is all whitespace, drop it.
1325 # If the last chunk on this line is all whitespace, drop it.
1326 if (self.drop_whitespace and
1326 if (self.drop_whitespace and
1327 cur_line and cur_line[-1].strip() == ''):
1327 cur_line and cur_line[-1].strip() == ''):
1328 del cur_line[-1]
1328 del cur_line[-1]
1329
1329
1330 # Convert current line back to a string and store it in list
1330 # Convert current line back to a string and store it in list
1331 # of all lines (return value).
1331 # of all lines (return value).
1332 if cur_line:
1332 if cur_line:
1333 lines.append(indent + ''.join(cur_line))
1333 lines.append(indent + ''.join(cur_line))
1334
1334
1335 return lines
1335 return lines
1336
1336
1337 global MBTextWrapper
1337 global MBTextWrapper
1338 MBTextWrapper = tw
1338 MBTextWrapper = tw
1339 return tw(**kwargs)
1339 return tw(**kwargs)
1340
1340
1341 def wrap(line, width, initindent='', hangindent=''):
1341 def wrap(line, width, initindent='', hangindent=''):
1342 maxindent = max(len(hangindent), len(initindent))
1342 maxindent = max(len(hangindent), len(initindent))
1343 if width <= maxindent:
1343 if width <= maxindent:
1344 # adjust for weird terminal size
1344 # adjust for weird terminal size
1345 width = max(78, maxindent + 1)
1345 width = max(78, maxindent + 1)
1346 line = line.decode(encoding.encoding, encoding.encodingmode)
1346 line = line.decode(encoding.encoding, encoding.encodingmode)
1347 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1347 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1348 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1348 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1349 wrapper = MBTextWrapper(width=width,
1349 wrapper = MBTextWrapper(width=width,
1350 initial_indent=initindent,
1350 initial_indent=initindent,
1351 subsequent_indent=hangindent)
1351 subsequent_indent=hangindent)
1352 return wrapper.fill(line).encode(encoding.encoding)
1352 return wrapper.fill(line).encode(encoding.encoding)
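Illustrative usage of wrap() (not taken from the module's doctests; for plain ASCII input every character is one column wide, so the result matches what the standard textwrap module would produce):

    >>> wrap('The quick brown fox', 10)
    'The quick\nbrown fox'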
1353
1353
1354 def iterlines(iterator):
1354 def iterlines(iterator):
1355 for chunk in iterator:
1355 for chunk in iterator:
1356 for line in chunk.splitlines():
1356 for line in chunk.splitlines():
1357 yield line
1357 yield line
1358
1358
1359 def expandpath(path):
1359 def expandpath(path):
1360 return os.path.expanduser(os.path.expandvars(path))
1360 return os.path.expanduser(os.path.expandvars(path))
1361
1361
1362 def hgcmd():
1362 def hgcmd():
1363 """Return the command used to execute current hg
1363 """Return the command used to execute current hg
1364
1364
1365 This is different from hgexecutable() because on Windows we want
1365 This is different from hgexecutable() because on Windows we want
1366 to avoid things opening new shell windows like batch files, so we
1366 to avoid things opening new shell windows like batch files, so we
1367 get either the python call or current executable.
1367 get either the python call or current executable.
1368 """
1368 """
1369 if mainfrozen():
1369 if mainfrozen():
1370 return [sys.executable]
1370 return [sys.executable]
1371 return gethgcmd()
1371 return gethgcmd()
1372
1372
1373 def rundetached(args, condfn):
1373 def rundetached(args, condfn):
1374 """Execute the argument list in a detached process.
1374 """Execute the argument list in a detached process.
1375
1375
1376 condfn is a callable which is called repeatedly and should return
1376 condfn is a callable which is called repeatedly and should return
1377 True once the child process is known to have started successfully.
1377 True once the child process is known to have started successfully.
1378 At this point, the child process PID is returned. If the child
1378 At this point, the child process PID is returned. If the child
1379 process fails to start or finishes before condfn() evaluates to
1379 process fails to start or finishes before condfn() evaluates to
1380 True, return -1.
1380 True, return -1.
1381 """
1381 """
1382 # Windows case is easier because the child process is either
1382 # Windows case is easier because the child process is either
1383 # successfully starting and validating the condition or exiting
1383 # successfully starting and validating the condition or exiting
1384 # on failure. We just poll on its PID. On Unix, if the child
1384 # on failure. We just poll on its PID. On Unix, if the child
1385 # process fails to start, it will be left in a zombie state until
1385 # process fails to start, it will be left in a zombie state until
1386 # the parent waits on it, which we cannot do since we expect a long
1386 # the parent waits on it, which we cannot do since we expect a long
1387 # running process on success. Instead we listen for SIGCHLD telling
1387 # running process on success. Instead we listen for SIGCHLD telling
1388 # us our child process terminated.
1388 # us our child process terminated.
1389 terminated = set()
1389 terminated = set()
1390 def handler(signum, frame):
1390 def handler(signum, frame):
1391 terminated.add(os.wait())
1391 terminated.add(os.wait())
1392 prevhandler = None
1392 prevhandler = None
1393 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1393 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1394 if SIGCHLD is not None:
1394 if SIGCHLD is not None:
1395 prevhandler = signal.signal(SIGCHLD, handler)
1395 prevhandler = signal.signal(SIGCHLD, handler)
1396 try:
1396 try:
1397 pid = spawndetached(args)
1397 pid = spawndetached(args)
1398 while not condfn():
1398 while not condfn():
1399 if ((pid in terminated or not testpid(pid))
1399 if ((pid in terminated or not testpid(pid))
1400 and not condfn()):
1400 and not condfn()):
1401 return -1
1401 return -1
1402 time.sleep(0.1)
1402 time.sleep(0.1)
1403 return pid
1403 return pid
1404 finally:
1404 finally:
1405 if prevhandler is not None:
1405 if prevhandler is not None:
1406 signal.signal(signal.SIGCHLD, prevhandler)
1406 signal.signal(signal.SIGCHLD, prevhandler)
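A hedged sketch of how a caller might use rundetached(); the daemon command and the pid-file based readiness probe below are made up for illustration:

    import os

    def ready():
        # hypothetical readiness check: the detached daemon is expected to
        # write this pid file once it is listening
        return os.path.exists('/tmp/mydaemon.pid')

    pid = rundetached(['mydaemon', '--pid-file', '/tmp/mydaemon.pid'], ready)
    if pid < 0:
        raise RuntimeError('daemon failed to start')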
1407
1407
1408 try:
1408 try:
1409 any, all = any, all
1409 any, all = any, all
1410 except NameError:
1410 except NameError:
1411 def any(iterable):
1411 def any(iterable):
1412 for i in iterable:
1412 for i in iterable:
1413 if i:
1413 if i:
1414 return True
1414 return True
1415 return False
1415 return False
1416
1416
1417 def all(iterable):
1417 def all(iterable):
1418 for i in iterable:
1418 for i in iterable:
1419 if not i:
1419 if not i:
1420 return False
1420 return False
1421 return True
1421 return True
1422
1422
1423 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1423 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1424 """Return the result of interpolating items in the mapping into string s.
1424 """Return the result of interpolating items in the mapping into string s.
1425
1425
1426 prefix is a single character string, or a two character string with
1426 prefix is a single character string, or a two character string with
1427 a backslash as the first character if the prefix needs to be escaped in
1427 a backslash as the first character if the prefix needs to be escaped in
1428 a regular expression.
1428 a regular expression.
1429
1429
1430 fn is an optional function that will be applied to the replacement text
1430 fn is an optional function that will be applied to the replacement text
1431 just before replacement.
1431 just before replacement.
1432
1432
1433 escape_prefix is an optional flag that allows using doubled prefix for
1433 escape_prefix is an optional flag that allows using doubled prefix for
1434 its escaping.
1434 its escaping.
1435 """
1435 """
1436 fn = fn or (lambda s: s)
1436 fn = fn or (lambda s: s)
1437 patterns = '|'.join(mapping.keys())
1437 patterns = '|'.join(mapping.keys())
1438 if escape_prefix:
1438 if escape_prefix:
1439 patterns += '|' + prefix
1439 patterns += '|' + prefix
1440 if len(prefix) > 1:
1440 if len(prefix) > 1:
1441 prefix_char = prefix[1:]
1441 prefix_char = prefix[1:]
1442 else:
1442 else:
1443 prefix_char = prefix
1443 prefix_char = prefix
1444 mapping[prefix_char] = prefix_char
1444 mapping[prefix_char] = prefix_char
1445 r = re.compile(r'%s(%s)' % (prefix, patterns))
1445 r = re.compile(r'%s(%s)' % (prefix, patterns))
1446 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1446 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
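Illustrative usage of interpolate() (not taken from the module's doctests; the '$' prefix and the mapping key are invented for the example, and escape_prefix=True lets a doubled '$$' stand for a literal '$'):

    >>> interpolate(r'\$', {'user': 'alice'}, 'hi $user, literal $$', escape_prefix=True)
    'hi alice, literal $'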
1447
1447
1448 def getport(port):
1448 def getport(port):
1449 """Return the port for a given network service.
1449 """Return the port for a given network service.
1450
1450
1451 If port is an integer, it's returned as is. If it's a string, it's
1451 If port is an integer, it's returned as is. If it's a string, it's
1452 looked up using socket.getservbyname(). If there's no matching
1452 looked up using socket.getservbyname(). If there's no matching
1453 service, util.Abort is raised.
1453 service, util.Abort is raised.
1454 """
1454 """
1455 try:
1455 try:
1456 return int(port)
1456 return int(port)
1457 except ValueError:
1457 except ValueError:
1458 pass
1458 pass
1459
1459
1460 try:
1460 try:
1461 return socket.getservbyname(port)
1461 return socket.getservbyname(port)
1462 except socket.error:
1462 except socket.error:
1463 raise Abort(_("no port number associated with service '%s'") % port)
1463 raise Abort(_("no port number associated with service '%s'") % port)
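Illustrative usage of getport() (not taken from the module's doctests; the 'http' lookup assumes a conventional services database mapping it to port 80):

    >>> getport(8080)
    8080
    >>> getport('http')
    80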
1464
1464
1465 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1465 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1466 '0': False, 'no': False, 'false': False, 'off': False,
1466 '0': False, 'no': False, 'false': False, 'off': False,
1467 'never': False}
1467 'never': False}
1468
1468
1469 def parsebool(s):
1469 def parsebool(s):
1470 """Parse s into a boolean.
1470 """Parse s into a boolean.
1471
1471
1472 If s is not a valid boolean, returns None.
1472 If s is not a valid boolean, returns None.
1473 """
1473 """
1474 return _booleans.get(s.lower(), None)
1474 return _booleans.get(s.lower(), None)
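Illustrative usage of parsebool() (not taken from the module's doctests):

    >>> parsebool('Yes'), parsebool('off'), parsebool('maybe')
    (True, False, None)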
1475
1475
1476 _hexdig = '0123456789ABCDEFabcdef'
1476 _hexdig = '0123456789ABCDEFabcdef'
1477 _hextochr = dict((a + b, chr(int(a + b, 16)))
1477 _hextochr = dict((a + b, chr(int(a + b, 16)))
1478 for a in _hexdig for b in _hexdig)
1478 for a in _hexdig for b in _hexdig)
1479
1479
1480 def _urlunquote(s):
1480 def _urlunquote(s):
1481 """Decode HTTP/HTML % encoding.
1481 """Decode HTTP/HTML % encoding.
1482
1482
1483 >>> _urlunquote('abc%20def')
1483 >>> _urlunquote('abc%20def')
1484 'abc def'
1484 'abc def'
1485 """
1485 """
1486 res = s.split('%')
1486 res = s.split('%')
1487 # fastpath
1487 # fastpath
1488 if len(res) == 1:
1488 if len(res) == 1:
1489 return s
1489 return s
1490 s = res[0]
1490 s = res[0]
1491 for item in res[1:]:
1491 for item in res[1:]:
1492 try:
1492 try:
1493 s += _hextochr[item[:2]] + item[2:]
1493 s += _hextochr[item[:2]] + item[2:]
1494 except KeyError:
1494 except KeyError:
1495 s += '%' + item
1495 s += '%' + item
1496 except UnicodeDecodeError:
1496 except UnicodeDecodeError:
1497 s += unichr(int(item[:2], 16)) + item[2:]
1497 s += unichr(int(item[:2], 16)) + item[2:]
1498 return s
1498 return s
1499
1499
1500 class url(object):
1500 class url(object):
1501 r"""Reliable URL parser.
1501 r"""Reliable URL parser.
1502
1502
1503 This parses URLs and provides attributes for the following
1503 This parses URLs and provides attributes for the following
1504 components:
1504 components:
1505
1505
1506 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1506 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1507
1507
1508 Missing components are set to None. The only exception is
1508 Missing components are set to None. The only exception is
1509 fragment, which is set to '' if present but empty.
1509 fragment, which is set to '' if present but empty.
1510
1510
1511 If parsefragment is False, fragment is included in query. If
1511 If parsefragment is False, fragment is included in query. If
1512 parsequery is False, query is included in path. If both are
1512 parsequery is False, query is included in path. If both are
1513 False, both fragment and query are included in path.
1513 False, both fragment and query are included in path.
1514
1514
1515 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1515 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1516
1516
1517 Note that for backward compatibility reasons, bundle URLs do not
1517 Note that for backward compatibility reasons, bundle URLs do not
1518 take host names. That means 'bundle://../' has a path of '../'.
1518 take host names. That means 'bundle://../' has a path of '../'.
1519
1519
1520 Examples:
1520 Examples:
1521
1521
1522 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1522 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1523 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1523 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1524 >>> url('ssh://[::1]:2200//home/joe/repo')
1524 >>> url('ssh://[::1]:2200//home/joe/repo')
1525 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1525 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1526 >>> url('file:///home/joe/repo')
1526 >>> url('file:///home/joe/repo')
1527 <url scheme: 'file', path: '/home/joe/repo'>
1527 <url scheme: 'file', path: '/home/joe/repo'>
1528 >>> url('file:///c:/temp/foo/')
1528 >>> url('file:///c:/temp/foo/')
1529 <url scheme: 'file', path: 'c:/temp/foo/'>
1529 <url scheme: 'file', path: 'c:/temp/foo/'>
1530 >>> url('bundle:foo')
1530 >>> url('bundle:foo')
1531 <url scheme: 'bundle', path: 'foo'>
1531 <url scheme: 'bundle', path: 'foo'>
1532 >>> url('bundle://../foo')
1532 >>> url('bundle://../foo')
1533 <url scheme: 'bundle', path: '../foo'>
1533 <url scheme: 'bundle', path: '../foo'>
1534 >>> url(r'c:\foo\bar')
1534 >>> url(r'c:\foo\bar')
1535 <url path: 'c:\\foo\\bar'>
1535 <url path: 'c:\\foo\\bar'>
1536 >>> url(r'\\blah\blah\blah')
1536 >>> url(r'\\blah\blah\blah')
1537 <url path: '\\\\blah\\blah\\blah'>
1537 <url path: '\\\\blah\\blah\\blah'>
1538 >>> url(r'\\blah\blah\blah#baz')
1538 >>> url(r'\\blah\blah\blah#baz')
1539 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1539 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1540
1540
1541 Authentication credentials:
1541 Authentication credentials:
1542
1542
1543 >>> url('ssh://joe:xyz@x/repo')
1543 >>> url('ssh://joe:xyz@x/repo')
1544 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1544 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1545 >>> url('ssh://joe@x/repo')
1545 >>> url('ssh://joe@x/repo')
1546 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1546 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1547
1547
1548 Query strings and fragments:
1548 Query strings and fragments:
1549
1549
1550 >>> url('http://host/a?b#c')
1550 >>> url('http://host/a?b#c')
1551 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1551 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1552 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1552 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1553 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1553 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1554 """
1554 """
1555
1555
1556 _safechars = "!~*'()+"
1556 _safechars = "!~*'()+"
1557 _safepchars = "/!~*'()+:"
1557 _safepchars = "/!~*'()+:"
1558 _matchscheme = re.compile(r'^[a-zA-Z0-9+.\-]+:').match
1558 _matchscheme = re.compile(r'^[a-zA-Z0-9+.\-]+:').match
1559
1559
1560 def __init__(self, path, parsequery=True, parsefragment=True):
1560 def __init__(self, path, parsequery=True, parsefragment=True):
1561 # We slowly chomp away at path until we have only the path left
1561 # We slowly chomp away at path until we have only the path left
1562 self.scheme = self.user = self.passwd = self.host = None
1562 self.scheme = self.user = self.passwd = self.host = None
1563 self.port = self.path = self.query = self.fragment = None
1563 self.port = self.path = self.query = self.fragment = None
1564 self._localpath = True
1564 self._localpath = True
1565 self._hostport = ''
1565 self._hostport = ''
1566 self._origpath = path
1566 self._origpath = path
1567
1567
1568 if parsefragment and '#' in path:
1568 if parsefragment and '#' in path:
1569 path, self.fragment = path.split('#', 1)
1569 path, self.fragment = path.split('#', 1)
1570 if not path:
1570 if not path:
1571 path = None
1571 path = None
1572
1572
1573 # special case for Windows drive letters and UNC paths
1573 # special case for Windows drive letters and UNC paths
1574 if hasdriveletter(path) or path.startswith(r'\\'):
1574 if hasdriveletter(path) or path.startswith(r'\\'):
1575 self.path = path
1575 self.path = path
1576 return
1576 return
1577
1577
1578 # For compatibility reasons, we can't handle bundle paths as
1578 # For compatibility reasons, we can't handle bundle paths as
1579 # normal URLs
1579 # normal URLs
1580 if path.startswith('bundle:'):
1580 if path.startswith('bundle:'):
1581 self.scheme = 'bundle'
1581 self.scheme = 'bundle'
1582 path = path[7:]
1582 path = path[7:]
1583 if path.startswith('//'):
1583 if path.startswith('//'):
1584 path = path[2:]
1584 path = path[2:]
1585 self.path = path
1585 self.path = path
1586 return
1586 return
1587
1587
1588 if self._matchscheme(path):
1588 if self._matchscheme(path):
1589 parts = path.split(':', 1)
1589 parts = path.split(':', 1)
1590 if parts[0]:
1590 if parts[0]:
1591 self.scheme, path = parts
1591 self.scheme, path = parts
1592 self._localpath = False
1592 self._localpath = False
1593
1593
1594 if not path:
1594 if not path:
1595 path = None
1595 path = None
1596 if self._localpath:
1596 if self._localpath:
1597 self.path = ''
1597 self.path = ''
1598 return
1598 return
1599 else:
1599 else:
1600 if self._localpath:
1600 if self._localpath:
1601 self.path = path
1601 self.path = path
1602 return
1602 return
1603
1603
1604 if parsequery and '?' in path:
1604 if parsequery and '?' in path:
1605 path, self.query = path.split('?', 1)
1605 path, self.query = path.split('?', 1)
1606 if not path:
1606 if not path:
1607 path = None
1607 path = None
1608 if not self.query:
1608 if not self.query:
1609 self.query = None
1609 self.query = None
1610
1610
1611 # // is required to specify a host/authority
1611 # // is required to specify a host/authority
1612 if path and path.startswith('//'):
1612 if path and path.startswith('//'):
1613 parts = path[2:].split('/', 1)
1613 parts = path[2:].split('/', 1)
1614 if len(parts) > 1:
1614 if len(parts) > 1:
1615 self.host, path = parts
1615 self.host, path = parts
1616 path = path
1616 path = path
1617 else:
1617 else:
1618 self.host = parts[0]
1618 self.host = parts[0]
1619 path = None
1619 path = None
1620 if not self.host:
1620 if not self.host:
1621 self.host = None
1621 self.host = None
1622 # path of file:///d is /d
1622 # path of file:///d is /d
1623 # path of file:///d:/ is d:/, not /d:/
1623 # path of file:///d:/ is d:/, not /d:/
1624 if path and not hasdriveletter(path):
1624 if path and not hasdriveletter(path):
1625 path = '/' + path
1625 path = '/' + path
1626
1626
1627 if self.host and '@' in self.host:
1627 if self.host and '@' in self.host:
1628 self.user, self.host = self.host.rsplit('@', 1)
1628 self.user, self.host = self.host.rsplit('@', 1)
1629 if ':' in self.user:
1629 if ':' in self.user:
1630 self.user, self.passwd = self.user.split(':', 1)
1630 self.user, self.passwd = self.user.split(':', 1)
1631 if not self.host:
1631 if not self.host:
1632 self.host = None
1632 self.host = None
1633
1633
1634 # Don't split on colons in IPv6 addresses without ports
1634 # Don't split on colons in IPv6 addresses without ports
1635 if (self.host and ':' in self.host and
1635 if (self.host and ':' in self.host and
1636 not (self.host.startswith('[') and self.host.endswith(']'))):
1636 not (self.host.startswith('[') and self.host.endswith(']'))):
1637 self._hostport = self.host
1637 self._hostport = self.host
1638 self.host, self.port = self.host.rsplit(':', 1)
1638 self.host, self.port = self.host.rsplit(':', 1)
1639 if not self.host:
1639 if not self.host:
1640 self.host = None
1640 self.host = None
1641
1641
1642 if (self.host and self.scheme == 'file' and
1642 if (self.host and self.scheme == 'file' and
1643 self.host not in ('localhost', '127.0.0.1', '[::1]')):
1643 self.host not in ('localhost', '127.0.0.1', '[::1]')):
1644 raise Abort(_('file:// URLs can only refer to localhost'))
1644 raise Abort(_('file:// URLs can only refer to localhost'))
1645
1645
1646 self.path = path
1646 self.path = path
1647
1647
1648 # leave the query string escaped
1648 # leave the query string escaped
1649 for a in ('user', 'passwd', 'host', 'port',
1649 for a in ('user', 'passwd', 'host', 'port',
1650 'path', 'fragment'):
1650 'path', 'fragment'):
1651 v = getattr(self, a)
1651 v = getattr(self, a)
1652 if v is not None:
1652 if v is not None:
1653 setattr(self, a, _urlunquote(v))
1653 setattr(self, a, _urlunquote(v))
1654
1654
1655 def __repr__(self):
1655 def __repr__(self):
1656 attrs = []
1656 attrs = []
1657 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
1657 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
1658 'query', 'fragment'):
1658 'query', 'fragment'):
1659 v = getattr(self, a)
1659 v = getattr(self, a)
1660 if v is not None:
1660 if v is not None:
1661 attrs.append('%s: %r' % (a, v))
1661 attrs.append('%s: %r' % (a, v))
1662 return '<url %s>' % ', '.join(attrs)
1662 return '<url %s>' % ', '.join(attrs)
1663
1663
1664 def __str__(self):
1664 def __str__(self):
1665 r"""Join the URL's components back into a URL string.
1665 r"""Join the URL's components back into a URL string.
1666
1666
1667 Examples:
1667 Examples:
1668
1668
1669 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
1669 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
1670 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
1670 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
1671 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
1671 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
1672 'http://user:pw@host:80/?foo=bar&baz=42'
1672 'http://user:pw@host:80/?foo=bar&baz=42'
1673 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
1673 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
1674 'http://user:pw@host:80/?foo=bar%3dbaz'
1674 'http://user:pw@host:80/?foo=bar%3dbaz'
1675 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
1675 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
1676 'ssh://user:pw@[::1]:2200//home/joe#'
1676 'ssh://user:pw@[::1]:2200//home/joe#'
1677 >>> str(url('http://localhost:80//'))
1677 >>> str(url('http://localhost:80//'))
1678 'http://localhost:80//'
1678 'http://localhost:80//'
1679 >>> str(url('http://localhost:80/'))
1679 >>> str(url('http://localhost:80/'))
1680 'http://localhost:80/'
1680 'http://localhost:80/'
1681 >>> str(url('http://localhost:80'))
1681 >>> str(url('http://localhost:80'))
1682 'http://localhost:80/'
1682 'http://localhost:80/'
1683 >>> str(url('bundle:foo'))
1683 >>> str(url('bundle:foo'))
1684 'bundle:foo'
1684 'bundle:foo'
1685 >>> str(url('bundle://../foo'))
1685 >>> str(url('bundle://../foo'))
1686 'bundle:../foo'
1686 'bundle:../foo'
1687 >>> str(url('path'))
1687 >>> str(url('path'))
1688 'path'
1688 'path'
1689 >>> str(url('file:///tmp/foo/bar'))
1689 >>> str(url('file:///tmp/foo/bar'))
1690 'file:///tmp/foo/bar'
1690 'file:///tmp/foo/bar'
1691 >>> str(url('file:///c:/tmp/foo/bar'))
1691 >>> str(url('file:///c:/tmp/foo/bar'))
1692 'file:///c:/tmp/foo/bar'
1692 'file:///c:/tmp/foo/bar'
1693 >>> print url(r'bundle:foo\bar')
1693 >>> print url(r'bundle:foo\bar')
1694 bundle:foo\bar
1694 bundle:foo\bar
1695 """
1695 """
1696 if self._localpath:
1696 if self._localpath:
1697 s = self.path
1697 s = self.path
1698 if self.scheme == 'bundle':
1698 if self.scheme == 'bundle':
1699 s = 'bundle:' + s
1699 s = 'bundle:' + s
1700 if self.fragment:
1700 if self.fragment:
1701 s += '#' + self.fragment
1701 s += '#' + self.fragment
1702 return s
1702 return s
1703
1703
1704 s = self.scheme + ':'
1704 s = self.scheme + ':'
1705 if self.user or self.passwd or self.host:
1705 if self.user or self.passwd or self.host:
1706 s += '//'
1706 s += '//'
1707 elif self.scheme and (not self.path or self.path.startswith('/')
1707 elif self.scheme and (not self.path or self.path.startswith('/')
1708 or hasdriveletter(self.path)):
1708 or hasdriveletter(self.path)):
1709 s += '//'
1709 s += '//'
1710 if hasdriveletter(self.path):
1710 if hasdriveletter(self.path):
1711 s += '/'
1711 s += '/'
1712 if self.user:
1712 if self.user:
1713 s += urllib.quote(self.user, safe=self._safechars)
1713 s += urllib.quote(self.user, safe=self._safechars)
1714 if self.passwd:
1714 if self.passwd:
1715 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
1715 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
1716 if self.user or self.passwd:
1716 if self.user or self.passwd:
1717 s += '@'
1717 s += '@'
1718 if self.host:
1718 if self.host:
1719 if not (self.host.startswith('[') and self.host.endswith(']')):
1719 if not (self.host.startswith('[') and self.host.endswith(']')):
1720 s += urllib.quote(self.host)
1720 s += urllib.quote(self.host)
1721 else:
1721 else:
1722 s += self.host
1722 s += self.host
1723 if self.port:
1723 if self.port:
1724 s += ':' + urllib.quote(self.port)
1724 s += ':' + urllib.quote(self.port)
1725 if self.host:
1725 if self.host:
1726 s += '/'
1726 s += '/'
1727 if self.path:
1727 if self.path:
1728 # TODO: similar to the query string, we should not unescape the
1728 # TODO: similar to the query string, we should not unescape the
1729 # path when we store it, the path might contain '%2f' = '/',
1729 # path when we store it, the path might contain '%2f' = '/',
1730 # which we should *not* escape.
1730 # which we should *not* escape.
1731 s += urllib.quote(self.path, safe=self._safepchars)
1731 s += urllib.quote(self.path, safe=self._safepchars)
1732 if self.query:
1732 if self.query:
1733 # we store the query in escaped form.
1733 # we store the query in escaped form.
1734 s += '?' + self.query
1734 s += '?' + self.query
1735 if self.fragment is not None:
1735 if self.fragment is not None:
1736 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
1736 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
1737 return s
1737 return s
1738
1738
1739 def authinfo(self):
1739 def authinfo(self):
1740 user, passwd = self.user, self.passwd
1740 user, passwd = self.user, self.passwd
1741 try:
1741 try:
1742 self.user, self.passwd = None, None
1742 self.user, self.passwd = None, None
1743 s = str(self)
1743 s = str(self)
1744 finally:
1744 finally:
1745 self.user, self.passwd = user, passwd
1745 self.user, self.passwd = user, passwd
1746 if not self.user:
1746 if not self.user:
1747 return (s, None)
1747 return (s, None)
1748 # authinfo[1] is passed to urllib2 password manager, and its
1748 # authinfo[1] is passed to urllib2 password manager, and its
1749 # URIs must not contain credentials. The host is passed in the
1749 # URIs must not contain credentials. The host is passed in the
1750 # URIs list because Python < 2.4.3 uses only that to search for
1750 # URIs list because Python < 2.4.3 uses only that to search for
1751 # a password.
1751 # a password.
1752 return (s, (None, (s, self.host),
1752 return (s, (None, (s, self.host),
1753 self.user, self.passwd or ''))
1753 self.user, self.passwd or ''))
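Illustrative result of authinfo() (not taken from the module's doctests; the host and credentials are made up). The first element is the URL with credentials stripped, the second is the tuple handed to the urllib2 password manager:

    >>> u = url('http://joe:xyz@example.com/repo')
    >>> u.authinfo()
    ('http://example.com/repo', (None, ('http://example.com/repo', 'example.com'), 'joe', 'xyz'))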
1754
1754
1755 def isabs(self):
1755 def isabs(self):
1756 if self.scheme and self.scheme != 'file':
1756 if self.scheme and self.scheme != 'file':
1757 return True # remote URL
1757 return True # remote URL
1758 if hasdriveletter(self.path):
1758 if hasdriveletter(self.path):
1759 return True # absolute for our purposes - can't be joined()
1759 return True # absolute for our purposes - can't be joined()
1760 if self.path.startswith(r'\\'):
1760 if self.path.startswith(r'\\'):
1761 return True # Windows UNC path
1761 return True # Windows UNC path
1762 if self.path.startswith('/'):
1762 if self.path.startswith('/'):
1763 return True # POSIX-style
1763 return True # POSIX-style
1764 return False
1764 return False
1765
1765
1766 def localpath(self):
1766 def localpath(self):
1767 if self.scheme == 'file' or self.scheme == 'bundle':
1767 if self.scheme == 'file' or self.scheme == 'bundle':
1768 path = self.path or '/'
1768 path = self.path or '/'
1769 # For Windows, we need to promote hosts containing drive
1769 # For Windows, we need to promote hosts containing drive
1770 # letters to paths with drive letters.
1770 # letters to paths with drive letters.
1771 if hasdriveletter(self._hostport):
1771 if hasdriveletter(self._hostport):
1772 path = self._hostport + '/' + self.path
1772 path = self._hostport + '/' + self.path
1773 elif (self.host is not None and self.path
1773 elif (self.host is not None and self.path
1774 and not hasdriveletter(path)):
1774 and not hasdriveletter(path)):
1775 path = '/' + path
1775 path = '/' + path
1776 return path
1776 return path
1777 return self._origpath
1777 return self._origpath
1778
1778
1779 def hasscheme(path):
1779 def hasscheme(path):
1780 return bool(url(path).scheme)
1780 return bool(url(path).scheme)
1781
1781
1782 def hasdriveletter(path):
1782 def hasdriveletter(path):
1783 return path and path[1:2] == ':' and path[0:1].isalpha()
1783 return path and path[1:2] == ':' and path[0:1].isalpha()
1784
1784
1785 def urllocalpath(path):
1785 def urllocalpath(path):
1786 return url(path, parsequery=False, parsefragment=False).localpath()
1786 return url(path, parsequery=False, parsefragment=False).localpath()
1787
1787
1788 def hidepassword(u):
1788 def hidepassword(u):
1789 '''hide user credential in a url string'''
1789 '''hide user credential in a url string'''
1790 u = url(u)
1790 u = url(u)
1791 if u.passwd:
1791 if u.passwd:
1792 u.passwd = '***'
1792 u.passwd = '***'
1793 return str(u)
1793 return str(u)
1794
1794
1795 def removeauth(u):
1795 def removeauth(u):
1796 '''remove all authentication information from a url string'''
1796 '''remove all authentication information from a url string'''
1797 u = url(u)
1797 u = url(u)
1798 u.user = u.passwd = None
1798 u.user = u.passwd = None
1799 return str(u)
1799 return str(u)
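Illustrative usage of hidepassword() and removeauth() (not taken from the module's doctests; the credentials and host are made up):

    >>> hidepassword('http://joe:secret@example.com/repo')
    'http://joe:***@example.com/repo'
    >>> removeauth('http://joe:secret@example.com/repo')
    'http://example.com/repo'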
1800
1800
1801 def isatty(fd):
1801 def isatty(fd):
1802 try:
1802 try:
1803 return fd.isatty()
1803 return fd.isatty()
1804 except AttributeError:
1804 except AttributeError:
1805 return False
1805 return False
@@ -1,479 +1,488 b''
1 $ hglog() { hg log --template "{rev} {phaseidx} {desc}\n" $*; }
1 $ hglog() { hg log --template "{rev} {phaseidx} {desc}\n" $*; }
2 $ mkcommit() {
2 $ mkcommit() {
3 > echo "$1" > "$1"
3 > echo "$1" > "$1"
4 > hg add "$1"
4 > hg add "$1"
5 > message="$1"
5 > message="$1"
6 > shift
6 > shift
7 > hg ci -m "$message" $*
7 > hg ci -m "$message" $*
8 > }
8 > }
9
9
10 $ hg init initialrepo
10 $ hg init initialrepo
11 $ cd initialrepo
11 $ cd initialrepo
12
12
13 Cannot change null revision phase
13 Cannot change null revision phase
14
14
15 $ hg phase --force --secret null
15 $ hg phase --force --secret null
16 abort: cannot change null revision phase
16 abort: cannot change null revision phase
17 [255]
17 [255]
18 $ hg phase null
18 $ hg phase null
19 -1: public
19 -1: public
20
20
21 $ mkcommit A
21 $ mkcommit A
22
22
23 New commits are draft by default
23 New commits are draft by default
24
24
25 $ hglog
25 $ hglog
26 0 1 A
26 0 1 A
27
27
28 Following commits are draft too
28 Following commits are draft too
29
29
30 $ mkcommit B
30 $ mkcommit B
31
31
32 $ hglog
32 $ hglog
33 1 1 B
33 1 1 B
34 0 1 A
34 0 1 A
35
35
36 Draft commits are properly created over public ones:
36 Draft commits are properly created over public ones:
37
37
38 $ hg phase --public .
38 $ hg phase --public .
39 $ hglog
39 $ hglog
40 1 0 B
40 1 0 B
41 0 0 A
41 0 0 A
42
42
43 $ mkcommit C
43 $ mkcommit C
44 $ mkcommit D
44 $ mkcommit D
45
45
46 $ hglog
46 $ hglog
47 3 1 D
47 3 1 D
48 2 1 C
48 2 1 C
49 1 0 B
49 1 0 B
50 0 0 A
50 0 0 A
51
51
52 Test creating a changeset as secret
52 Test creating a changeset as secret
53
53
54 $ mkcommit E --config phases.new-commit='secret'
54 $ mkcommit E --config phases.new-commit='secret'
55 $ hglog
55 $ hglog
56 4 2 E
56 4 2 E
57 3 1 D
57 3 1 D
58 2 1 C
58 2 1 C
59 1 0 B
59 1 0 B
60 0 0 A
60 0 0 A
61
61
62 Test the secret property is inherited
62 Test the secret property is inherited
63
63
64 $ mkcommit H
64 $ mkcommit H
65 $ hglog
65 $ hglog
66 5 2 H
66 5 2 H
67 4 2 E
67 4 2 E
68 3 1 D
68 3 1 D
69 2 1 C
69 2 1 C
70 1 0 B
70 1 0 B
71 0 0 A
71 0 0 A
72
72
73 Even on merge
73 Even on merge
74
74
75 $ hg up -q 1
75 $ hg up -q 1
76 $ mkcommit "B'"
76 $ mkcommit "B'"
77 created new head
77 created new head
78 $ hglog
78 $ hglog
79 6 1 B'
79 6 1 B'
80 5 2 H
80 5 2 H
81 4 2 E
81 4 2 E
82 3 1 D
82 3 1 D
83 2 1 C
83 2 1 C
84 1 0 B
84 1 0 B
85 0 0 A
85 0 0 A
86 $ hg merge 4 # E
86 $ hg merge 4 # E
87 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
87 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
88 (branch merge, don't forget to commit)
88 (branch merge, don't forget to commit)
89 $ hg ci -m "merge B' and E"
89 $ hg ci -m "merge B' and E"
90 $ hglog
90 $ hglog
91 7 2 merge B' and E
91 7 2 merge B' and E
92 6 1 B'
92 6 1 B'
93 5 2 H
93 5 2 H
94 4 2 E
94 4 2 E
95 3 1 D
95 3 1 D
96 2 1 C
96 2 1 C
97 1 0 B
97 1 0 B
98 0 0 A
98 0 0 A
99
99
100 Test secret changesets are not pushed
100 Test secret changesets are not pushed
101
101
102 $ hg init ../push-dest
102 $ hg init ../push-dest
103 $ cat > ../push-dest/.hg/hgrc << EOF
103 $ cat > ../push-dest/.hg/hgrc << EOF
104 > [phases]
104 > [phases]
105 > publish=False
105 > publish=False
106 > EOF
106 > EOF
107 $ hg outgoing ../push-dest --template='{rev} {phase} {desc|firstline}\n'
107 $ hg outgoing ../push-dest --template='{rev} {phase} {desc|firstline}\n'
108 comparing with ../push-dest
108 comparing with ../push-dest
109 searching for changes
109 searching for changes
110 0 public A
110 0 public A
111 1 public B
111 1 public B
112 2 draft C
112 2 draft C
113 3 draft D
113 3 draft D
114 6 draft B'
114 6 draft B'
115 $ hg outgoing -r 'branch(default)' ../push-dest --template='{rev} {phase} {desc|firstline}\n'
115 $ hg outgoing -r 'branch(default)' ../push-dest --template='{rev} {phase} {desc|firstline}\n'
116 comparing with ../push-dest
116 comparing with ../push-dest
117 searching for changes
117 searching for changes
118 0 public A
118 0 public A
119 1 public B
119 1 public B
120 2 draft C
120 2 draft C
121 3 draft D
121 3 draft D
122 6 draft B'
122 6 draft B'
123
123
124 $ hg push ../push-dest -f # force because we push multiple heads
124 $ hg push ../push-dest -f # force because we push multiple heads
125 pushing to ../push-dest
125 pushing to ../push-dest
126 searching for changes
126 searching for changes
127 adding changesets
127 adding changesets
128 adding manifests
128 adding manifests
129 adding file changes
129 adding file changes
130 added 5 changesets with 5 changes to 5 files (+1 heads)
130 added 5 changesets with 5 changes to 5 files (+1 heads)
131 $ hglog
131 $ hglog
132 7 2 merge B' and E
132 7 2 merge B' and E
133 6 1 B'
133 6 1 B'
134 5 2 H
134 5 2 H
135 4 2 E
135 4 2 E
136 3 1 D
136 3 1 D
137 2 1 C
137 2 1 C
138 1 0 B
138 1 0 B
139 0 0 A
139 0 0 A
140 $ cd ../push-dest
140 $ cd ../push-dest
141 $ hglog
141 $ hglog
142 4 1 B'
142 4 1 B'
143 3 1 D
143 3 1 D
144 2 1 C
144 2 1 C
145 1 0 B
145 1 0 B
146 0 0 A
146 0 0 A
147
147
148 (Issue3303)
148 (Issue3303)
149 Check that remote secret changesets are ignored when checking creation of remote heads
149 Check that remote secret changesets are ignored when checking creation of remote heads
150
150
151 We add a secret head into the push destination. This secret head shadows a
151 We add a secret head into the push destination. This secret head shadows a
152 visible head shared between the initial repo and the push destination.
152 visible head shared between the initial repo and the push destination.
153
153
154 $ hg up -q 4 # B'
154 $ hg up -q 4 # B'
155 $ mkcommit Z --config phases.new-commit=secret
155 $ mkcommit Z --config phases.new-commit=secret
156 $ hg phase .
156 $ hg phase .
157 5: secret
157 5: secret
158
158
159 # We now try to push a new public changeset that descends from the common public
159 # We now try to push a new public changeset that descends from the common public
160 # head shadowed by the remote secret head.
160 # head shadowed by the remote secret head.
161
161
162 $ cd ../initialrepo
162 $ cd ../initialrepo
163 $ hg up -q 6 #B'
163 $ hg up -q 6 #B'
164 $ mkcommit I
164 $ mkcommit I
165 created new head
165 created new head
166 $ hg push ../push-dest
166 $ hg push ../push-dest
167 pushing to ../push-dest
167 pushing to ../push-dest
168 searching for changes
168 searching for changes
169 adding changesets
169 adding changesets
170 adding manifests
170 adding manifests
171 adding file changes
171 adding file changes
172 added 1 changesets with 1 changes to 1 files (+1 heads)
172 added 1 changesets with 1 changes to 1 files (+1 heads)
173
173
174 :note: The "(+1 heads)" is wrong, as we did not add any visible head
174 :note: The "(+1 heads)" is wrong, as we did not add any visible head
175
175
176
176
177 Restore conditions prior to the extra insertion.
177 Restore conditions prior to the extra insertion.
178 $ hg -q --config extensions.mq= strip .
178 $ hg -q --config extensions.mq= strip .
179 $ hg up -q 7
179 $ hg up -q 7
180 $ cd ..
180 $ cd ..
181
181
182 Test secret changesets are not pulled
182 Test secret changesets are not pulled
183
183
184 $ hg init pull-dest
184 $ hg init pull-dest
185 $ cd pull-dest
185 $ cd pull-dest
186 $ hg pull ../initialrepo
186 $ hg pull ../initialrepo
187 pulling from ../initialrepo
187 pulling from ../initialrepo
188 requesting all changes
188 requesting all changes
189 adding changesets
189 adding changesets
190 adding manifests
190 adding manifests
191 adding file changes
191 adding file changes
192 added 5 changesets with 5 changes to 5 files (+1 heads)
192 added 5 changesets with 5 changes to 5 files (+1 heads)
193 (run 'hg heads' to see heads, 'hg merge' to merge)
193 (run 'hg heads' to see heads, 'hg merge' to merge)
194 $ hglog
194 $ hglog
195 4 0 B'
195 4 0 B'
196 3 0 D
196 3 0 D
197 2 0 C
197 2 0 C
198 1 0 B
198 1 0 B
199 0 0 A
199 0 0 A
200 $ cd ..
200 $ cd ..
201
201
202 But secret can still be bundled explicitly
202 But secret can still be bundled explicitly
203
203
204 $ cd initialrepo
204 $ cd initialrepo
205 $ hg bundle --base '4^' -r 'children(4)' ../secret-bundle.hg
205 $ hg bundle --base '4^' -r 'children(4)' ../secret-bundle.hg
206 4 changesets found
206 4 changesets found
207 $ cd ..
207 $ cd ..
208
208
209 Test secret changesets are not cloned
209 Test secret changesets are not cloned
210 (during local clone)
210 (during local clone)
211
211
212 $ hg clone -qU initialrepo clone-dest
212 $ hg clone -qU initialrepo clone-dest
213 $ hglog -R clone-dest
213 $ hglog -R clone-dest
214 4 0 B'
214 4 0 B'
215 3 0 D
215 3 0 D
216 2 0 C
216 2 0 C
217 1 0 B
217 1 0 B
218 0 0 A
218 0 0 A
219
219
220 Test revset
220 Test revset
221
221
222 $ cd initialrepo
222 $ cd initialrepo
223 $ hglog -r 'public()'
223 $ hglog -r 'public()'
224 0 0 A
224 0 0 A
225 1 0 B
225 1 0 B
226 $ hglog -r 'draft()'
226 $ hglog -r 'draft()'
227 2 1 C
227 2 1 C
228 3 1 D
228 3 1 D
229 6 1 B'
229 6 1 B'
230 $ hglog -r 'secret()'
230 $ hglog -r 'secret()'
231 4 2 E
231 4 2 E
232 5 2 H
232 5 2 H
233 7 2 merge B' and E
233 7 2 merge B' and E
234
234
235 test that phases are displayed in log at debug level
235 test that phases are displayed in log at debug level
236
236
237 $ hg log --debug
237 $ hg log --debug
238 changeset: 7:17a481b3bccb796c0521ae97903d81c52bfee4af
238 changeset: 7:17a481b3bccb796c0521ae97903d81c52bfee4af
239 tag: tip
239 tag: tip
240 phase: secret
240 phase: secret
241 parent: 6:cf9fe039dfd67e829edf6522a45de057b5c86519
241 parent: 6:cf9fe039dfd67e829edf6522a45de057b5c86519
242 parent: 4:a603bfb5a83e312131cebcd05353c217d4d21dde
242 parent: 4:a603bfb5a83e312131cebcd05353c217d4d21dde
243 manifest: 7:5e724ffacba267b2ab726c91fc8b650710deaaa8
243 manifest: 7:5e724ffacba267b2ab726c91fc8b650710deaaa8
244 user: test
244 user: test
245 date: Thu Jan 01 00:00:00 1970 +0000
245 date: Thu Jan 01 00:00:00 1970 +0000
246 files+: C D E
246 files+: C D E
247 extra: branch=default
247 extra: branch=default
248 description:
248 description:
249 merge B' and E
249 merge B' and E
250
250
251
251
252 changeset: 6:cf9fe039dfd67e829edf6522a45de057b5c86519
252 changeset: 6:cf9fe039dfd67e829edf6522a45de057b5c86519
253 phase: draft
253 phase: draft
254 parent: 1:27547f69f25460a52fff66ad004e58da7ad3fb56
254 parent: 1:27547f69f25460a52fff66ad004e58da7ad3fb56
255 parent: -1:0000000000000000000000000000000000000000
255 parent: -1:0000000000000000000000000000000000000000
256 manifest: 6:ab8bfef2392903058bf4ebb9e7746e8d7026b27a
256 manifest: 6:ab8bfef2392903058bf4ebb9e7746e8d7026b27a
257 user: test
257 user: test
258 date: Thu Jan 01 00:00:00 1970 +0000
258 date: Thu Jan 01 00:00:00 1970 +0000
259 files+: B'
259 files+: B'
260 extra: branch=default
260 extra: branch=default
261 description:
261 description:
262 B'
262 B'
263
263
264
264
265 changeset: 5:a030c6be5127abc010fcbff1851536552e6951a8
265 changeset: 5:a030c6be5127abc010fcbff1851536552e6951a8
266 phase: secret
266 phase: secret
267 parent: 4:a603bfb5a83e312131cebcd05353c217d4d21dde
267 parent: 4:a603bfb5a83e312131cebcd05353c217d4d21dde
268 parent: -1:0000000000000000000000000000000000000000
268 parent: -1:0000000000000000000000000000000000000000
269 manifest: 5:5c710aa854874fe3d5fa7192e77bdb314cc08b5a
269 manifest: 5:5c710aa854874fe3d5fa7192e77bdb314cc08b5a
270 user: test
270 user: test
271 date: Thu Jan 01 00:00:00 1970 +0000
271 date: Thu Jan 01 00:00:00 1970 +0000
272 files+: H
272 files+: H
273 extra: branch=default
273 extra: branch=default
274 description:
274 description:
275 H
275 H
276
276
277
277
278 changeset: 4:a603bfb5a83e312131cebcd05353c217d4d21dde
278 changeset: 4:a603bfb5a83e312131cebcd05353c217d4d21dde
279 phase: secret
279 phase: secret
280 parent: 3:b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e
280 parent: 3:b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e
281 parent: -1:0000000000000000000000000000000000000000
281 parent: -1:0000000000000000000000000000000000000000
282 manifest: 4:7173fd1c27119750b959e3a0f47ed78abe75d6dc
282 manifest: 4:7173fd1c27119750b959e3a0f47ed78abe75d6dc
283 user: test
283 user: test
284 date: Thu Jan 01 00:00:00 1970 +0000
284 date: Thu Jan 01 00:00:00 1970 +0000
285 files+: E
285 files+: E
286 extra: branch=default
286 extra: branch=default
287 description:
287 description:
288 E
288 E
289
289
290
290
291 changeset: 3:b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e
291 changeset: 3:b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e
292 phase: draft
292 phase: draft
293 parent: 2:f838bfaca5c7226600ebcfd84f3c3c13a28d3757
293 parent: 2:f838bfaca5c7226600ebcfd84f3c3c13a28d3757
294 parent: -1:0000000000000000000000000000000000000000
294 parent: -1:0000000000000000000000000000000000000000
295 manifest: 3:6e1f4c47ecb533ffd0c8e52cdc88afb6cd39e20c
295 manifest: 3:6e1f4c47ecb533ffd0c8e52cdc88afb6cd39e20c
296 user: test
296 user: test
297 date: Thu Jan 01 00:00:00 1970 +0000
297 date: Thu Jan 01 00:00:00 1970 +0000
298 files+: D
298 files+: D
299 extra: branch=default
299 extra: branch=default
300 description:
300 description:
301 D
301 D
302
302
303
303
304 changeset: 2:f838bfaca5c7226600ebcfd84f3c3c13a28d3757
304 changeset: 2:f838bfaca5c7226600ebcfd84f3c3c13a28d3757
305 phase: draft
305 phase: draft
306 parent: 1:27547f69f25460a52fff66ad004e58da7ad3fb56
306 parent: 1:27547f69f25460a52fff66ad004e58da7ad3fb56
307 parent: -1:0000000000000000000000000000000000000000
307 parent: -1:0000000000000000000000000000000000000000
308 manifest: 2:66a5a01817fdf5239c273802b5b7618d051c89e4
308 manifest: 2:66a5a01817fdf5239c273802b5b7618d051c89e4
309 user: test
309 user: test
310 date: Thu Jan 01 00:00:00 1970 +0000
310 date: Thu Jan 01 00:00:00 1970 +0000
311 files+: C
311 files+: C
312 extra: branch=default
312 extra: branch=default
313 description:
313 description:
314 C
314 C
315
315
316
316
317 changeset: 1:27547f69f25460a52fff66ad004e58da7ad3fb56
317 changeset: 1:27547f69f25460a52fff66ad004e58da7ad3fb56
318 parent: 0:4a2df7238c3b48766b5e22fafbb8a2f506ec8256
318 parent: 0:4a2df7238c3b48766b5e22fafbb8a2f506ec8256
319 parent: -1:0000000000000000000000000000000000000000
319 parent: -1:0000000000000000000000000000000000000000
320 manifest: 1:cb5cbbc1bfbf24cc34b9e8c16914e9caa2d2a7fd
320 manifest: 1:cb5cbbc1bfbf24cc34b9e8c16914e9caa2d2a7fd
321 user: test
321 user: test
322 date: Thu Jan 01 00:00:00 1970 +0000
322 date: Thu Jan 01 00:00:00 1970 +0000
323 files+: B
323 files+: B
324 extra: branch=default
324 extra: branch=default
325 description:
325 description:
326 B
326 B
327
327
328
328
329 changeset: 0:4a2df7238c3b48766b5e22fafbb8a2f506ec8256
329 changeset: 0:4a2df7238c3b48766b5e22fafbb8a2f506ec8256
330 parent: -1:0000000000000000000000000000000000000000
330 parent: -1:0000000000000000000000000000000000000000
331 parent: -1:0000000000000000000000000000000000000000
331 parent: -1:0000000000000000000000000000000000000000
332 manifest: 0:007d8c9d88841325f5c6b06371b35b4e8a2b1a83
332 manifest: 0:007d8c9d88841325f5c6b06371b35b4e8a2b1a83
333 user: test
333 user: test
334 date: Thu Jan 01 00:00:00 1970 +0000
334 date: Thu Jan 01 00:00:00 1970 +0000
335 files+: A
335 files+: A
336 extra: branch=default
336 extra: branch=default
337 description:
337 description:
338 A
338 A
339
339
340
340
341
341
342 (Issue3707)
343 test invalid phase name
344
345 $ mkcommit I --config phases.new-commit='babar'
346 transaction abort!
347 rollback completed
348 abort: phases.new-commit: not a valid phase name ('babar')
349 [255]
350
342 Test phase command
351 Test phase command
343 ===================
352 ===================
344
353
345 initial picture
354 initial picture
346
355
347 $ cat >> $HGRCPATH << EOF
356 $ cat >> $HGRCPATH << EOF
348 > [extensions]
357 > [extensions]
349 > hgext.graphlog=
358 > hgext.graphlog=
350 > EOF
359 > EOF
351 $ hg log -G --template "{rev} {phase} {desc}\n"
360 $ hg log -G --template "{rev} {phase} {desc}\n"
352 @ 7 secret merge B' and E
361 @ 7 secret merge B' and E
353 |\
362 |\
354 | o 6 draft B'
363 | o 6 draft B'
355 | |
364 | |
356 +---o 5 secret H
365 +---o 5 secret H
357 | |
366 | |
358 o | 4 secret E
367 o | 4 secret E
359 | |
368 | |
360 o | 3 draft D
369 o | 3 draft D
361 | |
370 | |
362 o | 2 draft C
371 o | 2 draft C
363 |/
372 |/
364 o 1 public B
373 o 1 public B
365 |
374 |
366 o 0 public A
375 o 0 public A
367
376
368
377
369 display changesets phase
378 display changesets phase
370
379
371 (mixing -r and plain rev specification)
380 (mixing -r and plain rev specification)
372
381
373 $ hg phase 1::4 -r 7
382 $ hg phase 1::4 -r 7
374 1: public
383 1: public
375 2: draft
384 2: draft
376 3: draft
385 3: draft
377 4: secret
386 4: secret
378 7: secret
387 7: secret
379
388
380
389
381 move changeset forward
390 move changeset forward
382
391
383 (with -r option)
392 (with -r option)
384
393
385 $ hg phase --public -r 2
394 $ hg phase --public -r 2
386 $ hg log -G --template "{rev} {phase} {desc}\n"
395 $ hg log -G --template "{rev} {phase} {desc}\n"
387 @ 7 secret merge B' and E
396 @ 7 secret merge B' and E
388 |\
397 |\
389 | o 6 draft B'
398 | o 6 draft B'
390 | |
399 | |
391 +---o 5 secret H
400 +---o 5 secret H
392 | |
401 | |
393 o | 4 secret E
402 o | 4 secret E
394 | |
403 | |
395 o | 3 draft D
404 o | 3 draft D
396 | |
405 | |
397 o | 2 public C
406 o | 2 public C
398 |/
407 |/
399 o 1 public B
408 o 1 public B
400 |
409 |
401 o 0 public A
410 o 0 public A
402
411
403
412
404 move changeset backward
413 move changeset backward
405
414
406 (without -r option)
415 (without -r option)
407
416
408 $ hg phase --draft --force 2
417 $ hg phase --draft --force 2
409 $ hg log -G --template "{rev} {phase} {desc}\n"
418 $ hg log -G --template "{rev} {phase} {desc}\n"
410 @ 7 secret merge B' and E
419 @ 7 secret merge B' and E
411 |\
420 |\
412 | o 6 draft B'
421 | o 6 draft B'
413 | |
422 | |
414 +---o 5 secret H
423 +---o 5 secret H
415 | |
424 | |
416 o | 4 secret E
425 o | 4 secret E
417 | |
426 | |
418 o | 3 draft D
427 o | 3 draft D
419 | |
428 | |
420 o | 2 draft C
429 o | 2 draft C
421 |/
430 |/
422 o 1 public B
431 o 1 public B
423 |
432 |
424 o 0 public A
433 o 0 public A
425
434
426
435
427 move changeset forward and backward
436 move changeset forward and backward
428
437
429 $ hg phase --draft --force 1::4
438 $ hg phase --draft --force 1::4
430 $ hg log -G --template "{rev} {phase} {desc}\n"
439 $ hg log -G --template "{rev} {phase} {desc}\n"
431 @ 7 secret merge B' and E
440 @ 7 secret merge B' and E
432 |\
441 |\
433 | o 6 draft B'
442 | o 6 draft B'
434 | |
443 | |
435 +---o 5 secret H
444 +---o 5 secret H
436 | |
445 | |
437 o | 4 draft E
446 o | 4 draft E
438 | |
447 | |
439 o | 3 draft D
448 o | 3 draft D
440 | |
449 | |
441 o | 2 draft C
450 o | 2 draft C
442 |/
451 |/
443 o 1 draft B
452 o 1 draft B
444 |
453 |
445 o 0 public A
454 o 0 public A
446
455
447 test partial failure
456 test partial failure
448
457
449 $ hg phase --public 7
458 $ hg phase --public 7
450 $ hg phase --draft '5 or 7'
459 $ hg phase --draft '5 or 7'
451 cannot move 1 changesets to a more permissive phase, use --force
460 cannot move 1 changesets to a more permissive phase, use --force
452 phase changed for 1 changesets
461 phase changed for 1 changesets
453 [1]
462 [1]
454 $ hg log -G --template "{rev} {phase} {desc}\n"
463 $ hg log -G --template "{rev} {phase} {desc}\n"
455 @ 7 public merge B' and E
464 @ 7 public merge B' and E
456 |\
465 |\
457 | o 6 public B'
466 | o 6 public B'
458 | |
467 | |
459 +---o 5 draft H
468 +---o 5 draft H
460 | |
469 | |
461 o | 4 public E
470 o | 4 public E
462 | |
471 | |
463 o | 3 public D
472 o | 3 public D
464 | |
473 | |
465 o | 2 public C
474 o | 2 public C
466 |/
475 |/
467 o 1 public B
476 o 1 public B
468 |
477 |
469 o 0 public A
478 o 0 public A
470
479
471
480
472 test complete failure
481 test complete failure
473
482
474 $ hg phase --draft 7
483 $ hg phase --draft 7
475 cannot move 1 changesets to a more permissive phase, use --force
484 cannot move 1 changesets to a more permissive phase, use --force
476 no phases changed
485 no phases changed
477 [1]
486 [1]
478
487
479 $ cd ..
488 $ cd ..