##// END OF EJS Templates
l10n: use %d instead of %s for numbers
timeless@mozdev.org -
r26778:a95c975f default
parent child Browse files
Show More
@@ -1,187 +1,187
1 # Mercurial extension to provide 'hg relink' command
1 # Mercurial extension to provide 'hg relink' command
2 #
2 #
3 # Copyright (C) 2007 Brendan Cully <brendan@kublai.com>
3 # Copyright (C) 2007 Brendan Cully <brendan@kublai.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """recreates hardlinks between repository clones"""
8 """recreates hardlinks between repository clones"""
9
9
10 from mercurial import cmdutil, hg, util, error
10 from mercurial import cmdutil, hg, util, error
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12 import os, stat
12 import os, stat
13
13
14 cmdtable = {}
14 cmdtable = {}
15 command = cmdutil.command(cmdtable)
15 command = cmdutil.command(cmdtable)
16 # Note for extension authors: ONLY specify testedwith = 'internal' for
16 # Note for extension authors: ONLY specify testedwith = 'internal' for
17 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
17 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
18 # be specifying the version(s) of Mercurial they are tested with, or
18 # be specifying the version(s) of Mercurial they are tested with, or
19 # leave the attribute unspecified.
19 # leave the attribute unspecified.
20 testedwith = 'internal'
20 testedwith = 'internal'
21
21
22 @command('relink', [], _('[ORIGIN]'))
22 @command('relink', [], _('[ORIGIN]'))
23 def relink(ui, repo, origin=None, **opts):
23 def relink(ui, repo, origin=None, **opts):
24 """recreate hardlinks between two repositories
24 """recreate hardlinks between two repositories
25
25
26 When repositories are cloned locally, their data files will be
26 When repositories are cloned locally, their data files will be
27 hardlinked so that they only use the space of a single repository.
27 hardlinked so that they only use the space of a single repository.
28
28
29 Unfortunately, subsequent pulls into either repository will break
29 Unfortunately, subsequent pulls into either repository will break
30 hardlinks for any files touched by the new changesets, even if
30 hardlinks for any files touched by the new changesets, even if
31 both repositories end up pulling the same changes.
31 both repositories end up pulling the same changes.
32
32
33 Similarly, passing --rev to "hg clone" will fail to use any
33 Similarly, passing --rev to "hg clone" will fail to use any
34 hardlinks, falling back to a complete copy of the source
34 hardlinks, falling back to a complete copy of the source
35 repository.
35 repository.
36
36
37 This command lets you recreate those hardlinks and reclaim that
37 This command lets you recreate those hardlinks and reclaim that
38 wasted space.
38 wasted space.
39
39
40 This repository will be relinked to share space with ORIGIN, which
40 This repository will be relinked to share space with ORIGIN, which
41 must be on the same local disk. If ORIGIN is omitted, looks for
41 must be on the same local disk. If ORIGIN is omitted, looks for
42 "default-relink", then "default", in [paths].
42 "default-relink", then "default", in [paths].
43
43
44 Do not attempt any read operations on this repository while the
44 Do not attempt any read operations on this repository while the
45 command is running. (Both repositories will be locked against
45 command is running. (Both repositories will be locked against
46 writes.)
46 writes.)
47 """
47 """
48 if (not util.safehasattr(util, 'samefile') or
48 if (not util.safehasattr(util, 'samefile') or
49 not util.safehasattr(util, 'samedevice')):
49 not util.safehasattr(util, 'samedevice')):
50 raise error.Abort(_('hardlinks are not supported on this system'))
50 raise error.Abort(_('hardlinks are not supported on this system'))
51 src = hg.repository(repo.baseui, ui.expandpath(origin or 'default-relink',
51 src = hg.repository(repo.baseui, ui.expandpath(origin or 'default-relink',
52 origin or 'default'))
52 origin or 'default'))
53 ui.status(_('relinking %s to %s\n') % (src.store.path, repo.store.path))
53 ui.status(_('relinking %s to %s\n') % (src.store.path, repo.store.path))
54 if repo.root == src.root:
54 if repo.root == src.root:
55 ui.status(_('there is nothing to relink\n'))
55 ui.status(_('there is nothing to relink\n'))
56 return
56 return
57
57
58 if not util.samedevice(src.store.path, repo.store.path):
58 if not util.samedevice(src.store.path, repo.store.path):
59 # No point in continuing
59 # No point in continuing
60 raise error.Abort(_('source and destination are on different devices'))
60 raise error.Abort(_('source and destination are on different devices'))
61
61
62 locallock = repo.lock()
62 locallock = repo.lock()
63 try:
63 try:
64 remotelock = src.lock()
64 remotelock = src.lock()
65 try:
65 try:
66 candidates = sorted(collect(src, ui))
66 candidates = sorted(collect(src, ui))
67 targets = prune(candidates, src.store.path, repo.store.path, ui)
67 targets = prune(candidates, src.store.path, repo.store.path, ui)
68 do_relink(src.store.path, repo.store.path, targets, ui)
68 do_relink(src.store.path, repo.store.path, targets, ui)
69 finally:
69 finally:
70 remotelock.release()
70 remotelock.release()
71 finally:
71 finally:
72 locallock.release()
72 locallock.release()
73
73
74 def collect(src, ui):
74 def collect(src, ui):
75 seplen = len(os.path.sep)
75 seplen = len(os.path.sep)
76 candidates = []
76 candidates = []
77 live = len(src['tip'].manifest())
77 live = len(src['tip'].manifest())
78 # Your average repository has some files which were deleted before
78 # Your average repository has some files which were deleted before
79 # the tip revision. We account for that by assuming that there are
79 # the tip revision. We account for that by assuming that there are
80 # 3 tracked files for every 2 live files as of the tip version of
80 # 3 tracked files for every 2 live files as of the tip version of
81 # the repository.
81 # the repository.
82 #
82 #
83 # mozilla-central as of 2010-06-10 had a ratio of just over 7:5.
83 # mozilla-central as of 2010-06-10 had a ratio of just over 7:5.
84 total = live * 3 // 2
84 total = live * 3 // 2
85 src = src.store.path
85 src = src.store.path
86 pos = 0
86 pos = 0
87 ui.status(_("tip has %d files, estimated total number of files: %s\n")
87 ui.status(_("tip has %d files, estimated total number of files: %d\n")
88 % (live, total))
88 % (live, total))
89 for dirpath, dirnames, filenames in os.walk(src):
89 for dirpath, dirnames, filenames in os.walk(src):
90 dirnames.sort()
90 dirnames.sort()
91 relpath = dirpath[len(src) + seplen:]
91 relpath = dirpath[len(src) + seplen:]
92 for filename in sorted(filenames):
92 for filename in sorted(filenames):
93 if filename[-2:] not in ('.d', '.i'):
93 if filename[-2:] not in ('.d', '.i'):
94 continue
94 continue
95 st = os.stat(os.path.join(dirpath, filename))
95 st = os.stat(os.path.join(dirpath, filename))
96 if not stat.S_ISREG(st.st_mode):
96 if not stat.S_ISREG(st.st_mode):
97 continue
97 continue
98 pos += 1
98 pos += 1
99 candidates.append((os.path.join(relpath, filename), st))
99 candidates.append((os.path.join(relpath, filename), st))
100 ui.progress(_('collecting'), pos, filename, _('files'), total)
100 ui.progress(_('collecting'), pos, filename, _('files'), total)
101
101
102 ui.progress(_('collecting'), None)
102 ui.progress(_('collecting'), None)
103 ui.status(_('collected %d candidate storage files\n') % len(candidates))
103 ui.status(_('collected %d candidate storage files\n') % len(candidates))
104 return candidates
104 return candidates
105
105
106 def prune(candidates, src, dst, ui):
106 def prune(candidates, src, dst, ui):
107 def linkfilter(src, dst, st):
107 def linkfilter(src, dst, st):
108 try:
108 try:
109 ts = os.stat(dst)
109 ts = os.stat(dst)
110 except OSError:
110 except OSError:
111 # Destination doesn't have this file?
111 # Destination doesn't have this file?
112 return False
112 return False
113 if util.samefile(src, dst):
113 if util.samefile(src, dst):
114 return False
114 return False
115 if not util.samedevice(src, dst):
115 if not util.samedevice(src, dst):
116 # No point in continuing
116 # No point in continuing
117 raise error.Abort(
117 raise error.Abort(
118 _('source and destination are on different devices'))
118 _('source and destination are on different devices'))
119 if st.st_size != ts.st_size:
119 if st.st_size != ts.st_size:
120 return False
120 return False
121 return st
121 return st
122
122
123 targets = []
123 targets = []
124 total = len(candidates)
124 total = len(candidates)
125 pos = 0
125 pos = 0
126 for fn, st in candidates:
126 for fn, st in candidates:
127 pos += 1
127 pos += 1
128 srcpath = os.path.join(src, fn)
128 srcpath = os.path.join(src, fn)
129 tgt = os.path.join(dst, fn)
129 tgt = os.path.join(dst, fn)
130 ts = linkfilter(srcpath, tgt, st)
130 ts = linkfilter(srcpath, tgt, st)
131 if not ts:
131 if not ts:
132 ui.debug('not linkable: %s\n' % fn)
132 ui.debug('not linkable: %s\n' % fn)
133 continue
133 continue
134 targets.append((fn, ts.st_size))
134 targets.append((fn, ts.st_size))
135 ui.progress(_('pruning'), pos, fn, _('files'), total)
135 ui.progress(_('pruning'), pos, fn, _('files'), total)
136
136
137 ui.progress(_('pruning'), None)
137 ui.progress(_('pruning'), None)
138 ui.status(_('pruned down to %d probably relinkable files\n') % len(targets))
138 ui.status(_('pruned down to %d probably relinkable files\n') % len(targets))
139 return targets
139 return targets
140
140
141 def do_relink(src, dst, files, ui):
141 def do_relink(src, dst, files, ui):
142 def relinkfile(src, dst):
142 def relinkfile(src, dst):
143 bak = dst + '.bak'
143 bak = dst + '.bak'
144 os.rename(dst, bak)
144 os.rename(dst, bak)
145 try:
145 try:
146 util.oslink(src, dst)
146 util.oslink(src, dst)
147 except OSError:
147 except OSError:
148 os.rename(bak, dst)
148 os.rename(bak, dst)
149 raise
149 raise
150 os.remove(bak)
150 os.remove(bak)
151
151
152 CHUNKLEN = 65536
152 CHUNKLEN = 65536
153 relinked = 0
153 relinked = 0
154 savedbytes = 0
154 savedbytes = 0
155
155
156 pos = 0
156 pos = 0
157 total = len(files)
157 total = len(files)
158 for f, sz in files:
158 for f, sz in files:
159 pos += 1
159 pos += 1
160 source = os.path.join(src, f)
160 source = os.path.join(src, f)
161 tgt = os.path.join(dst, f)
161 tgt = os.path.join(dst, f)
162 # Binary mode, so that read() works correctly, especially on Windows
162 # Binary mode, so that read() works correctly, especially on Windows
163 sfp = file(source, 'rb')
163 sfp = file(source, 'rb')
164 dfp = file(tgt, 'rb')
164 dfp = file(tgt, 'rb')
165 sin = sfp.read(CHUNKLEN)
165 sin = sfp.read(CHUNKLEN)
166 while sin:
166 while sin:
167 din = dfp.read(CHUNKLEN)
167 din = dfp.read(CHUNKLEN)
168 if sin != din:
168 if sin != din:
169 break
169 break
170 sin = sfp.read(CHUNKLEN)
170 sin = sfp.read(CHUNKLEN)
171 sfp.close()
171 sfp.close()
172 dfp.close()
172 dfp.close()
173 if sin:
173 if sin:
174 ui.debug('not linkable: %s\n' % f)
174 ui.debug('not linkable: %s\n' % f)
175 continue
175 continue
176 try:
176 try:
177 relinkfile(source, tgt)
177 relinkfile(source, tgt)
178 ui.progress(_('relinking'), pos, f, _('files'), total)
178 ui.progress(_('relinking'), pos, f, _('files'), total)
179 relinked += 1
179 relinked += 1
180 savedbytes += sz
180 savedbytes += sz
181 except OSError as inst:
181 except OSError as inst:
182 ui.warn('%s: %s\n' % (tgt, str(inst)))
182 ui.warn('%s: %s\n' % (tgt, str(inst)))
183
183
184 ui.progress(_('relinking'), None)
184 ui.progress(_('relinking'), None)
185
185
186 ui.status(_('relinked %d files (%s reclaimed)\n') %
186 ui.status(_('relinked %d files (%s reclaimed)\n') %
187 (relinked, util.bytecount(savedbytes)))
187 (relinked, util.bytecount(savedbytes)))
@@ -1,337 +1,337
1 # mail.py - mail sending bits for mercurial
1 # mail.py - mail sending bits for mercurial
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import email
10 import email
11 import os
11 import os
12 import quopri
12 import quopri
13 import smtplib
13 import smtplib
14 import socket
14 import socket
15 import sys
15 import sys
16 import time
16 import time
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 encoding,
20 encoding,
21 error,
21 error,
22 sslutil,
22 sslutil,
23 util,
23 util,
24 )
24 )
25
25
26 _oldheaderinit = email.Header.Header.__init__
26 _oldheaderinit = email.Header.Header.__init__
27 def _unifiedheaderinit(self, *args, **kw):
27 def _unifiedheaderinit(self, *args, **kw):
28 """
28 """
29 Python 2.7 introduces a backwards incompatible change
29 Python 2.7 introduces a backwards incompatible change
30 (Python issue1974, r70772) in email.Generator.Generator code:
30 (Python issue1974, r70772) in email.Generator.Generator code:
31 pre-2.7 code passed "continuation_ws='\t'" to the Header
31 pre-2.7 code passed "continuation_ws='\t'" to the Header
32 constructor, and 2.7 removed this parameter.
32 constructor, and 2.7 removed this parameter.
33
33
34 Default argument is continuation_ws=' ', which means that the
34 Default argument is continuation_ws=' ', which means that the
35 behavior is different in <2.7 and 2.7
35 behavior is different in <2.7 and 2.7
36
36
37 We consider the 2.7 behavior to be preferable, but need
37 We consider the 2.7 behavior to be preferable, but need
38 to have an unified behavior for versions 2.4 to 2.7
38 to have an unified behavior for versions 2.4 to 2.7
39 """
39 """
40 # override continuation_ws
40 # override continuation_ws
41 kw['continuation_ws'] = ' '
41 kw['continuation_ws'] = ' '
42 _oldheaderinit(self, *args, **kw)
42 _oldheaderinit(self, *args, **kw)
43
43
44 email.Header.Header.__dict__['__init__'] = _unifiedheaderinit
44 email.Header.Header.__dict__['__init__'] = _unifiedheaderinit
45
45
46 class STARTTLS(smtplib.SMTP):
46 class STARTTLS(smtplib.SMTP):
47 '''Derived class to verify the peer certificate for STARTTLS.
47 '''Derived class to verify the peer certificate for STARTTLS.
48
48
49 This class allows to pass any keyword arguments to SSL socket creation.
49 This class allows to pass any keyword arguments to SSL socket creation.
50 '''
50 '''
51 def __init__(self, sslkwargs, **kwargs):
51 def __init__(self, sslkwargs, **kwargs):
52 smtplib.SMTP.__init__(self, **kwargs)
52 smtplib.SMTP.__init__(self, **kwargs)
53 self._sslkwargs = sslkwargs
53 self._sslkwargs = sslkwargs
54
54
55 def starttls(self, keyfile=None, certfile=None):
55 def starttls(self, keyfile=None, certfile=None):
56 if not self.has_extn("starttls"):
56 if not self.has_extn("starttls"):
57 msg = "STARTTLS extension not supported by server"
57 msg = "STARTTLS extension not supported by server"
58 raise smtplib.SMTPException(msg)
58 raise smtplib.SMTPException(msg)
59 (resp, reply) = self.docmd("STARTTLS")
59 (resp, reply) = self.docmd("STARTTLS")
60 if resp == 220:
60 if resp == 220:
61 self.sock = sslutil.wrapsocket(self.sock, keyfile, certfile,
61 self.sock = sslutil.wrapsocket(self.sock, keyfile, certfile,
62 **self._sslkwargs)
62 **self._sslkwargs)
63 if not util.safehasattr(self.sock, "read"):
63 if not util.safehasattr(self.sock, "read"):
64 # using httplib.FakeSocket with Python 2.5.x or earlier
64 # using httplib.FakeSocket with Python 2.5.x or earlier
65 self.sock.read = self.sock.recv
65 self.sock.read = self.sock.recv
66 self.file = smtplib.SSLFakeFile(self.sock)
66 self.file = smtplib.SSLFakeFile(self.sock)
67 self.helo_resp = None
67 self.helo_resp = None
68 self.ehlo_resp = None
68 self.ehlo_resp = None
69 self.esmtp_features = {}
69 self.esmtp_features = {}
70 self.does_esmtp = 0
70 self.does_esmtp = 0
71 return (resp, reply)
71 return (resp, reply)
72
72
73 class SMTPS(smtplib.SMTP):
73 class SMTPS(smtplib.SMTP):
74 '''Derived class to verify the peer certificate for SMTPS.
74 '''Derived class to verify the peer certificate for SMTPS.
75
75
76 This class allows to pass any keyword arguments to SSL socket creation.
76 This class allows to pass any keyword arguments to SSL socket creation.
77 '''
77 '''
78 def __init__(self, sslkwargs, keyfile=None, certfile=None, **kwargs):
78 def __init__(self, sslkwargs, keyfile=None, certfile=None, **kwargs):
79 self.keyfile = keyfile
79 self.keyfile = keyfile
80 self.certfile = certfile
80 self.certfile = certfile
81 smtplib.SMTP.__init__(self, **kwargs)
81 smtplib.SMTP.__init__(self, **kwargs)
82 self.default_port = smtplib.SMTP_SSL_PORT
82 self.default_port = smtplib.SMTP_SSL_PORT
83 self._sslkwargs = sslkwargs
83 self._sslkwargs = sslkwargs
84
84
85 def _get_socket(self, host, port, timeout):
85 def _get_socket(self, host, port, timeout):
86 if self.debuglevel > 0:
86 if self.debuglevel > 0:
87 print >> sys.stderr, 'connect:', (host, port)
87 print >> sys.stderr, 'connect:', (host, port)
88 new_socket = socket.create_connection((host, port), timeout)
88 new_socket = socket.create_connection((host, port), timeout)
89 new_socket = sslutil.wrapsocket(new_socket,
89 new_socket = sslutil.wrapsocket(new_socket,
90 self.keyfile, self.certfile,
90 self.keyfile, self.certfile,
91 **self._sslkwargs)
91 **self._sslkwargs)
92 self.file = smtplib.SSLFakeFile(new_socket)
92 self.file = smtplib.SSLFakeFile(new_socket)
93 return new_socket
93 return new_socket
94
94
95 def _smtp(ui):
95 def _smtp(ui):
96 '''build an smtp connection and return a function to send mail'''
96 '''build an smtp connection and return a function to send mail'''
97 local_hostname = ui.config('smtp', 'local_hostname')
97 local_hostname = ui.config('smtp', 'local_hostname')
98 tls = ui.config('smtp', 'tls', 'none')
98 tls = ui.config('smtp', 'tls', 'none')
99 # backward compatible: when tls = true, we use starttls.
99 # backward compatible: when tls = true, we use starttls.
100 starttls = tls == 'starttls' or util.parsebool(tls)
100 starttls = tls == 'starttls' or util.parsebool(tls)
101 smtps = tls == 'smtps'
101 smtps = tls == 'smtps'
102 if (starttls or smtps) and not util.safehasattr(socket, 'ssl'):
102 if (starttls or smtps) and not util.safehasattr(socket, 'ssl'):
103 raise error.Abort(_("can't use TLS: Python SSL support not installed"))
103 raise error.Abort(_("can't use TLS: Python SSL support not installed"))
104 mailhost = ui.config('smtp', 'host')
104 mailhost = ui.config('smtp', 'host')
105 if not mailhost:
105 if not mailhost:
106 raise error.Abort(_('smtp.host not configured - cannot send mail'))
106 raise error.Abort(_('smtp.host not configured - cannot send mail'))
107 verifycert = ui.config('smtp', 'verifycert', 'strict')
107 verifycert = ui.config('smtp', 'verifycert', 'strict')
108 if verifycert not in ['strict', 'loose']:
108 if verifycert not in ['strict', 'loose']:
109 if util.parsebool(verifycert) is not False:
109 if util.parsebool(verifycert) is not False:
110 raise error.Abort(_('invalid smtp.verifycert configuration: %s')
110 raise error.Abort(_('invalid smtp.verifycert configuration: %s')
111 % (verifycert))
111 % (verifycert))
112 verifycert = False
112 verifycert = False
113 if (starttls or smtps) and verifycert:
113 if (starttls or smtps) and verifycert:
114 sslkwargs = sslutil.sslkwargs(ui, mailhost)
114 sslkwargs = sslutil.sslkwargs(ui, mailhost)
115 else:
115 else:
116 # 'ui' is required by sslutil.wrapsocket() and set by sslkwargs()
116 # 'ui' is required by sslutil.wrapsocket() and set by sslkwargs()
117 sslkwargs = {'ui': ui}
117 sslkwargs = {'ui': ui}
118 if smtps:
118 if smtps:
119 ui.note(_('(using smtps)\n'))
119 ui.note(_('(using smtps)\n'))
120 s = SMTPS(sslkwargs, local_hostname=local_hostname)
120 s = SMTPS(sslkwargs, local_hostname=local_hostname)
121 elif starttls:
121 elif starttls:
122 s = STARTTLS(sslkwargs, local_hostname=local_hostname)
122 s = STARTTLS(sslkwargs, local_hostname=local_hostname)
123 else:
123 else:
124 s = smtplib.SMTP(local_hostname=local_hostname)
124 s = smtplib.SMTP(local_hostname=local_hostname)
125 if smtps:
125 if smtps:
126 defaultport = 465
126 defaultport = 465
127 else:
127 else:
128 defaultport = 25
128 defaultport = 25
129 mailport = util.getport(ui.config('smtp', 'port', defaultport))
129 mailport = util.getport(ui.config('smtp', 'port', defaultport))
130 ui.note(_('sending mail: smtp host %s, port %s\n') %
130 ui.note(_('sending mail: smtp host %s, port %d\n') %
131 (mailhost, mailport))
131 (mailhost, mailport))
132 s.connect(host=mailhost, port=mailport)
132 s.connect(host=mailhost, port=mailport)
133 if starttls:
133 if starttls:
134 ui.note(_('(using starttls)\n'))
134 ui.note(_('(using starttls)\n'))
135 s.ehlo()
135 s.ehlo()
136 s.starttls()
136 s.starttls()
137 s.ehlo()
137 s.ehlo()
138 if (starttls or smtps) and verifycert:
138 if (starttls or smtps) and verifycert:
139 ui.note(_('(verifying remote certificate)\n'))
139 ui.note(_('(verifying remote certificate)\n'))
140 sslutil.validator(ui, mailhost)(s.sock, verifycert == 'strict')
140 sslutil.validator(ui, mailhost)(s.sock, verifycert == 'strict')
141 username = ui.config('smtp', 'username')
141 username = ui.config('smtp', 'username')
142 password = ui.config('smtp', 'password')
142 password = ui.config('smtp', 'password')
143 if username and not password:
143 if username and not password:
144 password = ui.getpass()
144 password = ui.getpass()
145 if username and password:
145 if username and password:
146 ui.note(_('(authenticating to mail server as %s)\n') %
146 ui.note(_('(authenticating to mail server as %s)\n') %
147 (username))
147 (username))
148 try:
148 try:
149 s.login(username, password)
149 s.login(username, password)
150 except smtplib.SMTPException as inst:
150 except smtplib.SMTPException as inst:
151 raise error.Abort(inst)
151 raise error.Abort(inst)
152
152
153 def send(sender, recipients, msg):
153 def send(sender, recipients, msg):
154 try:
154 try:
155 return s.sendmail(sender, recipients, msg)
155 return s.sendmail(sender, recipients, msg)
156 except smtplib.SMTPRecipientsRefused as inst:
156 except smtplib.SMTPRecipientsRefused as inst:
157 recipients = [r[1] for r in inst.recipients.values()]
157 recipients = [r[1] for r in inst.recipients.values()]
158 raise error.Abort('\n' + '\n'.join(recipients))
158 raise error.Abort('\n' + '\n'.join(recipients))
159 except smtplib.SMTPException as inst:
159 except smtplib.SMTPException as inst:
160 raise error.Abort(inst)
160 raise error.Abort(inst)
161
161
162 return send
162 return send
163
163
164 def _sendmail(ui, sender, recipients, msg):
164 def _sendmail(ui, sender, recipients, msg):
165 '''send mail using sendmail.'''
165 '''send mail using sendmail.'''
166 program = ui.config('email', 'method', 'smtp')
166 program = ui.config('email', 'method', 'smtp')
167 cmdline = '%s -f %s %s' % (program, util.email(sender),
167 cmdline = '%s -f %s %s' % (program, util.email(sender),
168 ' '.join(map(util.email, recipients)))
168 ' '.join(map(util.email, recipients)))
169 ui.note(_('sending mail: %s\n') % cmdline)
169 ui.note(_('sending mail: %s\n') % cmdline)
170 fp = util.popen(cmdline, 'w')
170 fp = util.popen(cmdline, 'w')
171 fp.write(msg)
171 fp.write(msg)
172 ret = fp.close()
172 ret = fp.close()
173 if ret:
173 if ret:
174 raise error.Abort('%s %s' % (
174 raise error.Abort('%s %s' % (
175 os.path.basename(program.split(None, 1)[0]),
175 os.path.basename(program.split(None, 1)[0]),
176 util.explainexit(ret)[0]))
176 util.explainexit(ret)[0]))
177
177
178 def _mbox(mbox, sender, recipients, msg):
178 def _mbox(mbox, sender, recipients, msg):
179 '''write mails to mbox'''
179 '''write mails to mbox'''
180 fp = open(mbox, 'ab+')
180 fp = open(mbox, 'ab+')
181 # Should be time.asctime(), but Windows prints 2-characters day
181 # Should be time.asctime(), but Windows prints 2-characters day
182 # of month instead of one. Make them print the same thing.
182 # of month instead of one. Make them print the same thing.
183 date = time.strftime('%a %b %d %H:%M:%S %Y', time.localtime())
183 date = time.strftime('%a %b %d %H:%M:%S %Y', time.localtime())
184 fp.write('From %s %s\n' % (sender, date))
184 fp.write('From %s %s\n' % (sender, date))
185 fp.write(msg)
185 fp.write(msg)
186 fp.write('\n\n')
186 fp.write('\n\n')
187 fp.close()
187 fp.close()
188
188
189 def connect(ui, mbox=None):
189 def connect(ui, mbox=None):
190 '''make a mail connection. return a function to send mail.
190 '''make a mail connection. return a function to send mail.
191 call as sendmail(sender, list-of-recipients, msg).'''
191 call as sendmail(sender, list-of-recipients, msg).'''
192 if mbox:
192 if mbox:
193 open(mbox, 'wb').close()
193 open(mbox, 'wb').close()
194 return lambda s, r, m: _mbox(mbox, s, r, m)
194 return lambda s, r, m: _mbox(mbox, s, r, m)
195 if ui.config('email', 'method', 'smtp') == 'smtp':
195 if ui.config('email', 'method', 'smtp') == 'smtp':
196 return _smtp(ui)
196 return _smtp(ui)
197 return lambda s, r, m: _sendmail(ui, s, r, m)
197 return lambda s, r, m: _sendmail(ui, s, r, m)
198
198
199 def sendmail(ui, sender, recipients, msg, mbox=None):
199 def sendmail(ui, sender, recipients, msg, mbox=None):
200 send = connect(ui, mbox=mbox)
200 send = connect(ui, mbox=mbox)
201 return send(sender, recipients, msg)
201 return send(sender, recipients, msg)
202
202
203 def validateconfig(ui):
203 def validateconfig(ui):
204 '''determine if we have enough config data to try sending email.'''
204 '''determine if we have enough config data to try sending email.'''
205 method = ui.config('email', 'method', 'smtp')
205 method = ui.config('email', 'method', 'smtp')
206 if method == 'smtp':
206 if method == 'smtp':
207 if not ui.config('smtp', 'host'):
207 if not ui.config('smtp', 'host'):
208 raise error.Abort(_('smtp specified as email transport, '
208 raise error.Abort(_('smtp specified as email transport, '
209 'but no smtp host configured'))
209 'but no smtp host configured'))
210 else:
210 else:
211 if not util.findexe(method):
211 if not util.findexe(method):
212 raise error.Abort(_('%r specified as email transport, '
212 raise error.Abort(_('%r specified as email transport, '
213 'but not in PATH') % method)
213 'but not in PATH') % method)
214
214
215 def mimetextpatch(s, subtype='plain', display=False):
215 def mimetextpatch(s, subtype='plain', display=False):
216 '''Return MIME message suitable for a patch.
216 '''Return MIME message suitable for a patch.
217 Charset will be detected as utf-8 or (possibly fake) us-ascii.
217 Charset will be detected as utf-8 or (possibly fake) us-ascii.
218 Transfer encodings will be used if necessary.'''
218 Transfer encodings will be used if necessary.'''
219
219
220 cs = 'us-ascii'
220 cs = 'us-ascii'
221 if not display:
221 if not display:
222 try:
222 try:
223 s.decode('us-ascii')
223 s.decode('us-ascii')
224 except UnicodeDecodeError:
224 except UnicodeDecodeError:
225 try:
225 try:
226 s.decode('utf-8')
226 s.decode('utf-8')
227 cs = 'utf-8'
227 cs = 'utf-8'
228 except UnicodeDecodeError:
228 except UnicodeDecodeError:
229 # We'll go with us-ascii as a fallback.
229 # We'll go with us-ascii as a fallback.
230 pass
230 pass
231
231
232 return mimetextqp(s, subtype, cs)
232 return mimetextqp(s, subtype, cs)
233
233
234 def mimetextqp(body, subtype, charset):
234 def mimetextqp(body, subtype, charset):
235 '''Return MIME message.
235 '''Return MIME message.
236 Quoted-printable transfer encoding will be used if necessary.
236 Quoted-printable transfer encoding will be used if necessary.
237 '''
237 '''
238 enc = None
238 enc = None
239 for line in body.splitlines():
239 for line in body.splitlines():
240 if len(line) > 950:
240 if len(line) > 950:
241 body = quopri.encodestring(body)
241 body = quopri.encodestring(body)
242 enc = "quoted-printable"
242 enc = "quoted-printable"
243 break
243 break
244
244
245 msg = email.MIMEText.MIMEText(body, subtype, charset)
245 msg = email.MIMEText.MIMEText(body, subtype, charset)
246 if enc:
246 if enc:
247 del msg['Content-Transfer-Encoding']
247 del msg['Content-Transfer-Encoding']
248 msg['Content-Transfer-Encoding'] = enc
248 msg['Content-Transfer-Encoding'] = enc
249 return msg
249 return msg
250
250
251 def _charsets(ui):
251 def _charsets(ui):
252 '''Obtains charsets to send mail parts not containing patches.'''
252 '''Obtains charsets to send mail parts not containing patches.'''
253 charsets = [cs.lower() for cs in ui.configlist('email', 'charsets')]
253 charsets = [cs.lower() for cs in ui.configlist('email', 'charsets')]
254 fallbacks = [encoding.fallbackencoding.lower(),
254 fallbacks = [encoding.fallbackencoding.lower(),
255 encoding.encoding.lower(), 'utf-8']
255 encoding.encoding.lower(), 'utf-8']
256 for cs in fallbacks: # find unique charsets while keeping order
256 for cs in fallbacks: # find unique charsets while keeping order
257 if cs not in charsets:
257 if cs not in charsets:
258 charsets.append(cs)
258 charsets.append(cs)
259 return [cs for cs in charsets if not cs.endswith('ascii')]
259 return [cs for cs in charsets if not cs.endswith('ascii')]
260
260
261 def _encode(ui, s, charsets):
261 def _encode(ui, s, charsets):
262 '''Returns (converted) string, charset tuple.
262 '''Returns (converted) string, charset tuple.
263 Finds out best charset by cycling through sendcharsets in descending
263 Finds out best charset by cycling through sendcharsets in descending
264 order. Tries both encoding and fallbackencoding for input. Only as
264 order. Tries both encoding and fallbackencoding for input. Only as
265 last resort send as is in fake ascii.
265 last resort send as is in fake ascii.
266 Caveat: Do not use for mail parts containing patches!'''
266 Caveat: Do not use for mail parts containing patches!'''
267 try:
267 try:
268 s.decode('ascii')
268 s.decode('ascii')
269 except UnicodeDecodeError:
269 except UnicodeDecodeError:
270 sendcharsets = charsets or _charsets(ui)
270 sendcharsets = charsets or _charsets(ui)
271 for ics in (encoding.encoding, encoding.fallbackencoding):
271 for ics in (encoding.encoding, encoding.fallbackencoding):
272 try:
272 try:
273 u = s.decode(ics)
273 u = s.decode(ics)
274 except UnicodeDecodeError:
274 except UnicodeDecodeError:
275 continue
275 continue
276 for ocs in sendcharsets:
276 for ocs in sendcharsets:
277 try:
277 try:
278 return u.encode(ocs), ocs
278 return u.encode(ocs), ocs
279 except UnicodeEncodeError:
279 except UnicodeEncodeError:
280 pass
280 pass
281 except LookupError:
281 except LookupError:
282 ui.warn(_('ignoring invalid sendcharset: %s\n') % ocs)
282 ui.warn(_('ignoring invalid sendcharset: %s\n') % ocs)
283 # if ascii, or all conversion attempts fail, send (broken) ascii
283 # if ascii, or all conversion attempts fail, send (broken) ascii
284 return s, 'us-ascii'
284 return s, 'us-ascii'
285
285
286 def headencode(ui, s, charsets=None, display=False):
286 def headencode(ui, s, charsets=None, display=False):
287 '''Returns RFC-2047 compliant header from given string.'''
287 '''Returns RFC-2047 compliant header from given string.'''
288 if not display:
288 if not display:
289 # split into words?
289 # split into words?
290 s, cs = _encode(ui, s, charsets)
290 s, cs = _encode(ui, s, charsets)
291 return str(email.Header.Header(s, cs))
291 return str(email.Header.Header(s, cs))
292 return s
292 return s
293
293
294 def _addressencode(ui, name, addr, charsets=None):
294 def _addressencode(ui, name, addr, charsets=None):
295 name = headencode(ui, name, charsets)
295 name = headencode(ui, name, charsets)
296 try:
296 try:
297 acc, dom = addr.split('@')
297 acc, dom = addr.split('@')
298 acc = acc.encode('ascii')
298 acc = acc.encode('ascii')
299 dom = dom.decode(encoding.encoding).encode('idna')
299 dom = dom.decode(encoding.encoding).encode('idna')
300 addr = '%s@%s' % (acc, dom)
300 addr = '%s@%s' % (acc, dom)
301 except UnicodeDecodeError:
301 except UnicodeDecodeError:
302 raise error.Abort(_('invalid email address: %s') % addr)
302 raise error.Abort(_('invalid email address: %s') % addr)
303 except ValueError:
303 except ValueError:
304 try:
304 try:
305 # too strict?
305 # too strict?
306 addr = addr.encode('ascii')
306 addr = addr.encode('ascii')
307 except UnicodeDecodeError:
307 except UnicodeDecodeError:
308 raise error.Abort(_('invalid local address: %s') % addr)
308 raise error.Abort(_('invalid local address: %s') % addr)
309 return email.Utils.formataddr((name, addr))
309 return email.Utils.formataddr((name, addr))
310
310
311 def addressencode(ui, address, charsets=None, display=False):
311 def addressencode(ui, address, charsets=None, display=False):
312 '''Turns address into RFC-2047 compliant header.'''
312 '''Turns address into RFC-2047 compliant header.'''
313 if display or not address:
313 if display or not address:
314 return address or ''
314 return address or ''
315 name, addr = email.Utils.parseaddr(address)
315 name, addr = email.Utils.parseaddr(address)
316 return _addressencode(ui, name, addr, charsets)
316 return _addressencode(ui, name, addr, charsets)
317
317
318 def addrlistencode(ui, addrs, charsets=None, display=False):
318 def addrlistencode(ui, addrs, charsets=None, display=False):
319 '''Turns a list of addresses into a list of RFC-2047 compliant headers.
319 '''Turns a list of addresses into a list of RFC-2047 compliant headers.
320 A single element of input list may contain multiple addresses, but output
320 A single element of input list may contain multiple addresses, but output
321 always has one address per item'''
321 always has one address per item'''
322 if display:
322 if display:
323 return [a.strip() for a in addrs if a.strip()]
323 return [a.strip() for a in addrs if a.strip()]
324
324
325 result = []
325 result = []
326 for name, addr in email.Utils.getaddresses(addrs):
326 for name, addr in email.Utils.getaddresses(addrs):
327 if name or addr:
327 if name or addr:
328 result.append(_addressencode(ui, name, addr, charsets))
328 result.append(_addressencode(ui, name, addr, charsets))
329 return result
329 return result
330
330
331 def mimeencode(ui, s, charsets=None, display=False):
331 def mimeencode(ui, s, charsets=None, display=False):
332 '''creates mime text object, encodes it if needed, and sets
332 '''creates mime text object, encodes it if needed, and sets
333 charset and transfer-encoding accordingly.'''
333 charset and transfer-encoding accordingly.'''
334 cs = 'us-ascii'
334 cs = 'us-ascii'
335 if not display:
335 if not display:
336 s, cs = _encode(ui, s, charsets)
336 s, cs = _encode(ui, s, charsets)
337 return mimetextqp(s, 'plain', cs)
337 return mimetextqp(s, 'plain', cs)
@@ -1,3728 +1,3728
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11 import re
11 import re
12
12
13 from .i18n import _
13 from .i18n import _
14 from . import (
14 from . import (
15 destutil,
15 destutil,
16 encoding,
16 encoding,
17 error,
17 error,
18 hbisect,
18 hbisect,
19 match as matchmod,
19 match as matchmod,
20 node,
20 node,
21 obsolete as obsmod,
21 obsolete as obsmod,
22 parser,
22 parser,
23 pathutil,
23 pathutil,
24 phases,
24 phases,
25 repoview,
25 repoview,
26 util,
26 util,
27 )
27 )
28
28
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst.

    Returns a descending generatorset of the ancestors of ``revs``
    (the input revisions themselves are yielded as well).  When
    ``followfirst`` is true, only first parents are followed.
    """
    # slice parentrevs() after the first entry when following only first
    # parents; None keeps both parents
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # lazily merge the sorted input revs with the parents discovered so
        # far, always yielding the highest pending revision; the heap holds
        # negated revs because heapq is a min-heap
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current == inputrev:
                # the head of the heap caught up with the input stream:
                # feed the next input revision into the heap
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
61
61
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst.

    Returns an ascending generatorset.  When ``followfirst`` is true,
    only first-parent links are considered.
    """
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # every revision descends from the null revision
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # sweep forward from just past the lowest input rev, yielding
            # any revision with a (considered) parent already known to be
            # in the descendant set
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
90
90
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    Pure-Python fallback for changelogs without a native
    ``reachableroots``; see ``reachableroots`` below.
    """
    if not roots:
        # NOTE(review): this early exit returns a plain list while the empty
        # case below returns a baseset; the caller wraps the result in
        # baseset() either way, so it is harmless — but inconsistent.
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        # walk ancestors of the heads, never descending below minroot
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                # only the roots themselves matter; no need to record the
                # path through this revision
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # second pass (in ascending rev order): mark every visited revision
    # that has a reachable parent, growing roots into full paths
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
128
128
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    rootlist = list(roots)
    headlist = list(heads)
    try:
        # fast path: native implementation on the changelog, if present
        found = repo.changelog.reachableroots(minroot, headlist, rootlist,
                                              includepath)
    except AttributeError:
        # pure-Python fallback
        found = _reachablerootspure(repo, minroot, rootlist, headlist,
                                    includepath)
    result = baseset(found)
    result.sort()
    return result
145
145
# Grammar table driving the revset expression parser.  Each entry maps a
# token type to a 5-tuple describing how the parser may use it.
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}
173
173
# words that act as operators and therefore can never be bare symbols
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
183
183
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    Each token is a ``(type, value, position)`` triple; the stream always
    ends with an ``('end', None, pos)`` token.

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    # main scanner loop: pos is the index of the character under inspection
    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            # r-prefixed strings are kept raw; plain strings get their
            # backslash escapes decoded
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression: re-split on '-' and emit the
                    # pieces as alternating symbol / minus tokens
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            # compensate for the unconditional pos += 1 below
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
301
301
def parseerrordetail(inst):
    """Compose error message from specified ParseError object

    When the error carries a position (second argument), format it as
    ``at <pos>: <message>``.
    """
    if len(inst.args) > 1:
        # the second argument is a numeric character offset: format it with
        # %d, not %s, so translators can tell it is a number (l10n fix)
        return _('at %d: %s') % (inst.args[1], inst.args[0])
    else:
        return inst.args[0]
309
309
310 # helpers
310 # helpers
311
311
def getstring(x, err):
    # accept both quoted strings and bare symbols as string-valued nodes
    if x:
        kind = x[0]
        if kind == 'string' or kind == 'symbol':
            return x[1]
    raise error.ParseError(err)
316
316
def getlist(x):
    # flatten a left-nested chain of 'list' nodes into a Python list,
    # iteratively (the chain nests on the first child)
    items = []
    while x and x[0] == 'list':
        items.append(x[2])
        x = x[1]
    if x:
        items.append(x)
    items.reverse()
    return items
323
323
def getargs(x, min, max, err):
    # flatten the argument tree and enforce the arity bounds;
    # a negative max means "no upper limit"
    l = getlist(x)
    if len(l) < min:
        raise error.ParseError(err)
    if max >= 0 and len(l) > max:
        raise error.ParseError(err)
    return l
329
329
def getargsdict(x, funcname, keys):
    # map positional and key=value revset arguments onto the given key names
    args = getlist(x)
    return parser.buildargsdict(args, funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
333
333
def isvalidsymbol(tree):
    """Examine whether specified ``tree`` is valid ``symbol`` or not
    """
    if tree[0] != 'symbol':
        return False
    return len(tree) > 1
338
338
def getsymbol(tree):
    """Get symbol name from valid ``symbol`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidsymbol``.
    """
    # the name is the payload right after the node type
    name = tree[1]
    return name
345
345
def isvalidfunc(tree):
    """Examine whether specified ``tree`` is valid ``func`` or not
    """
    if tree[0] != 'func' or len(tree) <= 1:
        return False
    return isvalidsymbol(tree[1])
350
350
def getfuncname(tree):
    """Get function name from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    # the function's name lives in the symbol node at position 1
    symtree = tree[1]
    return getsymbol(symtree)
357
357
def getfuncargs(tree):
    """Get list of function arguments from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    # a 2-element func node carries no argument subtree at all
    return getlist(tree[2]) if len(tree) > 2 else []
367
367
def getset(repo, subset, x):
    """Evaluate parsed tree ``x`` against ``subset`` and return a smartset."""
    if not x:
        raise error.ParseError(_("missing argument"))
    # dispatch on the node type to the matching operator method
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # having 'isascending' marks a smartset-compatible object
        return s
    if (repo.ui.configbool('devel', 'all-warnings')
            or repo.ui.configbool('devel', 'old-revset')):
        # else case should not happen, because all non-func are internal,
        # ignoring for now.
        if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
            repo.ui.develwarn('revset "%s" use list instead of smartset, '
                              '(upgrade your code)' % x[1][1])
    # compatibility shim: wrap plain containers returned by old predicates
    return baseset(s)
382
382
def _getrevsource(repo, r):
    # look in the changeset's extra dict for any of the known source
    # markers, returning the first one that resolves to a revision
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label not in extra:
            continue
        try:
            return repo[extra[label]].rev()
        except error.RepoLookupError:
            # recorded source no longer exists; try the next marker
            pass
    return None
392
392
393 # operator methods
393 # operator methods
394
394
def stringset(repo, subset, x):
    # resolve the literal to a revision number
    x = repo[x].rev()
    if x in subset:
        return baseset([x])
    # nullrev is implicitly a member of any fullreposet
    if x == node.nullrev and isinstance(subset, fullreposet):
        return baseset([x])
    return baseset()
401
401
def rangeset(repo, subset, x, y):
    """Evaluate an 'x:y' range against the whole repo, then restrict it."""
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    # each endpoint set may hold several revisions: take the first of the
    # left operand and the last of the right one
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # range ending at the working directory: run to the repo tip, then
        # append the virtual working-directory revision
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        # range starting at the working directory: emit it first, then
        # count down from the tip
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        # reversed range (m > n): iterate downwards
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
426
426
def dagrange(repo, subset, x, y):
    # evaluate both endpoints against the whole repository, then collect
    # every revision lying on a path between them
    universe = fullreposet(repo)
    roots = getset(repo, universe, x)
    heads = getset(repo, universe, y)
    xs = reachableroots(repo, roots, heads, includepath=True)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return xs & subset
434
434
def andset(repo, subset, x, y):
    # intersect by evaluating the right operand inside the left's result
    left = getset(repo, subset, x)
    return getset(repo, left, y)
437
437
def orset(repo, subset, *xs):
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    # split in halves and recurse so the recursion depth stays logarithmic
    mid = len(xs) // 2
    left = orset(repo, subset, *xs[:mid])
    right = orset(repo, subset, *xs[mid:])
    return left + right
446
446
def notset(repo, subset, x):
    # complement of x, taken within subset
    excluded = getset(repo, subset, x)
    return subset - excluded
449
449
def listset(repo, subset, a, b):
    # 'list' nodes ("x, y") are only meaningful as function arguments;
    # reaching this method means a bare comma appeared outside a call
    raise error.ParseError(_("can't use a list in this context"))
452
452
def keyvaluepair(repo, subset, k, v):
    # 'keyvalue' nodes ("key=value") are only meaningful as function
    # arguments; anywhere else they are a syntax error
    raise error.ParseError(_("can't use a key-value pair in this context"))
455
455
def func(repo, subset, a, b):
    # dispatch a parsed function call to the registered predicate
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    # unknown function: suggest only documented (docstring-carrying)
    # predicates in the error message
    syms = [s for (s, fn) in symbols.items()
            if getattr(fn, '__doc__', None) is not None]
    raise error.UnknownIdentifier(a[1], syms)
464
464
465 # functions
465 # functions
466
466
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    #
    # fix: the funcname passed to getargsdict is used in its error messages;
    # the original passed 'limit' (copy-pasted from the limit() predicate),
    # so arity errors blamed the wrong function
    args = getargsdict(x, '_destupdate', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])
471
471
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    # reject any arguments before computing the destination
    getargs(x, 0, 0, _("_mergedefaultdest takes no arguments"))
    dest = baseset([destutil.destmerge(repo)])
    return subset & dest
476
476
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pattern = getstring(x, _("adds requires a pattern"))
    # status field 1 holds the added files
    return checkstatus(repo, subset, pattern, 1)
488
488
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # fold the pairwise ancestor operation over every revision of every
    # argument set
    for arg in l:
        for r in getset(repo, rl, arg):
            anc = repo[r] if anc is None else anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
513
513
def _ancestors(repo, subset, x, followfirst=False):
    # evaluate the heads against the whole repo, then intersect their
    # ancestor closure with subset
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    ancs = _revancestors(repo, heads, followfirst)
    return subset & ancs
520
520
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    # follows both parents; see _firstancestors for first-parent-only
    return _ancestors(repo, subset, x)
526
526
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (kept as comments, not a docstring: symbols without a docstring are
    # excluded from the suggestions built in func() above)
    return _ancestors(repo, subset, x, followfirst=True)
531
531
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ancestors = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # walk n steps up the first-parent chain
        steps = n
        while steps > 0:
            r = cl.parentrevs(r)[0]
            steps -= 1
        ancestors.add(r)
    return subset & ancestors
548
548
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    needle = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(needle)
    # case-insensitive match against the committer of each candidate rev
    return subset.filter(lambda r: matcher(encoding.lower(repo[r].user())))
557
557
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    which = getstring(x, _("bisect requires a string")).lower()
    revs = set(hbisect.get(repo, which))
    return subset & revs
574
574
# Backward-compatibility alias for bisect(); intentionally has no help entry
# so that it is not advertised any more.
def bisected(repo, subset, x):
    return bisect(repo, subset, x)
579
579
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bookmarkrevs = set()
        if kind == 'literal':
            # exact name lookup
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bookmarkrevs.add(repo[bmrev].rev())
        else:
            # pattern lookup: collect every bookmark whose name matches
            matched = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matched.add(bmrev)
            if not matched:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matched:
                bookmarkrevs.add(repo[bmrev].rev())
    else:
        # no argument: all bookmarked revisions
        bookmarkrevs = set(repo[r].rev()
                           for r in repo._bookmarks.values())
    bookmarkrevs -= set([node.nullrev])
    return subset & bookmarkrevs
617
617
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # the argument is another revspec (e.g. tip()), not a string
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # set form: select everything on the branches of the given revisions
    revs = getset(repo, fullreposet(repo), x)
    names = set()
    for rev in revs:
        names.add(getbi(rev)[0])
    inrevs = revs.__contains__
    return subset.filter(lambda r: inrevs(r) or getbi(r)[0] in names)
653
653
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    return subset & obsmod.getrevs(repo, 'bumped')
664
664
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        # only a bundlerepo's changelog carries this attribute
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
676
676
def checkstatus(repo, subset, pat, field):
    # Shared helper for the status-based predicates: keep revisions whose
    # status tuple entry at index 'field' contains a file matching 'pat'.
    hasset = matchmod.patkind(pat) == 'set'

    # one-slot matcher cache; fileset patterns need a fresh matcher per ctx
    mcache = [None]
    def matches(rev):
        ctx = repo[rev]
        if hasset or not mcache[0]:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat],
                                       ctx=ctx)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # fast path: pattern names exactly one literal file
            fname = m.files()[0]
        if fname is not None:
            if fname not in ctx.files():
                return False
        else:
            for f in ctx.files():
                if m(f):
                    break
            else:
                return False
        changed = repo.status(ctx.p1().node(), ctx.node())[field]
        if fname is not None:
            if fname in changed:
                return True
        else:
            for f in changed:
                if m(f):
                    return True

    return subset.filter(matches)
708
708
def _children(repo, narrow, parentset):
    # Select every rev in 'narrow' that has at least one parent in 'parentset'.
    if not parentset:
        return baseset()
    found = set()
    parentrevsfn = repo.changelog.parentrevs
    minparent = parentset.min()
    for rev in narrow:
        if rev <= minparent:
            # children always have a higher revision number than parents
            continue
        for parent in parentrevsfn(rev):
            if parent in parentset:
                found.add(rev)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(found)
724
724
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    parents = getset(repo, fullreposet(repo), x)
    return subset & _children(repo, subset, parents)
732
732
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda rev: repo[rev].closesbranch())
740
740
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(rev):
        ctx = repo[rev]
        if matchmod.patkind(pat):
            # real pattern: scan the whole manifest
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
            return any(m(f) for f in ctx.manifest())
        # plain path: a single direct manifest membership test
        return pathutil.canonpath(repo.root, repo.getcwd(), pat) in ctx

    return subset.filter(matches)
767
767
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    args = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if args:
        # i18n: "converted" is a keyword
        rev = getstring(args[0], _('converted requires a revision'))

    def _wasconverted(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(_wasconverted)
789
789
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    spec = getstring(x, _("date requires a string"))
    matchfn = util.matchdate(spec)
    # compare against the timestamp part of each changeset's (ts, tz) date
    return subset.filter(lambda rev: matchfn(repo[rev].date()[0]))
798
798
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = encoding.lower(getstring(x, _("desc requires a string")))

    # case-insensitive substring match against each commit description
    return subset.filter(
        lambda rev: needle in encoding.lower(repo[rev].description()))
811
811
def _descendants(repo, subset, x, followfirst=False):
    # Shared implementation behind descendants() and _firstdescendants().
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    descrevs = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    rootpart = subset & roots
    descpart = subset & descrevs
    result = rootpart + descpart
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        result = subset & result
    return result
830
830
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    # thin wrapper; the work happens in _descendants()
    return _descendants(repo, subset, x)
836
836
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Same as ``descendants(set)`` except only first parents are followed.
    return _descendants(repo, subset, x, followfirst=True)
841
841
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains every possible destination, so iterate over it and test
    # whether each revision's source(s) appear in the argument set.  Even when
    # the immediate src of r is not in the arg set, src's own source (or one
    # further back) may be.  Scanning past the immediate src lets transitive
    # transplants and rebases yield the same results as transitive grafts.
    for r in subset:
        srcrev = _getrevsource(repo, r)
        chain = None

        while srcrev is not None:
            if chain is None:
                chain = []

            chain.append(r)

            # The visited chain is a match if the current source is in the
            # arg set.  Since every candidate dest is visited by iterating
            # subset, dests further back in the chain will be tested by a
            # different iteration over subset.  Likewise, if the src was
            # already selected, the current chain can be selected without
            # going back further.
            if srcrev in sources or srcrev in dests:
                dests.update(chain)
                break

            r = srcrev
            srcrev = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
885
885
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    return subset & obsmod.getrevs(repo, 'divergent')
894
894
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    return subset & obsmod.getrevs(repo, 'extinct')
903
903
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _hasextra(r):
        extradict = repo[r].extra()
        return label in extradict and (value is None
                                       or matcher(extradict[label]))

    return subset.filter(_hasextra)
933
933
934 def filelog(repo, subset, x):
934 def filelog(repo, subset, x):
935 """``filelog(pattern)``
935 """``filelog(pattern)``
936 Changesets connected to the specified filelog.
936 Changesets connected to the specified filelog.
937
937
938 For performance reasons, visits only revisions mentioned in the file-level
938 For performance reasons, visits only revisions mentioned in the file-level
939 filelog, rather than filtering through all changesets (much faster, but
939 filelog, rather than filtering through all changesets (much faster, but
940 doesn't include deletes or duplicate changes). For a slower, more accurate
940 doesn't include deletes or duplicate changes). For a slower, more accurate
941 result, use ``file()``.
941 result, use ``file()``.
942
942
943 The pattern without explicit kind like ``glob:`` is expected to be
943 The pattern without explicit kind like ``glob:`` is expected to be
944 relative to the current directory and match against a file exactly
944 relative to the current directory and match against a file exactly
945 for efficiency.
945 for efficiency.
946
946
947 If some linkrev points to revisions filtered by the current repoview, we'll
947 If some linkrev points to revisions filtered by the current repoview, we'll
948 work around it to return a non-filtered value.
948 work around it to return a non-filtered value.
949 """
949 """
950
950
951 # i18n: "filelog" is a keyword
951 # i18n: "filelog" is a keyword
952 pat = getstring(x, _("filelog requires a pattern"))
952 pat = getstring(x, _("filelog requires a pattern"))
953 s = set()
953 s = set()
954 cl = repo.changelog
954 cl = repo.changelog
955
955
956 if not matchmod.patkind(pat):
956 if not matchmod.patkind(pat):
957 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
957 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
958 files = [f]
958 files = [f]
959 else:
959 else:
960 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
960 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
961 files = (f for f in repo[None] if m(f))
961 files = (f for f in repo[None] if m(f))
962
962
963 for f in files:
963 for f in files:
964 backrevref = {} # final value for: filerev -> changerev
964 backrevref = {} # final value for: filerev -> changerev
965 lowestchild = {} # lowest known filerev child of a filerev
965 lowestchild = {} # lowest known filerev child of a filerev
966 delayed = [] # filerev with filtered linkrev, for post-processing
966 delayed = [] # filerev with filtered linkrev, for post-processing
967 lowesthead = None # cache for manifest content of all head revisions
967 lowesthead = None # cache for manifest content of all head revisions
968 fl = repo.file(f)
968 fl = repo.file(f)
969 for fr in list(fl):
969 for fr in list(fl):
970 rev = fl.linkrev(fr)
970 rev = fl.linkrev(fr)
971 if rev not in cl:
971 if rev not in cl:
972 # changerev pointed in linkrev is filtered
972 # changerev pointed in linkrev is filtered
973 # record it for post processing.
973 # record it for post processing.
974 delayed.append((fr, rev))
974 delayed.append((fr, rev))
975 continue
975 continue
976 for p in fl.parentrevs(fr):
976 for p in fl.parentrevs(fr):
977 if 0 <= p and p not in lowestchild:
977 if 0 <= p and p not in lowestchild:
978 lowestchild[p] = fr
978 lowestchild[p] = fr
979 backrevref[fr] = rev
979 backrevref[fr] = rev
980 s.add(rev)
980 s.add(rev)
981
981
982 # Post-processing of all filerevs we skipped because they were
982 # Post-processing of all filerevs we skipped because they were
983 # filtered. If such filerevs have known and unfiltered children, this
983 # filtered. If such filerevs have known and unfiltered children, this
984 # means they have an unfiltered appearance out there. We'll use linkrev
984 # means they have an unfiltered appearance out there. We'll use linkrev
985 # adjustment to find one of these appearances. The lowest known child
985 # adjustment to find one of these appearances. The lowest known child
986 # will be used as a starting point because it is the best upper-bound we
986 # will be used as a starting point because it is the best upper-bound we
987 # have.
987 # have.
988 #
988 #
989 # This approach will fail when an unfiltered but linkrev-shadowed
989 # This approach will fail when an unfiltered but linkrev-shadowed
990 # appearance exists in a head changeset without unfiltered filerev
990 # appearance exists in a head changeset without unfiltered filerev
991 # children anywhere.
991 # children anywhere.
992 while delayed:
992 while delayed:
993 # must be a descending iteration. To slowly fill lowest child
993 # must be a descending iteration. To slowly fill lowest child
994 # information that is of potential use by the next item.
994 # information that is of potential use by the next item.
995 fr, rev = delayed.pop()
995 fr, rev = delayed.pop()
996 lkr = rev
996 lkr = rev
997
997
998 child = lowestchild.get(fr)
998 child = lowestchild.get(fr)
999
999
1000 if child is None:
1000 if child is None:
1001 # search for existence of this file revision in a head revision.
1001 # search for existence of this file revision in a head revision.
1002 # There are three possibilities:
1002 # There are three possibilities:
1003 # - the revision exists in a head and we can find an
1003 # - the revision exists in a head and we can find an
1004 # introduction from there,
1004 # introduction from there,
1005 # - the revision does not exist in a head because it has been
1005 # - the revision does not exist in a head because it has been
1006 # changed since its introduction: we would have found a child
1006 # changed since its introduction: we would have found a child
1007 # and be in the other 'else' clause,
1007 # and be in the other 'else' clause,
1008 # - all versions of the revision are hidden.
1008 # - all versions of the revision are hidden.
1009 if lowesthead is None:
1009 if lowesthead is None:
1010 lowesthead = {}
1010 lowesthead = {}
1011 for h in repo.heads():
1011 for h in repo.heads():
1012 fnode = repo[h].manifest().get(f)
1012 fnode = repo[h].manifest().get(f)
1013 if fnode is not None:
1013 if fnode is not None:
1014 lowesthead[fl.rev(fnode)] = h
1014 lowesthead[fl.rev(fnode)] = h
1015 headrev = lowesthead.get(fr)
1015 headrev = lowesthead.get(fr)
1016 if headrev is None:
1016 if headrev is None:
1017 # content is nowhere unfiltered
1017 # content is nowhere unfiltered
1018 continue
1018 continue
1019 rev = repo[headrev][f].introrev()
1019 rev = repo[headrev][f].introrev()
1020 else:
1020 else:
1021 # the lowest known child is a good upper bound
1021 # the lowest known child is a good upper bound
1022 childcrev = backrevref[child]
1022 childcrev = backrevref[child]
1023 # XXX this does not guarantee returning the lowest
1023 # XXX this does not guarantee returning the lowest
1024 # introduction of this revision, but this gives a
1024 # introduction of this revision, but this gives a
1025 # result which is a good start and will fit in most
1025 # result which is a good start and will fit in most
1026 # cases. We probably need to fix the multiple
1026 # cases. We probably need to fix the multiple
1027 # introductions case properly (report each
1027 # introductions case properly (report each
1028 # introduction, even for identical file revisions)
1028 # introduction, even for identical file revisions)
1029 # once and for all at some point anyway.
1029 # once and for all at some point anyway.
1030 for p in repo[childcrev][f].parents():
1030 for p in repo[childcrev][f].parents():
1031 if p.filerev() == fr:
1031 if p.filerev() == fr:
1032 rev = p.rev()
1032 rev = p.rev()
1033 break
1033 break
1034 if rev == lkr: # no shadowed entry found
1034 if rev == lkr: # no shadowed entry found
1035 # XXX This should never happen unless some manifest points
1035 # XXX This should never happen unless some manifest points
1036 # to biggish file revisions (like a revision that uses a
1036 # to biggish file revisions (like a revision that uses a
1037 # parent that never appears in the manifest ancestors)
1037 # parent that never appears in the manifest ancestors)
1038 continue
1038 continue
1039
1039
1040 # Fill the data for the next iteration.
1040 # Fill the data for the next iteration.
1041 for p in fl.parentrevs(fr):
1041 for p in fl.parentrevs(fr):
1042 if 0 <= p and p not in lowestchild:
1042 if 0 <= p and p not in lowestchild:
1043 lowestchild[p] = fr
1043 lowestchild[p] = fr
1044 backrevref[fr] = rev
1044 backrevref[fr] = rev
1045 s.add(rev)
1045 s.add(rev)
1046
1046
1047 return subset & s
1047 return subset & s
1048
1048
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # limit() defaults n to 1 and performs all argument parsing/validation.
    return limit(repo, subset, x)
1054
1054
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation behind follow() and _followfirst().  Without a
    # pattern argument, this is the ancestry of the working directory's
    # first parent; with one, the union of the ancestries of all matching
    # files in that context.  'name' is only used in error messages.
    args = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
    wctx = repo['.']
    if not args:
        revs = _revancestors(repo, baseset([wctx.rev()]), followfirst)
        return subset & revs

    pat = getstring(args[0], _("%s expected a pattern") % name)
    m = matchmod.match(repo.root, repo.getcwd(), [pat],
                       ctx=repo[None], default='path')
    revs = set()
    for fname in wctx:
        if not m(fname):
            continue
        fctx = wctx[fname]
        revs.update(actx.rev() for actx in fctx.ancestors(followfirst))
        # include the revision responsible for the most recent version
        revs.add(fctx.introrev())
    return subset & revs
1074
1074
def follow(repo, subset, x):
    """``follow([pattern])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern is followed, including copies.
    """
    # delegate to the shared implementation; 'follow' is the name used in
    # its error messages
    return _follow(repo, subset, x, 'follow')
1082
1082
def _followfirst(repo, subset, x):
    # ``followfirst([pattern])``
    # Like ``follow([pattern])`` but follows only the first parent of
    # every revisions or files revisions.
    # Internal helper (leading underscore): not part of the documented
    # revset language.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1088
1088
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # intersecting with spanset keeps only real revisions
    return subset & spanset(repo)  # drop "null" if any
1096
1096
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        pattern = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(rev):
        # search the changed file names, the committer and the description
        ctx = repo[rev]
        fields = ctx.files() + [ctx.user(), ctx.description()]
        return any(pattern.search(t) for t in fields)

    return subset.filter(matches)
1117
1117
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        # dispatch on the two-character prefix; the remainder is the payload
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    # build one matcher and reuse it for every revision tested below;
    # repo[None] is the working directory when no 'r:' was given
    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        # a revision qualifies if any of its changed files matches
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1174
1174
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles, passing the pattern with the 'p:' prefix
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1187
1187
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    cl = repo.changelog
    headrevs = set()
    # collect the revision number of every head of every named branch
    for branch, nodes in repo.branchmap().iteritems():
        for n in nodes:
            headrevs.add(cl.rev(n))
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(headrevs) & subset
1203
1203
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    # a member is a head iff it is not the parent of any other member
    members = getset(repo, subset, x)
    parentrevs = parents(repo, subset, x)
    return members - parentrevs
1211
1211
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # the revisions filtered out of the 'visible' view are the hidden ones
    return subset & repoview.filterrevs(repo, 'visible')
1220
1220
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        ctx = repo[r]
        # lowercase each candidate field for the case-insensitive test
        for text in ctx.files() + [ctx.user(), ctx.description()]:
            if kw in encoding.lower(text):
                return True
        return False

    return subset.filter(matches)
1235
1235
def limit(repo, subset, x):
    """``limit(set[, n[, offset]])``
    First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    try:
        lim, ofs = 1, 0
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    revs = getset(repo, fullreposet(repo), args['set'])
    picked = []
    it = iter(revs)
    # skip the first 'ofs' revisions, stopping early on exhaustion
    for _skip in xrange(ofs):
        if next(it, None) is None:
            break
    # then take up to 'lim' revisions that are also in the subset
    for _take in xrange(lim):
        r = next(it, None)
        if r is None:
            break
        if r in subset:
            picked.append(r)
    return baseset(picked)
1271
1271
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        try:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
        except (TypeError, ValueError):
            # i18n: "last" is a keyword
            raise error.ParseError(_("last expects a number"))
    revs = getset(repo, fullreposet(repo), l[0])
    # walk the set from the end by reversing it in place
    revs.reverse()
    picked = []
    it = iter(revs)
    for _i in xrange(lim):
        r = next(it, None)
        if r is None:
            break
        if r in subset:
            picked.append(r)
    return baseset(picked)
1297
1297
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    try:
        # like the builtin max(), .max() raises ValueError when empty
        top = candidates.max()
    except ValueError:
        return baseset()
    if top in subset:
        return baseset([top])
    return baseset()
1312
1312
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog

    def ismerge(r):
        # a merge has a real (non -1) second parent
        return cl.parentrevs(r)[1] != -1

    return subset.filter(ismerge)
1321
1321
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    if not subset:
        return baseset()
    cl = repo.changelog
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # childcount[r - baserev] counts how many revisions name r as a parent
    childcount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                childcount[p - baserev] += 1
    return subset.filter(lambda r: childcount[r - baserev] > 1)
1340
1340
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    try:
        # like the builtin min(), .min() raises ValueError when empty
        low = candidates.min()
    except ValueError:
        return baseset()
    if low in subset:
        return baseset([low])
    return baseset()
1355
1355
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # status field 0 selects the 'modified' files — see checkstatus()
    return checkstatus(repo, subset, pat, 0)
1367
1367
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # pattern form: collect every registered namespace whose name matches
        # (NB: 'ns' is rebound here from the raw string to namespace objects)
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # union of the revisions of every non-deprecated name in each namespace
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    # a name can resolve to the null revision; drop it from the result
    names -= set([node.nullrev])
    return subset & names
1405
1405
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    rn = None
    if len(n) == 40:
        # full 40-char hex nodeid: resolve it directly
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            pass
    else:
        # shorter prefix: ask the index for an unambiguous match
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1429
1429
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # intersect with the obsstore-provided set of obsolete revisions
    return subset & obsmod.getrevs(repo, 'obsolete')
1437
1437
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # single-argument form: exclude every head that neither belongs to
        # 'include' nor descends from it
        # (fix: use the idiomatic 'rev not in' instead of 'not rev in')
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1463
1463
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is None:
        dests = fullreposet(repo)
    else:
        dests = getset(repo, fullreposet(repo), x)

    def _firstsrc(rev):
        # walk the recorded source chain back to its first link
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)
            if prev is None:
                return src
            src = prev

    origins = set(_firstsrc(r) for r in dests)
    # revisions with no recorded source map to None; drop them
    origins.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & origins
1494
1494
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # fall back to the configured 'default-push' path, then 'default'
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # buffer ui output so discovery chatter does not leak to revset callers
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1521
1521
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        prev = repo[x].p1().rev()
        if prev >= 0:
            return subset & baseset([prev])
        return baseset()

    found = set()
    parentrevs = repo.changelog.parentrevs
    for r in getset(repo, fullreposet(repo), x):
        found.add(parentrevs(r)[0])
    found.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & found
1540
1540
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory; only a merge in progress
        # has a second parent, so IndexError means "no p2".
        wparents = repo[x].parents()
        try:
            prev = wparents[1].rev()
        except IndexError:
            return baseset()
        if prev >= 0:
            return subset & baseset([prev])
        return baseset()

    found = set()
    parentrevs = repo.changelog.parentrevs
    for r in getset(repo, fullreposet(repo), x):
        found.add(parentrevs(r)[1])
    found.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & found
1563
1563
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        found = set(p.rev() for p in repo[x].parents())
    else:
        found = set()
        parentrevs = repo.changelog.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the working directory's parents live on its context,
                # not in the changelog
                found.update(p.rev() for p in repo[r].parents())
            else:
                found.update(parentrevs(r))
    found.discard(node.nullrev)
    return subset & found
1582
1582
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    phasesets = repo._phasecache._phasesets
    if phasesets:
        revs = baseset(phasesets[target] - repo.changelog.filteredrevs)
        revs.sort() # set are non ordered, so we enforce ascending
        return subset & revs
    # no precomputed phase sets: query each revision's phase lazily
    phase = repo._phasecache.phase
    return subset.filter(lambda r: phase(repo, r) == target, cache=False)
1595
1595
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1603
1603
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1611
1611
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    found = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            found.add(r)
        elif n == 1:
            found.add(cl.parentrevs(r)[0])
        elif n == 2:
            prevs = cl.parentrevs(r)
            if len(prevs) > 1:
                found.add(prevs[1])
    return subset & found
1636
1636
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    result = baseset()
    try:
        result = getset(repo, subset, x)
    except error.RepoLookupError:
        # swallow the lookup failure: that is this predicate's purpose
        pass
    return result
1650
1650
# for internal use
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    phasesets = repo._phasecache._phasesets
    if phasesets:
        # union every non-public phase set (index 0 is public)
        nonpublic = set()
        for phaseset in phasesets[1:]:
            nonpublic.update(phaseset)
        revs = baseset(nonpublic - repo.changelog.filteredrevs)
        revs.sort()
        return subset & revs
    # fall back to per-revision phase queries
    phase = repo._phasecache.phase
    publicphase = phases.public
    return subset.filter(lambda r: phase(repo, r) != publicphase, cache=False)
1667
1667
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    return subset.filter(lambda r: phase(repo, r) == target, cache=False)
1677
1677
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    args = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(args) > 0:
        # i18n: "remote" is a keyword
        q = getstring(args[0], _("remote requires a string id"))
        if q == '.':
            # '.' means "look up the current local branch remotely"
            q = repo['.'].branch()

    dest = ''
    if len(args) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(args[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1712
1712
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pattern = getstring(x, _("removes requires a pattern"))
    # status index 2 selects removed files
    return checkstatus(repo, subset, pattern, 2)
1724
1724
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        revnum = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is valid even though it is never "in" the changelog
    if revnum != node.nullrev and revnum not in repo.changelog:
        return baseset()
    return subset & baseset([revnum])
1740
1740
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    # revisions whose field values every candidate is compared against
    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # x matches if ALL selected fields agree with at least one rev
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                # NOTE(review): no early break on mismatch — every field is
                # still evaluated; probably just a missed optimization
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches)
1852
1852
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    revs = getset(repo, subset, x)
    revs.reverse()
    return revs
1860
1860
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs
    def isroot(r):
        # a root has no real (non-null) parent inside the set
        return not any(0 <= p and p in s for p in parentrevs(r))
    return subset & s.filter(isroot)
1873
1873
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # map each char to its complement so that ascending sort of the
        # inverted string yields descending order of the original
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    # fast paths: plain revision-number ordering needs no key extraction
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            # bugfix: these used to be substring tests ("k in 'user author'"),
            # which silently accepted bogus keys like 'r' or 'se' as aliases
            # for 'user'; use exact membership so bad keys raise ParseError
            elif k in ('user', 'author'):
                e.append(c.user())
            elif k in ('-user', '-author'):
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # append the revision itself as a stable tie-breaker and so the
        # final comprehension can recover it from the sorted key tuples
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1936
1936
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    if len(args) != 0:
        # 'pat' is only bound when an argument is given; submatches() below
        # reads it via closure and is only called on the argument paths
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # only look at changes touching the .hgsubstate bookkeeping file
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # yield subrepo names matching the user-supplied pattern
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # status of .hgsubstate between the revision and its first parent
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            # no pattern: any change to .hgsubstate qualifies
            return s.added or s.modified or s.removed

        if s.added:
            # .hgsubstate was created: every current subrepo is "added"
            return any(submatches(c.substate.keys()))

        if s.modified:
            # compare subrepo states across the parent and the revision
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            # .hgsubstate was deleted: the parent's subrepos were removed
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
1979
1979
def _substringmatcher(pattern):
    # like util.stringmatcher, but literal patterns match as substrings
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind != 'literal':
        return kind, pattern, matcher
    return kind, pattern, lambda s: pattern in s
1985
1985
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # no name: every tagged revision except the implicit 'tip'
        found = set(cl.rev(n) for t, n in repo.tagslist() if t != 'tip')
        return subset & found
    pattern = getstring(args[0],
                        # i18n: "tag" is a keyword
                        _('the argument to tag must be a string'))
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind == 'literal':
        # avoid resolving all tags
        tn = repo._tagscache.tags.get(pattern, None)
        if tn is None:
            raise error.RepoLookupError(_("tag '%s' does not exist")
                                        % pattern)
        found = set([repo[tn].rev()])
    else:
        found = set(cl.rev(n) for t, n in repo.tagslist() if matcher(t))
    return subset & found
2014
2014
def tagged(repo, subset, x):
    # alias: ``tagged()`` behaves exactly like ``tag()``; kept without a
    # docstring so the user-facing help is documented once, on tag()
    return tag(repo, subset, x)
2017
2017
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # obsolescence data already knows the unstable set; just intersect
    return subset & obsmod.getrevs(repo, 'unstable')
2026
2026
2027
2027
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # alias for author(); the docstring above supplies the user-facing help
    return author(repo, subset, x)
2037
2037
# experimental
def wdir(repo, subset, x):
    # matches the working-directory pseudo-revision (node.wdirrev) when it
    # is a member of subset, or when subset spans the whole repo
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    if not (node.wdirrev in subset or isinstance(subset, fullreposet)):
        return baseset()
    return baseset([node.wdirrev])
2045
2045
# for internal use
def _list(repo, subset, x):
    # expand a \0-separated list of revision specs into a baseset
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    result = []
    emitted = set()
    for tok in spec.split('\0'):
        try:
            # fast path for integer revision
            rev = int(tok)
            if str(rev) != tok or rev not in cl:
                raise ValueError
            revs = [rev]
        except ValueError:
            revs = stringset(repo, subset, tok)

        for rev in revs:
            if rev in emitted:
                continue
            if (rev in subset
                or rev == node.nullrev and isinstance(subset, fullreposet)):
                result.append(rev)
                emitted.add(rev)
    return baseset(result)
2074
2074
# for internal use
def _intlist(repo, subset, x):
    # expand a \0-separated list of integer revisions, keeping only those
    # present in subset
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    wanted = [int(piece) for piece in spec.split('\0')]
    return baseset([rev for rev in wanted if rev in subset])
2083
2083
# for internal use
def _hexlist(repo, subset, x):
    # expand a \0-separated list of hex node ids, keeping only revisions
    # present in subset
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    torev = repo.changelog.rev
    wanted = [torev(node.bin(piece)) for piece in spec.split('\0')]
    return baseset([rev for rev in wanted if rev in subset])
2093
2093
# map of revset predicate names (as written in revset expressions) to the
# functions implementing them; underscore-prefixed entries are internal
symbols = {
    "_destupdate": _destupdate,
    "_destmerge": _destmerge,
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
2170
2170
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# note: 'contains' and 'grep' are deliberately absent from this set
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
    ])
2244
2244
# map of parse-tree node types to the functions that evaluate them;
# 'string' and 'symbol' share one evaluator, and 'parentpost' (x^) is p1
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2260
2260
def optimize(x, small):
    """Optimize parse tree ``x`` and return a ``(weight, tree)`` pair.

    ``weight`` estimates how expensive the (sub)tree is to evaluate; the
    'and' branch below uses it to evaluate the cheaper operand first.
    ``small`` hints that the caller expects a small result set, which
    discounts the weight of single-revision nodes via ``smallbonus``.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    # first, rewrite syntactic sugar into canonical 'func'/'range' forms
    if op == 'minus':
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        # note: substring test against the space-separated list of op names
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            # True when revs is ancestors(...) and bases is not ancestors(...)
            return (
                revs is not None
                and revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases is not None
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # put the cheaper operand first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            # fold the pending run of trivial operands (ss) into one node
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        # parentheses only affect parsing; drop them from the tree
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # per-predicate cost estimates (substring tests on space-separated
        # name lists, as above)
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2393
2393
# parse-tree prefix that marks an alias argument placeholder
_aliasarg = ('func', ('symbol', '_aliasarg'))
def _getaliasarg(tree):
    """Return X when tree is ('func', ('symbol', '_aliasarg'),
    ('string', X)); otherwise return None.
    """
    if len(tree) != 3:
        return None
    if tree[:2] != _aliasarg or tree[2][0] != 'string':
        return None
    return tree[2][1]
2403
2403
def _checkaliasarg(tree, known=None):
    """Check that *tree* contains no ``_aliasarg`` construct, or only ones
    whose value is in *known*. Used to avoid alias placeholders injection.
    """
    if not isinstance(tree, tuple):
        return
    arg = _getaliasarg(tree)
    if arg is not None and (not known or arg not in known):
        raise error.UnknownIdentifier('_aliasarg', [])
    for subtree in tree:
        _checkaliasarg(subtree, known)
2414
2414
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions: alphanumerics, '._@$', and any
# non-ASCII byte; '$' is allowed here so argument placeholders like $1
# can be tokenized (see _tokenizealias)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2419
2419
def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    # delegate to the regular tokenizer with a widened initial-letter set
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)
2429
2429
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    # parse with the regular revset grammar but the alias tokenizer, so
    # '$'-prefixed placeholder names are accepted as symbols
    p = parser.parser(elements)
    try:
        tree, pos = p.parse(_tokenizealias(decl))
        if (pos != len(decl)):
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            for arg in getfuncargs(tree):
                # arguments must be plain symbols: no nesting, no strings
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError as inst:
        return (decl, None, None, parseerrordetail(inst))
2500
2500
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    # wrap the alias tokenizer so that symbols naming a declared alias
    # argument are rewritten into _aliasarg('NAME') on the fly, before
    # the parser ever sees them
    def tokenizedefn(program, lookup=None):
        if args:
            argset = set(args)
        else:
            # symbol-style alias: no argument placeholders to replace
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    # a '$'-prefixed symbol that is not a declared argument
                    # is almost certainly a typo; reject it early
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    p = parser.parser(elements)
    tree, pos = p.parse(tokenizedefn(defn))
    if pos != len(defn):
        # parsing stopped before consuming the whole definition
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('or',))
2565
2565
class revsetalias(object):
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Parse one [revsetalias] config entry into name/args/replacement.

        Aliases look like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)

        On failure, ``self.error`` holds a user-facing message instead of
        raising, so bad aliases only break expressions that use them.
        '''
        decl = _parsealiasdecl(name)
        self.name, self.tree, self.args, self.error = decl
        if self.error:
            # declaration side (left of '=') did not parse
            self.error = (_('failed to parse the declaration of revset alias'
                            ' "%s": %s') % (self.name, self.error))
            return

        try:
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError as inst:
            # definition side (right of '=') did not parse
            self.error = (_('failed to parse the definition of revset alias'
                            ' "%s": %s') % (self.name, parseerrordetail(inst)))
2590
2590
2591 def _getalias(aliases, tree):
2591 def _getalias(aliases, tree):
2592 """If tree looks like an unexpanded alias, return it. Return None
2592 """If tree looks like an unexpanded alias, return it. Return None
2593 otherwise.
2593 otherwise.
2594 """
2594 """
2595 if isinstance(tree, tuple) and tree:
2595 if isinstance(tree, tuple) and tree:
2596 if tree[0] == 'symbol' and len(tree) == 2:
2596 if tree[0] == 'symbol' and len(tree) == 2:
2597 name = tree[1]
2597 name = tree[1]
2598 alias = aliases.get(name)
2598 alias = aliases.get(name)
2599 if alias and alias.args is None and alias.tree == tree:
2599 if alias and alias.args is None and alias.tree == tree:
2600 return alias
2600 return alias
2601 if tree[0] == 'func' and len(tree) > 1:
2601 if tree[0] == 'func' and len(tree) > 1:
2602 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2602 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2603 name = tree[1][1]
2603 name = tree[1][1]
2604 alias = aliases.get(name)
2604 alias = aliases.get(name)
2605 if alias and alias.args is not None and alias.tree == tree[:2]:
2605 if alias and alias.args is not None and alias.tree == tree[:2]:
2606 return alias
2606 return alias
2607 return None
2607 return None
2608
2608
def _expandargs(tree, args):
    """Replace _aliasarg instances with the substitution value of the
    same name in args, recursively.

    ``args`` maps placeholder names to already-expanded subtrees.
    """
    if not isinstance(tree, tuple) or not tree:
        # leaves (strings, empty tuples) are returned unchanged
        return tree
    name = _getaliasarg(tree)
    if name is not None:
        return args[name]
    return tuple(_expandargs(subtree, args) for subtree in tree)
2619
2619
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded; it is
    used to detect cyclic alias definitions.  'cache' memoizes fully
    expanded replacement trees by alias name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            # a broken alias only aborts expressions that actually use it
            raise error.Abort(alias.error)
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            # function-style alias: substitute actual call arguments for
            # the _aliasarg placeholders in the cached replacement tree
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %d') % len(l))
            # arguments are expanded with a fresh 'expanding' stack: they
            # are independent of the alias currently being expanded
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        # not an alias node itself; recurse into children
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2653
2653
def findaliases(ui, tree, showwarning=None):
    """Expand [revsetalias] config entries inside ``tree``.

    If ``showwarning`` is given, it is called once per broken alias that
    was configured but never referenced by the expression.
    """
    _checkaliasarg(tree)
    aliases = {}
    for key, definition in ui.configitems('revsetalias'):
        entry = revsetalias(key, definition)
        aliases[entry.name] = entry
    tree = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, entry in sorted(aliases.iteritems()):
            if entry.error and not entry.warned:
                showwarning(_('warning: %s\n') % (entry.error))
                entry.warned = True
    return tree
2668
2668
def foldconcat(tree):
    """Fold elements to be concatenated by `##`

    '_concat' nodes over strings/symbols collapse into one 'string'
    node; anything else under '##' is a parse error.
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        # ordinary node: fold each child independently
        return tuple(foldconcat(child) for child in tree)
    # depth-first, left-to-right walk over the (possibly nested) chain
    stack = [tree]
    pieces = []
    while stack:
        elem = stack.pop()
        if elem[0] == '_concat':
            # push children so the leftmost is popped first
            stack.extend(reversed(elem[1:]))
        elif elem[0] in ('string', 'symbol'):
            pieces.append(elem[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (elem[0])
            raise error.ParseError(msg)
    return ('string', ''.join(pieces))
2689
2689
def parse(spec, lookup=None):
    """Parse the revset expression ``spec`` into a tree.

    ``lookup`` is an optional callable the tokenizer uses to decide
    whether a name refers to an existing revision.  Chained 'or' nodes
    are flattened before the tree is returned.
    """
    revsetparser = parser.parser(elements)
    tree, pos = revsetparser.parse(tokenize(spec, lookup=lookup))
    if pos != len(spec):
        # trailing garbage after a complete expression
        raise error.ParseError(_("invalid token"), pos)
    return parser.simplifyinfixops(tree, ('or',))
2696
2696
def posttreebuilthook(tree, repo):
    """Hook point run on the optimized revset tree.

    The core implementation intentionally does nothing; extensions wrap
    this function to inspect or act on the final tree.
    """
2700
2700
def match(ui, spec, repo=None):
    """Build a matcher callable for the single revset expression ``spec``.

    Raises ParseError on an empty spec.  When ``repo`` is given, its
    membership test helps the tokenizer resolve ambiguous names.
    """
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    return _makematcher(ui, parse(spec, lookup), repo)
2709
2709
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs

    With no specs at all, the matcher yields the empty set; any single
    empty spec is rejected as an empty query.
    """
    if not specs:
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    trees = [parse(s, lookup) for s in specs]
    if len(trees) == 1:
        tree = trees[0]
    else:
        # multiple specs combine as a single top-level 'or'
        tree = ('or',) + tuple(trees)
    return _makematcher(ui, tree, repo)
2727
2727
def _makematcher(ui, tree, repo):
    """Turn a parsed revset ``tree`` into a matcher function.

    Aliases are expanded (when a ui is available), '##' concatenations
    folded and the tree optimized before being captured by the returned
    ``mfunc(repo, subset=None)`` closure.
    """
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    tree = optimize(tree, True)[1]
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        # getset expects a smartset; wrap plain collections in a baseset
        if not util.safehasattr(subset, 'isascending'):
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2743
2743
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # repr() supplies both the escaping and the surrounding quotes
        return repr(str(s))

    def argtype(c, arg):
        # format a single argument according to its type character
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # format a list of arguments, using the _list()/_intlist()/
        # _hexlist() revset functions where a compact encoding exists,
        # and falling back to a balanced tree of 'or's otherwise
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    ret = ''
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                ret += d
            elif d in 'dsnbr':
                ret += argtype(d, args[arg])
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                ret += listexp(list(args[arg]), d)
                arg += 1
            else:
                # wrap in _() for translation, matching every other
                # user-visible message in this module
                raise error.Abort(_('unexpected revspec format character %s')
                                  % d)
        else:
            ret += c
        pos += 1

    return ret
2837
2837
def prettyformat(tree):
    """Render a parsed revset tree as an indented multi-line string.

    'string' and 'symbol' nodes are treated as leaves and kept on one
    line; everything else is expanded one child per line.
    """
    return parser.prettyformat(tree, ('string', 'symbol'))
2840
2840
def depth(tree):
    """Return the nesting depth of ``tree``.

    Non-tuple leaves have depth 0; a tuple is one deeper than its
    deepest child.
    """
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(child) for child in tree)
2846
2846
def funcsused(tree):
    """Return the set of function names referenced anywhere in ``tree``."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        # leaves use no functions
        return set()
    used = set()
    for subtree in tree[1:]:
        used.update(funcsused(subtree))
    if tree[0] == 'func':
        # tree[1] is ('symbol', name) for a function call node
        used.add(tree[1][1])
    return used
2857
2857
class abstractsmartset(object):
    """Base class for the smartset classes that hold revset results.

    Subclasses provide iteration, membership, length, ordering control
    and first/last access; this base class supplies cached min/max and
    the set-like operators on top of that API.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    @util.cachefunc
    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # first element of an ascending iteration is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    @util.cachefunc
    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # first element of a descending iteration is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with the whole repo is a no-op
            return self
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtin cannot be cached. but do not needs to
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
2964
2964
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        if not isinstance(data, list):
            if isinstance(data, set):
                # keep the original set around for O(1) __contains__
                self._set = data
            data = list(data)
        self._list = data
        # None means "unsorted, keep insertion order"; True/False request
        # ascending/descending iteration over the sorted values
        self._ascending = None

    @util.propertycache
    def _set(self):
        # computed lazily unless a set was captured in __init__
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # sorted copy, computed lazily and shared by fastasc/fastdesc
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # the cached bound method of the underlying set serves directly
        # as this instance's __contains__
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            # unsorted: flip the stored insertion order in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            # zero/one element is trivially in any order
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            # zero/one element is trivially in any order
            return True
        return self._ascending is not None and not self._ascending

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None

    def __repr__(self):
        # prefix encodes the requested order: '' unsorted, '+' asc, '-' desc
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, d, self._list)
3061
3061
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        subset: a smartset holding the candidate revisions
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition

    def __contains__(self, x):
        # membership requires both presence in the subset and the predicate
        return x in self._subset and self._condition(x)

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        """yield the elements of `it` accepted by the condition"""
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # delegate fast ascending iteration to the subset when available
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        # delegate fast descending iteration to the subset when available
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        # Python 2 truth protocol: non-empty iff at least one revision
        # passes the condition. Use a fast iterator when one exists so we
        # can stop at the very first hit.
        fast = self.fastasc
        if fast is None:
            fast = self.fastdesc
        if fast is not None:
            it = fast()
        else:
            it = self

        for r in it:
            return True
        return False

    def __len__(self):
        # Count lazily instead of materializing an intermediate baseset:
        # same result, but O(1) extra memory for large filtered sets.
        return sum(1 for r in self)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        """return the first matching revision, or None when empty"""
        for x in self:
            return x
        return None

    def last(self):
        """return the last matching revision, or None when empty"""
        it = None
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is not None:
            for x in it():
                return x
            return None #empty case
        else:
            # unknown order: exhaust the iterator and keep the final value
            x = None
            for x in self:
                pass
            return x

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._subset)
3154
3154
def _iterordered(ascending, iter1, iter2):
    """produce an ordered iteration from two iterators with the same order

    The ascending is used to indicated the iteration direction. Both inputs
    must already be sorted in that direction; values present in both inputs
    are yielded only once.
    """
    # pick which of the two pending values must be emitted next
    choice = max
    if ascending:
        choice = min

    val1 = None
    val2 = None
    try:
        # Consume both iterators in an ordered way until one is empty.
        # Use the next() builtin (works on Python 2.6+ and Python 3) instead
        # of the py2-only .next() method, and avoid shadowing it with a
        # local named 'next'.
        while True:
            if val1 is None:
                val1 = next(iter1)
            if val2 is None:
                val2 = next(iter2)
            winner = choice(val1, val2)
            yield winner
            # clear whichever side(s) produced the emitted value; clearing
            # both at once deduplicates values present in both inputs
            if val1 == winner:
                val1 = None
            if val2 == winner:
                val2 = None
    except StopIteration:
        # Flush any remaining values and consume the other one
        it = iter2
        if val1 is not None:
            yield val1
            it = iter1
        elif val2 is not None:
            # might have been equality and both are empty
            yield val2
        for val in it:
            yield val
3190
3190
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> [x for x in rs] # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        # revs1, revs2: the two smartsets being added.
        # ascending: None when the order is unknown; True/False when both
        # inputs are sorted in that direction, enabling ordered merging.
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        self._ascending = ascending
        # cache of every generated value (a baseset), filled by _list
        self._genlist = None
        # ascending-sorted copy of _genlist, built lazily by _trysetasclist
        self._asclist = None

    def __len__(self):
        # forces full evaluation through the _list property cache
        return len(self._list)

    def __nonzero__(self):
        # non-empty iff either component is non-empty (no iteration needed)
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        # memoized: iterating self populates _genlist exactly once
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                # r1 first, then any r2 value not already seen in r1
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        # prefer the cached sorted list; otherwise merge the components'
        # fast ascending iterators (None when either lacks one)
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        # mirror image of fastasc for descending iteration
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        # without a known order, physically reverse the cached list;
        # otherwise just flip the direction flag
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # reuse first() on the temporarily reversed set, then restore order
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3390
3390
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        iterasc: None when the order of `gen` is unknown, True when it
        yields ascending values, False when it yields descending values.
        """
        self._gen = gen
        self._asclist = None   # sorted snapshot, built once gen is exhausted
        self._cache = {}       # membership results discovered so far
        self._genlist = []     # values generated so far, in generation order
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            # known-sorted generator: install specialized fast iteration and
            # early-exit membership implementations on the instance
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending: once past x, it can no longer appear
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending: once below x, it can no longer appear
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        consume = self._consumegen()
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    # use the next() builtin (Python 2 and 3) and turn
                    # exhaustion into a plain return so the generator also
                    # terminates cleanly under PEP 479 semantics
                    try:
                        yield next(consume)
                    except StopIteration:
                        return
                i += 1
        return gen()

    def _consumegen(self):
        """pull values from the wrapped generator, caching them as we go"""
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            # the generator is exhausted: freeze a sorted snapshot and
            # switch to fast list-based iteration from now on
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        # force full consumption; afterwards _genlist holds every value
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again; after consumption
            # fastasc/fastdesc are set so this recursion terminates.
            # (Previously this returned self.first(), which yields the
            # wrong end of the set.)
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3564
3564
3565 class spanset(abstractsmartset):
3565 class spanset(abstractsmartset):
3566 """Duck type for baseset class which represents a range of revisions and
3566 """Duck type for baseset class which represents a range of revisions and
3567 can work lazily and without having all the range in memory
3567 can work lazily and without having all the range in memory
3568
3568
3569 Note that spanset(x, y) behave almost like xrange(x, y) except for two
3569 Note that spanset(x, y) behave almost like xrange(x, y) except for two
3570 notable points:
3570 notable points:
3571 - when x < y it will be automatically descending,
3571 - when x < y it will be automatically descending,
3572 - revision filtered with this repoview will be skipped.
3572 - revision filtered with this repoview will be skipped.
3573
3573
3574 """
3574 """
3575 def __init__(self, repo, start=0, end=None):
3575 def __init__(self, repo, start=0, end=None):
3576 """
3576 """
3577 start: first revision included the set
3577 start: first revision included the set
3578 (default to 0)
3578 (default to 0)
3579 end: first revision excluded (last+1)
3579 end: first revision excluded (last+1)
3580 (default to len(repo)
3580 (default to len(repo)
3581
3581
3582 Spanset will be descending if `end` < `start`.
3582 Spanset will be descending if `end` < `start`.
3583 """
3583 """
3584 if end is None:
3584 if end is None:
3585 end = len(repo)
3585 end = len(repo)
3586 self._ascending = start <= end
3586 self._ascending = start <= end
3587 if not self._ascending:
3587 if not self._ascending:
3588 start, end = end + 1, start +1
3588 start, end = end + 1, start +1
3589 self._start = start
3589 self._start = start
3590 self._end = end
3590 self._end = end
3591 self._hiddenrevs = repo.changelog.filteredrevs
3591 self._hiddenrevs = repo.changelog.filteredrevs
3592
3592
3593 def sort(self, reverse=False):
3593 def sort(self, reverse=False):
3594 self._ascending = not reverse
3594 self._ascending = not reverse
3595
3595
3596 def reverse(self):
3596 def reverse(self):
3597 self._ascending = not self._ascending
3597 self._ascending = not self._ascending
3598
3598
3599 def _iterfilter(self, iterrange):
3599 def _iterfilter(self, iterrange):
3600 s = self._hiddenrevs
3600 s = self._hiddenrevs
3601 for r in iterrange:
3601 for r in iterrange:
3602 if r not in s:
3602 if r not in s:
3603 yield r
3603 yield r
3604
3604
3605 def __iter__(self):
3605 def __iter__(self):
3606 if self._ascending:
3606 if self._ascending:
3607 return self.fastasc()
3607 return self.fastasc()
3608 else:
3608 else:
3609 return self.fastdesc()
3609 return self.fastdesc()
3610
3610
3611 def fastasc(self):
3611 def fastasc(self):
3612 iterrange = xrange(self._start, self._end)
3612 iterrange = xrange(self._start, self._end)
3613 if self._hiddenrevs:
3613 if self._hiddenrevs:
3614 return self._iterfilter(iterrange)
3614 return self._iterfilter(iterrange)
3615 return iter(iterrange)
3615 return iter(iterrange)
3616
3616
3617 def fastdesc(self):
3617 def fastdesc(self):
3618 iterrange = xrange(self._end - 1, self._start - 1, -1)
3618 iterrange = xrange(self._end - 1, self._start - 1, -1)
3619 if self._hiddenrevs:
3619 if self._hiddenrevs:
3620 return self._iterfilter(iterrange)
3620 return self._iterfilter(iterrange)
3621 return iter(iterrange)
3621 return iter(iterrange)
3622
3622
3623 def __contains__(self, rev):
3623 def __contains__(self, rev):
3624 hidden = self._hiddenrevs
3624 hidden = self._hiddenrevs
3625 return ((self._start <= rev < self._end)
3625 return ((self._start <= rev < self._end)
3626 and not (hidden and rev in hidden))
3626 and not (hidden and rev in hidden))
3627
3627
3628 def __nonzero__(self):
3628 def __nonzero__(self):
3629 for r in self:
3629 for r in self:
3630 return True
3630 return True
3631 return False
3631 return False
3632
3632
def __len__(self):
    """Number of visible revisions in the span."""
    span = abs(self._end - self._start)
    hidden = self._hiddenrevs
    if not hidden:
        return span
    # Subtract every hidden rev that falls inside the span; both
    # orientations of start/end are tolerated, as in the original code.
    start, end = self._start, self._end
    overlap = 0
    for rev in hidden:
        if (end < rev <= start) or (start <= rev < end):
            overlap += 1
    return span - overlap
3644
3644
def isascending(self):
    """True when iteration currently runs in ascending order."""
    return self._ascending
3647
3647
def isdescending(self):
    """True when iteration currently runs in descending order."""
    return not self._ascending
3650
3650
def first(self):
    """Return the first rev in iteration order, or None when empty."""
    it = self.fastasc if self._ascending else self.fastdesc
    for rev in it():
        return rev
    return None
3659
3659
def last(self):
    """Return the last rev in iteration order, or None when empty.

    Implemented by taking the first element of the opposite-direction
    iterator.
    """
    it = self.fastdesc if self._ascending else self.fastasc
    for rev in it():
        return rev
    return None
3668
3668
def __repr__(self):
    """E.g. '<spanset+ 0:4>' ('+' for ascending, '-' for descending)."""
    sign = '+' if self._ascending else '-'
    return '<%s%s %d:%d>' % (type(self).__name__, sign,
                             self._start, self._end - 1)
3673
3673
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle
    virtual revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also
        be in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        if not util.safehasattr(other, 'isascending'):
            # `other` is a plain container used with "&": coerce it into a
            # smartset, dropping hidden revisions on the way (this boldly
            # assumes all smartsets are pure).
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
3712
3712
def prettyformatset(revs):
    """Render repr(revs) as an indented tree, one nested smartset per line.

    Nesting depth is the count of unmatched '<' seen so far in the repr.
    """
    rs = repr(revs)
    out = []
    pos = 0
    total = len(rs)
    while pos < total:
        nxt = rs.find('<', pos + 1)
        if nxt < 0:
            nxt = total
        depth = rs.count('<', 0, pos) - rs.count('>', 0, pos)
        assert depth >= 0
        out.append(' ' * depth + rs[pos:nxt].rstrip())
        pos = nxt
    return '\n'.join(out)
3726
3726
# let hggettext harvest translatable docstrings from the predicate table:
i18nfunctions = symbols.values()
@@ -1,542 +1,542
1 # store.py - repository store handling for Mercurial
1 # store.py - repository store handling for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import scmutil, util, parsers, error
9 import scmutil, util, parsers, error
10 import os, stat, errno
10 import os, stat, errno
11
11
# single place to pick the hash function used for store path digests
_sha = util.sha1
13
13
# This avoids a collision between a file named foo and a dir named
# foo.i or foo.d
def _encodedir(path):
    '''
    >>> _encodedir('data/foo.i')
    'data/foo.i'
    >>> _encodedir('data/foo.i/bla.i')
    'data/foo.i.hg/bla.i'
    >>> _encodedir('data/foo.i.hg/bla.i')
    'data/foo.i.hg.hg/bla.i'
    >>> _encodedir('data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
    'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
    '''
    # '.hg/' must be rewritten first so the '.i/'/'.d/' passes cannot
    # manufacture fresh '.hg/' sequences that would then be escaped twice.
    for plain, escaped in (('.hg/', '.hg.hg/'),
                           ('.i/', '.i.hg/'),
                           ('.d/', '.d.hg/')):
        path = path.replace(plain, escaped)
    return path
31
31
# prefer the C implementation from parsers when it is available
encodedir = getattr(parsers, 'encodedir', _encodedir)
33
33
def decodedir(path):
    '''
    >>> decodedir('data/foo.i')
    'data/foo.i'
    >>> decodedir('data/foo.i.hg/bla.i')
    'data/foo.i/bla.i'
    >>> decodedir('data/foo.i.hg.hg/bla.i')
    'data/foo.i.hg/bla.i'
    '''
    # fast path: nothing was dir-escaped at all
    if ".hg/" not in path:
        return path
    # exact inverse of _encodedir; '.hg.hg/' goes last so the shorter
    # escapes are unwound before the generic one
    for escaped, plain in (('.d.hg/', '.d/'),
                           ('.i.hg/', '.i/'),
                           ('.hg.hg/', '.hg/')):
        path = path.replace(escaped, plain)
    return path
49
49
def _buildencodefun():
    '''Build the reversible (encode, decode) pair for store filenames.

    Uppercase letters become '_x'; control, non-ASCII and Windows-reserved
    characters become '~xx' hex escapes.

    >>> enc, dec = _buildencodefun()

    >>> enc('nothing/special.txt')
    'nothing/special.txt'
    >>> dec('nothing/special.txt')
    'nothing/special.txt'

    >>> enc('HELLO')
    '_h_e_l_l_o'
    >>> dec('_h_e_l_l_o')
    'HELLO'

    >>> enc('hello:world?')
    'hello~3aworld~3f'
    >>> dec('hello~3aworld~3f')
    'hello:world?'

    >>> enc('the\x07quick\xADshot')
    'the~07quick~adshot'
    >>> dec('the~07quick~adshot')
    'the\\x07quick\\xadshot'
    '''
    e = '_'
    winreserved = [ord(x) for x in '\\:*?"<>|']
    # identity mapping for plain ASCII; note Python-2-only builtins
    # (xrange, dict.iteritems, list-returning range concatenation) were
    # replaced by version-neutral equivalents with identical behavior
    cmap = dict((chr(x), chr(x)) for x in range(127))
    for x in list(range(32)) + list(range(126, 256)) + winreserved:
        cmap[chr(x)] = "~%02x" % x
    for x in list(range(ord("A"), ord("Z") + 1)) + [ord(e)]:
        cmap[chr(x)] = e + chr(x).lower()
    dmap = {}
    for k, v in cmap.items():
        dmap[v] = k
    def decode(s):
        # greedily match 1-3 character escape sequences
        i = 0
        while i < len(s):
            for l in range(1, 4):
                try:
                    yield dmap[s[i:i + l]]
                    i += l
                    break
                except KeyError:
                    pass
            else:
                raise KeyError
    return (lambda s: ''.join([cmap[c] for c in s]),
            lambda s: ''.join(list(decode(s))))
98
98
# instantiate the reversible filename codec once at import time
_encodefname, _decodefname = _buildencodefun()
100
100
def encodefilename(s):
    '''
    >>> encodefilename('foo.i/bar.d/bla.hg/hi:world?/HELLO')
    'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
    '''
    # dir-escape first, then apply the reversible character encoding
    return _encodefname(encodedir(s))
107
107
def decodefilename(s):
    '''
    >>> decodefilename('foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
    'foo.i/bar.d/bla.hg/hi:world?/HELLO'
    '''
    # exact inverse of encodefilename: undo characters, then directories
    return decodedir(_decodefname(s))
114
114
def _buildlowerencodefun():
    '''Build the non-reversible lowercasing filename encoder.

    >>> f = _buildlowerencodefun()
    >>> f('nothing/special.txt')
    'nothing/special.txt'
    >>> f('HELLO')
    'hello'
    >>> f('hello:world?')
    'hello~3aworld~3f'
    >>> f('the\x07quick\xADshot')
    'the~07quick~adshot'
    '''
    winreserved = [ord(x) for x in '\\:*?"<>|']
    # identity mapping for plain ASCII; Python-2-only builtins (xrange,
    # list-returning range concatenation) replaced by version-neutral
    # equivalents with identical behavior
    cmap = dict((chr(x), chr(x)) for x in range(127))
    for x in list(range(32)) + list(range(126, 256)) + winreserved:
        cmap[chr(x)] = "~%02x" % x
    for x in range(ord("A"), ord("Z") + 1):
        cmap[chr(x)] = chr(x).lower()
    return lambda s: "".join([cmap[c] for c in s])
134
134
# use the C implementation when present, else the pure-Python fallback
lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
136
136
# Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
_winres3 = ('aux', 'con', 'prn', 'nul') # length 3
_winres4 = ('com', 'lpt') # length 4 (with trailing 1..9)
def _auxencode(path, dotencode):
    '''
    Encodes filenames containing names reserved by Windows or which end in
    period or space. Does not touch other single reserved characters c.
    Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
    Additionally encodes space or period at the beginning, if dotencode is
    True. Parameter path is assumed to be all lowercase.
    A segment only needs encoding if a reserved name appears as a
    basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
    doesn't need encoding.

    >>> s = '.foo/aux.txt/txt.aux/con/prn/nul/foo.'
    >>> _auxencode(s.split('/'), True)
    ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
    >>> s = '.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
    >>> _auxencode(s.split('/'), False)
    ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
    >>> _auxencode(['foo. '], True)
    ['foo.~20']
    >>> _auxencode([' .foo'], True)
    ['~20.foo']
    '''
    for i, n in enumerate(path):
        if not n:
            continue
        if dotencode and n[0] in '. ':
            # escape a leading dot or space ('.foo' -> '~2efoo')
            n = "~%02x" % ord(n[0]) + n[1:]
            path[i] = n
        else:
            # length of the stem before the first dot decides whether the
            # segment can collide with a reserved device name
            stem = n.find('.')
            if stem == -1:
                stem = len(n)
            reserved = ((stem == 3 and n[:3] in _winres3) or
                        (stem == 4 and '1' <= n[3] <= '9'
                         and n[:3] in _winres4))
            if reserved:
                # encode third letter ('aux' -> 'au~78')
                n = n[:2] + "~%02x" % ord(n[2]) + n[3:]
                path[i] = n
        if n[-1] in '. ':
            # encode last period or space ('foo...' -> 'foo..~2e')
            path[i] = n[:-1] + "~%02x" % ord(n[-1])
    return path
183
183
_maxstorepathlen = 120
_dirprefixlen = 8
_maxshortdirslen = 8 * (_dirprefixlen + 1) - 4

def _hashencode(path, dotencode):
    """Hash-based, non-reversible encoding for over-long store paths.

    Keeps up to _dirprefixlen chars of each directory level (bounded by
    _maxshortdirslen in total), then pads with as much of the basename as
    fits before the sha1 digest; the extension is preserved.
    """
    digest = _sha(path).hexdigest()
    # strip the 'data/' or 'meta/' prefix before lower-encoding
    parts = _auxencode(lowerencode(path[5:]).split('/'), dotencode)
    basename = parts[-1]
    _root, ext = os.path.splitext(basename)
    sdirs = []
    sdirslen = 0
    for p in parts[:-1]:
        d = p[:_dirprefixlen]
        if d[-1] in '. ':
            # Windows can't access dirs ending in period or space
            d = d[:-1] + '_'
        t = len(d) if sdirslen == 0 else sdirslen + 1 + len(d)
        if t > _maxshortdirslen:
            break
        sdirs.append(d)
        sdirslen = t
    dirs = '/'.join(sdirs)
    if dirs:
        dirs += '/'
    res = 'dh/' + dirs + digest + ext
    spaceleft = _maxstorepathlen - len(res)
    if spaceleft > 0:
        # pad with as much of the basename as still fits
        res = 'dh/' + dirs + basename[:spaceleft] + digest + ext
    return res
218
218
def _hybridencode(path, dotencode):
    '''encodes path with a length limit

    Encodes all paths that begin with 'data/', according to the following.

    Default encoding (reversible):

    Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
    characters are encoded as '~xx', where xx is the two digit hex code
    of the character (see encodefilename).
    Relevant path components consisting of Windows reserved filenames are
    masked by encoding the third character ('aux' -> 'au~78', see _auxencode).

    Hashed encoding (not reversible):

    If the default-encoded path is longer than _maxstorepathlen, a
    non-reversible hybrid hashing of the path is done instead.
    This encoding uses up to _dirprefixlen characters of all directory
    levels of the lowerencoded path, but not more levels than can fit into
    _maxshortdirslen.
    Then follows the filler followed by the sha digest of the full path.
    The filler is the beginning of the basename of the lowerencoded path
    (the basename is everything after the last path separator). The filler
    is as long as possible, filling in characters from the basename until
    the encoded path has _maxstorepathlen characters (or all chars of the
    basename have been taken).
    The extension (e.g. '.i' or '.d') is preserved.

    The string 'data/' at the beginning is replaced with 'dh/', if the hashed
    encoding was used.
    '''
    path = encodedir(path)
    res = '/'.join(_auxencode(_encodefname(path).split('/'), dotencode))
    if len(res) > _maxstorepathlen:
        # too long for the reversible form; fall back to hashing
        res = _hashencode(path, dotencode)
    return res
256
256
def _pathencode(path):
    """Pure-Python dotencoded path encoder (C version may replace it below)."""
    de = encodedir(path)
    if len(path) > _maxstorepathlen:
        return _hashencode(de, True)
    res = '/'.join(_auxencode(_encodefname(de).split('/'), True))
    if len(res) > _maxstorepathlen:
        return _hashencode(de, True)
    return res

# swap in the C implementation when parsers provides one
_pathencode = getattr(parsers, 'pathencode', _pathencode)
268
268
def _plainhybridencode(f):
    """Hybrid-encode f without dot-encoding (plain fncache repo format)."""
    return _hybridencode(f, False)
271
271
def _calcmode(vfs):
    """Return the mode to create files under vfs with, or None when the
    current umask already yields it (avoids some useless chmods)."""
    try:
        mode = vfs.stat().st_mode
        if (0o777 & ~util.umask) == (0o777 & mode):
            mode = None
    except OSError:
        mode = None
    return mode
282
282
# store entries (relative to .hg/store) copied verbatim by copylist()
_data = ('data 00manifest.d 00manifest.i 00changelog.d 00changelog.i'
         ' phaseroots obsstore')
285
285
class basicstore(object):
    '''base class for local repository stores'''
    def __init__(self, path, vfstype):
        vfs = vfstype(path)
        self.path = vfs.base
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        # every access through self.vfs gets dir-escaped first
        self.vfs = scmutil.filtervfs(vfs, encodedir)
        self.opener = self.vfs

    def join(self, f):
        return self.path + '/' + encodedir(f)

    def _walk(self, relpath, recurse):
        '''yields (unencoded, encoded, size)'''
        path = self.path
        if relpath:
            path += '/' + relpath
        striplen = len(self.path) + 1
        found = []
        if self.rawvfs.isdir(path):
            readdir = self.rawvfs.readdir
            pending = [path]
            while pending:
                cur = pending.pop()
                for f, kind, st in readdir(cur, stat=True):
                    fp = cur + '/' + f
                    if kind == stat.S_IFREG and f[-2:] in ('.d', '.i'):
                        n = util.pconvert(fp[striplen:])
                        found.append((decodedir(n), n, st.st_size))
                    elif kind == stat.S_IFDIR and recurse:
                        pending.append(fp)
        found.sort()
        return found

    def datafiles(self):
        return self._walk('data', True)

    def topfiles(self):
        # yield manifest before changelog
        return reversed(self._walk('', False))

    def walk(self):
        '''yields (unencoded, encoded, size)'''
        # data files first, then the top-level files
        for x in self.datafiles():
            yield x
        for x in self.topfiles():
            yield x

    def copylist(self):
        return ['requires'] + _data.split()

    def write(self, tr):
        pass

    def invalidatecaches(self):
        pass

    def markremoved(self, fn):
        pass

    def __contains__(self, path):
        '''Checks if the store contains path'''
        path = "/".join(("data", path))
        # a tracked file?
        if self.vfs.exists(path + ".i"):
            return True
        # a directory?
        if not path.endswith("/"):
            path = path + "/"
        return self.vfs.exists(path)
359
359
class encodedstore(basicstore):
    def __init__(self, path, vfstype):
        vfs = vfstype(path + '/store')
        self.path = vfs.base
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        # every name is fully filename-encoded on its way to disk
        self.vfs = scmutil.filtervfs(vfs, encodefilename)
        self.opener = self.vfs

    def datafiles(self):
        for encoded_name, on_disk, size in self._walk('data', True):
            try:
                unencoded = decodefilename(encoded_name)
            except KeyError:
                # undecodable name: report it with a None unencoded form
                unencoded = None
            yield unencoded, on_disk, size

    def join(self, f):
        return self.path + '/' + encodefilename(f)

    def copylist(self):
        return (['requires', '00changelog.i'] +
                ['store/' + f for f in _data.split()])
384
384
class fncache(object):
    # the filename used to be partially encoded
    # hence the encodedir/decodedir dance
    def __init__(self, vfs):
        self.vfs = vfs
        self.entries = None    # lazily loaded set of tracked paths
        self._dirty = False

    def _load(self):
        '''fill the entries from the fncache file'''
        self._dirty = False
        try:
            fp = self.vfs('fncache', mode='rb')
        except IOError:
            # skip nonexistent file
            self.entries = set()
            return
        try:
            self.entries = set(decodedir(fp.read()).splitlines())
            if '' in self.entries:
                # an empty line means the file is corrupted; locate it
                # for the error message (line numbers are 1-based)
                fp.seek(0)
                for n, line in enumerate(fp):
                    if not line.rstrip('\n'):
                        # %d (not %s): n + 1 is an integer line number
                        t = _('invalid entry in fncache, line %d') % (n + 1)
                        raise error.Abort(t)
        finally:
            # also close on the Abort path (the original leaked fp there)
            fp.close()

    def write(self, tr):
        if self._dirty:
            tr.addbackup('fncache')
            fp = self.vfs('fncache', mode='wb', atomictemp=True)
            if self.entries:
                fp.write(encodedir('\n'.join(self.entries) + '\n'))
            fp.close()
            self._dirty = False

    def add(self, fn):
        if self.entries is None:
            self._load()
        if fn not in self.entries:
            self._dirty = True
            self.entries.add(fn)

    def remove(self, fn):
        if self.entries is None:
            self._load()
        try:
            self.entries.remove(fn)
            self._dirty = True
        except KeyError:
            pass

    def __contains__(self, fn):
        if self.entries is None:
            self._load()
        return fn in self.entries

    def __iter__(self):
        if self.entries is None:
            self._load()
        return iter(self.entries)
445
class _fncachevfs(scmutil.abstractvfs, scmutil.auditvfs):
    def __init__(self, vfs, fnc, encode):
        scmutil.auditvfs.__init__(self, vfs)
        self.fncache = fnc
        self.encode = encode

    def __call__(self, path, mode='r', *args, **kw):
        # register any data file opened for writing in the fncache
        if mode not in ('r', 'rb') and path.startswith('data/'):
            self.fncache.add(path)
        return self.vfs(self.encode(path), mode, *args, **kw)

    def join(self, path):
        # encode non-empty paths; '' must map to the vfs root unchanged
        if path:
            return self.vfs.join(self.encode(path))
        return self.vfs.join(path)
462
462
class fncachestore(basicstore):
    """Store flavour that tracks its data files in an fncache file.

    File names are path-encoded on disk; the fncache keeps the
    authoritative list of unencoded names so the store can be
    enumerated without walking the encoded directory tree.
    """

    def __init__(self, path, vfstype, dotencode):
        # 'dotencode' repos use the newer scheme that also escapes
        # leading dots/spaces in path components
        self.encode = _pathencode if dotencode else _plainhybridencode
        vfs = vfstype(path + '/store')
        self.path = vfs.base
        self.pathsep = self.path + '/'
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        fnc = fncache(vfs)
        self.fncache = fnc
        self.vfs = _fncachevfs(vfs, fnc, self.encode)
        self.opener = self.vfs

    def join(self, f):
        """Return the on-disk (encoded) path for store file *f*."""
        return self.pathsep + self.encode(f)

    def getsize(self, path):
        """Size in bytes of *path* (already encoded), via the raw vfs."""
        return self.rawvfs.stat(path).st_size

    def datafiles(self):
        """Yield (name, encoded name, size) for every tracked data file.

        Entries whose backing file has vanished are silently skipped.
        """
        for f in sorted(self.fncache):
            ef = self.encode(f)
            try:
                yield f, ef, self.getsize(ef)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise

    def copylist(self):
        """List of files to copy when hardlink-cloning this store."""
        storefiles = ('data dh fncache phaseroots obsstore'
                      ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
        result = ['requires', '00changelog.i']
        result.extend('store/' + f for f in storefiles.split())
        return result

    def write(self, tr):
        self.fncache.write(tr)

    def invalidatecaches(self):
        # force a reload of the entry set on next access
        self.fncache.entries = None

    def markremoved(self, fn):
        self.fncache.remove(fn)

    def _exists(self, f):
        """True if the encoded form of *f* exists on disk."""
        try:
            self.getsize(self.encode(f))
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # nonexistent entry
            return False
        return True

    def __contains__(self, path):
        '''Checks if the store contains path'''
        path = "/".join(("data", path))
        # check for files (exact match)
        if path + '.i' in self.fncache and self._exists(path + '.i'):
            return True
        # now check for directories (prefix match)
        if not path.endswith('/'):
            path += '/'
        return any(e.startswith(path) and self._exists(e)
                   for e in self.fncache)
def store(requirements, path, vfstype):
    """Instantiate the store implementation matching *requirements*.

    Pre-"store" repos get a basicstore; "store" repos get an
    encodedstore, upgraded to an fncachestore when the "fncache"
    requirement is present (with dot-encoding when "dotencode" is too).
    """
    if 'store' not in requirements:
        return basicstore(path, vfstype)
    if 'fncache' not in requirements:
        return encodedstore(path, vfstype)
    return fncachestore(path, vfstype, 'dotencode' in requirements)
General Comments 0
You need to be logged in to leave comments. Login now