##// END OF EJS Templates
merge with stable
Matt Mackall -
r17980:83aa4359 merge default
parent child Browse files
Show More
@@ -1,188 +1,189 b''
1 1 # hook.py - hook support for mercurial
2 2 #
3 3 # Copyright 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import os, sys
10 10 import extensions, util
11 11
12 12 def _pythonhook(ui, repo, name, hname, funcname, args, throw):
13 13 '''call python hook. hook is callable object, looked up as
14 14 name in python module. if callable returns "true", hook
15 15 fails, else passes. if hook raises exception, treated as
16 16 hook failure. exception propagates if throw is "true".
17 17
18 18 reason for "true" meaning "hook failed" is so that
19 19 unmodified commands (e.g. mercurial.commands.update) can
20 20 be run as hooks without wrappers to convert return values.'''
21 21
22 22 ui.note(_("calling hook %s: %s\n") % (hname, funcname))
23 23 obj = funcname
24 24 if not util.safehasattr(obj, '__call__'):
25 25 d = funcname.rfind('.')
26 26 if d == -1:
27 27 raise util.Abort(_('%s hook is invalid ("%s" not in '
28 28 'a module)') % (hname, funcname))
29 29 modname = funcname[:d]
30 30 oldpaths = sys.path
31 31 if util.mainfrozen():
32 32 # binary installs require sys.path manipulation
33 33 modpath, modfile = os.path.split(modname)
34 34 if modpath and modfile:
35 35 sys.path = sys.path[:] + [modpath]
36 36 modname = modfile
37 37 try:
38 38 obj = __import__(modname)
39 39 except ImportError:
40 40 e1 = sys.exc_type, sys.exc_value, sys.exc_traceback
41 41 try:
42 42 # extensions are loaded with hgext_ prefix
43 43 obj = __import__("hgext_%s" % modname)
44 44 except ImportError:
45 45 e2 = sys.exc_type, sys.exc_value, sys.exc_traceback
46 46 if ui.tracebackflag:
47 47 ui.warn(_('exception from first failed import attempt:\n'))
48 48 ui.traceback(e1)
49 49 if ui.tracebackflag:
50 50 ui.warn(_('exception from second failed import attempt:\n'))
51 51 ui.traceback(e2)
52 52 raise util.Abort(_('%s hook is invalid '
53 53 '(import of "%s" failed)') %
54 54 (hname, modname))
55 55 sys.path = oldpaths
56 56 try:
57 57 for p in funcname.split('.')[1:]:
58 58 obj = getattr(obj, p)
59 59 except AttributeError:
60 60 raise util.Abort(_('%s hook is invalid '
61 61 '("%s" is not defined)') %
62 62 (hname, funcname))
63 63 if not util.safehasattr(obj, '__call__'):
64 64 raise util.Abort(_('%s hook is invalid '
65 65 '("%s" is not callable)') %
66 66 (hname, funcname))
67 67 try:
68 68 try:
69 69 # redirect IO descriptors to the ui descriptors so hooks
70 70 # that write directly to these don't mess up the command
71 71 # protocol when running through the command server
72 72 old = sys.stdout, sys.stderr, sys.stdin
73 73 sys.stdout, sys.stderr, sys.stdin = ui.fout, ui.ferr, ui.fin
74 74
75 75 r = obj(ui=ui, repo=repo, hooktype=name, **args)
76 76 except KeyboardInterrupt:
77 77 raise
78 78 except Exception, exc:
79 79 if isinstance(exc, util.Abort):
80 80 ui.warn(_('error: %s hook failed: %s\n') %
81 81 (hname, exc.args[0]))
82 82 else:
83 83 ui.warn(_('error: %s hook raised an exception: '
84 84 '%s\n') % (hname, exc))
85 85 if throw:
86 86 raise
87 87 ui.traceback()
88 88 return True
89 89 finally:
90 90 sys.stdout, sys.stderr, sys.stdin = old
91 91 if r:
92 92 if throw:
93 93 raise util.Abort(_('%s hook failed') % hname)
94 94 ui.warn(_('warning: %s hook failed\n') % hname)
95 95 return r
96 96
def _exthook(ui, repo, name, cmd, args, throw):
    """Run hook `cmd` as an external command.

    Hook arguments are exported as HG_* environment variables.
    Returns the (truthy) exit status on failure, falsy on success;
    with throw=True a failure raises util.Abort instead."""
    ui.note(_("running hook %s: %s\n") % (name, cmd))

    env = {}
    for key, val in args.iteritems():
        # callables are evaluated lazily, at hook-run time
        if util.safehasattr(val, '__call__'):
            val = val()
        if isinstance(val, dict):
            # make the dictionary element order stable across Python
            # implementations
            items = sorted(val.iteritems())
            val = '{' + ', '.join('%r: %r' % item for item in items) + '}'
        env['HG_' + key.upper()] = val

    if repo:
        cwd = repo.root
    else:
        cwd = os.getcwd()
    # NOTE(review): remote:http sessions route output through the ui object
    # itself rather than ui.fout — presumably for command-protocol safety
    if 'HG_URL' in env and env['HG_URL'].startswith('remote:http'):
        status = util.system(cmd, environ=env, cwd=cwd, out=ui)
    else:
        status = util.system(cmd, environ=env, cwd=cwd, out=ui.fout)
    if status:
        desc, status = util.explainexit(status)
        if throw:
            raise util.Abort(_('%s hook %s') % (name, desc))
        ui.warn(_('warning: %s hook %s\n') % (name, desc))
    return status
126 126
127 127 def _allhooks(ui):
128 128 hooks = []
129 129 for name, cmd in ui.configitems('hooks'):
130 130 if not name.startswith('priority'):
131 131 priority = ui.configint('hooks', 'priority.%s' % name, 0)
132 132 hooks.append((-priority, len(hooks), name, cmd))
133 133 return [(k, v) for p, o, k, v in sorted(hooks)]
134 134
# module-level flag: when set, hook() temporarily reroutes the process
# stdout file descriptor to stderr while hooks run
_redirect = False
def redirect(state):
    """Enable or disable stdout->stderr redirection for future hook() calls."""
    global _redirect
    _redirect = state
139 139
def hook(ui, repo, name, throw=False, **args):
    """Run every configured hook matching `name`.

    Returns a truthy value if any hook failed; with throw=True the first
    failure raises instead. Returns False immediately when the ui has
    hooks disabled."""
    if not ui.callhooks:
        return False

    r = False
    oldstdout = -1

    try:
        for hname, cmd in _allhooks(ui):
            if hname.split('.')[0] != name or not cmd:
                continue

            # set up stdout->stderr redirection lazily, only once a hook
            # is actually going to run
            if oldstdout == -1 and _redirect:
                try:
                    stdoutno = sys.__stdout__.fileno()
                    stderrno = sys.__stderr__.fileno()
                    # temporarily redirect stdout to stderr, if possible
                    if stdoutno >= 0 and stderrno >= 0:
                        sys.__stdout__.flush()
                        oldstdout = os.dup(stdoutno)
                        os.dup2(stderrno, stdoutno)
                except (OSError, AttributeError):
                    # files seem to be bogus, give up on redirecting (WSGI, etc)
                    pass

            if util.safehasattr(cmd, '__call__'):
                r = _pythonhook(ui, repo, name, hname, cmd, args, throw) or r
            elif cmd.startswith('python:'):
                if cmd.count(':') >= 2:
                    # 'python:/path/to/file.py:funcname' form
                    path, cmd = cmd[7:].rsplit(':', 1)
                    path = util.expandpath(path)
                    if repo:
                        path = os.path.join(repo.root, path)
                    try:
                        mod = extensions.loadpath(path, 'hghook.%s' % hname)
                    except Exception:
                        ui.write(_("loading %s hook failed:\n") % hname)
                        raise
                    hookfn = getattr(mod, cmd)
                else:
                    # 'python:module.funcname' form
                    hookfn = cmd[7:].strip()
                r = _pythonhook(ui, repo, name, hname, hookfn, args, throw) or r
            else:
                r = _exthook(ui, repo, hname, cmd, args, throw) or r
    finally:
        if _redirect and oldstdout >= 0:
            os.dup2(oldstdout, stdoutno)
            os.close(oldstdout)

    return r
@@ -1,393 +1,393 b''
1 1 """ Mercurial phases support code
2 2
3 3 ---
4 4
5 5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 6 Logilab SA <contact@logilab.fr>
7 7 Augie Fackler <durin42@gmail.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License version 2 or any later version.
11 11
12 12 ---
13 13
14 14 This module implements most phase logic in mercurial.
15 15
16 16
17 17 Basic Concept
18 18 =============
19 19
20 20 A 'changeset phase' is an indicator that tells us how a changeset is
21 21 manipulated and communicated. The details of each phase is described
22 22 below, here we describe the properties they have in common.
23 23
24 24 Like bookmarks, phases are not stored in history and thus are not
25 25 permanent and leave no audit trail.
26 26
27 27 First, no changeset can be in two phases at once. Phases are ordered,
28 28 so they can be considered from lowest to highest. The default, lowest
29 29 phase is 'public' - this is the normal phase of existing changesets. A
30 30 child changeset can not be in a lower phase than its parents.
31 31
32 32 These phases share a hierarchy of traits:
33 33
34 34 immutable shared
35 35 public: X X
36 36 draft: X
37 37 secret:
38 38
39 39 Local commits are draft by default.
40 40
41 41 Phase Movement and Exchange
42 42 ===========================
43 43
44 44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 45 a publish option set, we call such a server a "publishing server".
46 46 Pushing a draft changeset to a publishing server changes the phase to
47 47 public.
48 48
49 49 A small list of fact/rules define the exchange of phase:
50 50
51 51 * old client never changes server states
52 52 * pull never changes server states
53 53 * publish and old server changesets are seen as public by client
54 54 * any secret changeset seen in another repository is lowered to at
55 55 least draft
56 56
57 57 Here is the final table summing up the 49 possible use cases of phase
58 58 exchange:
59 59
60 60 server
61 61 old publish non-publish
62 62 N X N D P N D P
63 63 old client
64 64 pull
65 65 N - X/X - X/D X/P - X/D X/P
66 66 X - X/X - X/D X/P - X/D X/P
67 67 push
68 68 X X/X X/X X/P X/P X/P X/D X/D X/P
69 69 new client
70 70 pull
71 71 N - P/X - P/D P/P - D/D P/P
72 72 D - P/X - P/D P/P - D/D P/P
73 73 P - P/X - P/D P/P - P/D P/P
74 74 push
75 75 D P/X P/X P/P P/P P/P D/D D/D P/P
76 76 P P/X P/X P/P P/P P/P P/P P/P P/P
77 77
78 78 Legend:
79 79
80 80 A/B = final state on client / state on server
81 81
82 82 * N = new/not present,
83 83 * P = public,
84 84 * D = draft,
85 85 * X = not tracked (i.e., the old client or server has no internal
86 86 way of recording the phase.)
87 87
88 88 passive = only pushes
89 89
90 90
91 91 A cell here can be read like this:
92 92
93 93 "When a new client pushes a draft changeset (D) to a publishing
94 94 server where it's not present (N), it's marked public on both
95 95 sides (P/P)."
96 96
97 97 Note: old client behave as a publishing server with draft only content
98 98 - other people see it as public
99 99 - content is pushed as draft
100 100
101 101 """
102 102
103 103 import errno
104 104 from node import nullid, nullrev, bin, hex, short
105 105 from i18n import _
106 import util
106 import util, error
107 107 import obsolete
108 108
109 109 allphases = public, draft, secret = range(3)
110 110 trackedphases = allphases[1:]
111 111 phasenames = ['public', 'draft', 'secret']
112 112
113 113 def _filterunknown(ui, changelog, phaseroots):
114 114 """remove unknown nodes from the phase boundary
115 115
116 116 Nothing is lost as unknown nodes only hold data for their descendants.
117 117 """
118 118 updated = False
119 119 nodemap = changelog.nodemap # to filter unknown nodes
120 120 for phase, nodes in enumerate(phaseroots):
121 121 missing = [node for node in nodes if node not in nodemap]
122 122 if missing:
123 123 for mnode in missing:
124 124 ui.debug(
125 125 'removing unknown node %s from %i-phase boundary\n'
126 126 % (short(mnode), phase))
127 127 nodes.symmetric_difference_update(missing)
128 128 updated = True
129 129 return updated
130 130
131 131 def _readroots(repo, phasedefaults=None):
132 132 """Read phase roots from disk
133 133
134 134 phasedefaults is a list of fn(repo, roots) callable, which are
135 135 executed if the phase roots file does not exist. When phases are
136 136 being initialized on an existing repository, this could be used to
137 137 set selected changesets phase to something else than public.
138 138
139 139 Return (roots, dirty) where dirty is true if roots differ from
140 140 what is being stored.
141 141 """
142 142 dirty = False
143 143 roots = [set() for i in allphases]
144 144 try:
145 145 f = repo.sopener('phaseroots')
146 146 try:
147 147 for line in f:
148 148 phase, nh = line.split()
149 149 roots[int(phase)].add(bin(nh))
150 150 finally:
151 151 f.close()
152 152 except IOError, inst:
153 153 if inst.errno != errno.ENOENT:
154 154 raise
155 155 if phasedefaults:
156 156 for f in phasedefaults:
157 157 roots = f(repo, roots)
158 158 dirty = True
159 159 if _filterunknown(repo.ui, repo.changelog, roots):
160 160 dirty = True
161 161 return roots, dirty
162 162
class phasecache(object):
    """Holds the phase root sets and a lazily-built rev -> phase table."""

    def __init__(self, repo, phasedefaults, _load=True):
        # _load=False is only used by copy() to build an empty shell
        if _load:
            # Cheap trick to allow shallow-copy without copy module
            self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
            self.opener = repo.sopener
            self._phaserevs = None

    def copy(self):
        # Shallow copy meant to ensure isolation in
        # advance/retractboundary(), nothing more.
        ph = phasecache(None, None, _load=False)
        ph.phaseroots = self.phaseroots[:]
        ph.dirty = self.dirty
        ph.opener = self.opener
        ph._phaserevs = self._phaserevs
        return ph

    def replace(self, phcache):
        """Adopt every attribute of phcache (commits a copy() back)."""
        for a in 'phaseroots dirty opener _phaserevs'.split():
            setattr(self, a, getattr(phcache, a))

    def getphaserevs(self, repo, rebuild=False):
        """Return the rev -> phase list, building it on first use."""
        if rebuild or self._phaserevs is None:
            # everything defaults to public; tracked phases paint the
            # subtrees below their roots
            revs = [public] * len(repo.changelog)
            for phase in trackedphases:
                roots = map(repo.changelog.rev, self.phaseroots[phase])
                if roots:
                    for rev in roots:
                        revs[rev] = phase
                    for rev in repo.changelog.descendants(roots):
                        revs[rev] = phase
            self._phaserevs = revs
        return self._phaserevs

    def phase(self, repo, rev):
        # We need a repo argument here to be able to build _phaserevs
        # if necessary. The repository instance is not stored in
        # phasecache to avoid reference cycles. The changelog instance
        # is not stored because it is a filecache() property and can
        # be replaced without us being notified.
        if rev == nullrev:
            return public
        if self._phaserevs is None or rev >= len(self._phaserevs):
            self._phaserevs = self.getphaserevs(repo, rebuild=True)
        return self._phaserevs[rev]

    def write(self):
        """Persist the phase roots when dirty."""
        if not self.dirty:
            return
        f = self.opener('phaseroots', 'w', atomictemp=True)
        try:
            for phase, roots in enumerate(self.phaseroots):
                for h in roots:
                    f.write('%i %s\n' % (phase, hex(h)))
        finally:
            f.close()
        self.dirty = False

    def _updateroots(self, phase, newroots):
        self.phaseroots[phase] = newroots
        self._phaserevs = None  # invalidate the derived table
        self.dirty = True

    def advanceboundary(self, repo, targetphase, nodes):
        # Be careful to preserve shallow-copied values: do not update
        # phaseroots values, replace them.

        delroots = [] # set of root deleted by this path
        for phase in xrange(targetphase + 1, len(allphases)):
            # filter nodes that are not in a compatible phase already
            nodes = [n for n in nodes
                     if self.phase(repo, repo[n].rev()) >= phase]
            if not nodes:
                break # no roots to move anymore
            olds = self.phaseroots[phase]
            roots = set(ctx.node() for ctx in repo.set(
                    'roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
            if olds != roots:
                self._updateroots(phase, roots)
                # some roots may need to be declared for lower phases
                delroots.extend(olds - roots)
        # declare deleted root in the target phase
        if targetphase != 0:
            self.retractboundary(repo, targetphase, delroots)
        obsolete.clearobscaches(repo)

    def retractboundary(self, repo, targetphase, nodes):
        # Be careful to preserve shallow-copied values: do not update
        # phaseroots values, replace them.

        currentroots = self.phaseroots[targetphase]
        newroots = [n for n in nodes
                    if self.phase(repo, repo[n].rev()) < targetphase]
        if newroots:
            if nullid in newroots:
                raise util.Abort(_('cannot change null revision phase'))
            currentroots = currentroots.copy()
            currentroots.update(newroots)
            ctxs = repo.set('roots(%ln::)', currentroots)
            currentroots.intersection_update(ctx.node() for ctx in ctxs)
            self._updateroots(targetphase, currentroots)
        obsolete.clearobscaches(repo)
266 266
def advanceboundary(repo, targetphase, nodes):
    """Add nodes to a phase changing other nodes phases if necessary.

    This function move boundary *forward* this means that all nodes
    are set in the target phase or kept in a *lower* phase.

    Simplify boundary to contains phase roots only."""
    # work on an isolated copy, then commit it back atomically
    cache = repo._phasecache.copy()
    cache.advanceboundary(repo, targetphase, nodes)
    repo._phasecache.replace(cache)
277 277
def retractboundary(repo, targetphase, nodes):
    """Set nodes back to a phase changing other nodes phases if
    necessary.

    This function move boundary *backward* this means that all nodes
    are set in the target phase or kept in a *higher* phase.

    Simplify boundary to contains phase roots only."""
    # work on an isolated copy, then commit it back atomically
    cache = repo._phasecache.copy()
    cache.retractboundary(repo, targetphase, nodes)
    repo._phasecache.replace(cache)
289 289
def listphases(repo):
    """List phases root for serialization over pushkey"""
    value = '%i' % draft
    keys = dict((hex(root), value)
                for root in repo._phasecache.phaseroots[draft])

    if repo.ui.configbool('phases', 'publish', True):
        # Add an extra data to let remote know we are a publishing
        # repo. Publishing repo can't just pretend they are old repo.
        # When pushing to a publishing repo, the client still need to
        # push phase boundary
        #
        # Push do not only push changeset. It also push phase data.
        # New phase data may apply to common changeset which won't be
        # push (as they are common). Here is a very simple example:
        #
        # 1) repo A push changeset X as draft to repo B
        # 2) repo B make changeset X public
        # 3) repo B push to repo A. X is not pushed but the data that
        #    X as now public should
        #
        # The server can't handle it on it's own as it has no idea of
        # client phase data.
        keys['publishing'] = 'True'
    return keys
316 316
def pushphase(repo, nhex, oldphasestr, newphasestr):
    """pushkey handler: try to advance node `nhex` from oldphase to newphase.

    Returns 1 on success (including a benign race where the node already
    reached newphase), 0 when the request cannot be honored.

    Note: the previous docstring was copy-pasted from listphases and
    described the wrong function."""
    lock = repo.lock()
    try:
        currentphase = repo[nhex].phase()
        newphase = abs(int(newphasestr)) # let's avoid negative index surprise
        oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
        if currentphase == oldphase and newphase < oldphase:
            advanceboundary(repo, newphase, [bin(nhex)])
            return 1
        elif currentphase == newphase:
            # raced, but got correct result
            return 1
        else:
            return 0
    finally:
        lock.release()
334 334
def analyzeremotephases(repo, subset, roots):
    """Compute phases heads and root in a subset of node from root dict

    * subset is heads of the subset
    * roots is {<nodeid> => phase} mapping. key and value are string.

    Accept unknown element input
    """
    # build list from dictionary
    draftroots = []
    nodemap = repo.changelog.nodemap # to filter unknown nodes
    for nhex, phase in roots.iteritems():
        if nhex == 'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        phase = int(phase)
        if phase == 0:
            # public roots carry no information; only nullid is consistent
            if node != nullid:
                repo.ui.warn(_('ignoring inconsistent public root'
                               ' from remote: %s\n') % nhex)
        elif phase == 1:
            # draft root: keep it only if we actually know the node
            if node in nodemap:
                draftroots.append(node)
        else:
            repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
                         % (phase, nhex))
    # compute heads
    publicheads = newheads(repo, subset, draftroots)
    return publicheads, draftroots
364 364
def newheads(repo, heads, roots):
    """compute new head of a subset minus another

    * `heads`: define the first subset
    * `roots`: define the second we subtract from the first"""
    ctxs = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
                    heads, roots, roots, heads)
    return [ctx.node() for ctx in ctxs]
373 373
374 374
def newcommitphase(ui):
    """helper to get the target phase of new commit

    Handle all possible values for the phases.new-commit options.

    """
    v = ui.config('phases', 'new-commit', draft)
    # accept a phase name first, then fall back to a raw integer
    try:
        return phasenames.index(v)
    except ValueError:
        try:
            return int(v)
        except ValueError:
            msg = _("phases.new-commit: not a valid phase name ('%s')")
            raise error.ConfigError(msg % v)
390 390
def hassecret(repo):
    """Return True when the repository has any secret changeset."""
    # index 2 is the secret phase (phasenames[2] == 'secret')
    return bool(repo._phasecache.phaseroots[2])
@@ -1,1931 +1,1920 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import bookmarks as bookmarksmod
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    # slice parent tuples to (first,) only when followfirst is set
    cut = followfirst and 1 or None
    cl = repo.changelog
    visit = util.deque(revs)
    seen = set([node.nullrev])
    # breadth-first walk, yielding each ancestor exactly once
    while visit:
        current = visit.popleft()
        for parent in cl.parentrevs(current)[:cut]:
            if parent not in seen:
                visit.append(parent)
                seen.add(parent)
                yield parent
29 29
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    # slice parent tuples to (first,) only when followfirst is set
    cut = followfirst and 1 or None
    cl = repo.changelog
    first = min(revs)
    nullrev = node.nullrev
    if first == nullrev:
        # Are there nodes with a null first parent and a non-null
        # second one? Maybe. Do we care? Probably not.
        for rev in cl:
            yield rev
        return

    seen = set(revs)
    for rev in cl.revs(first + 1):
        for prev in cl.parentrevs(rev)[:cut]:
            if prev != nullrev and prev in seen:
                seen.add(rev)
                yield rev
                break
50 50
51 51 def _revsbetween(repo, roots, heads):
52 52 """Return all paths between roots and heads, inclusive of both endpoint
53 53 sets."""
54 54 if not roots:
55 55 return []
56 56 parentrevs = repo.changelog.parentrevs
57 57 visit = heads[:]
58 58 reachable = set()
59 59 seen = {}
60 60 minroot = min(roots)
61 61 roots = set(roots)
62 62 # open-code the post-order traversal due to the tiny size of
63 63 # sys.getrecursionlimit()
64 64 while visit:
65 65 rev = visit.pop()
66 66 if rev in roots:
67 67 reachable.add(rev)
68 68 parents = parentrevs(rev)
69 69 seen[rev] = parents
70 70 for parent in parents:
71 71 if parent >= minroot and parent not in seen:
72 72 visit.append(parent)
73 73 if not reachable:
74 74 return []
75 75 for rev in sorted(seen):
76 76 for parent in seen[rev]:
77 77 if parent in reachable:
78 78 reachable.add(rev)
79 79 return sorted(reachable)
80 80
81 81 elements = {
82 82 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
83 83 "~": (18, None, ("ancestor", 18)),
84 84 "^": (18, None, ("parent", 18), ("parentpost", 18)),
85 85 "-": (5, ("negate", 19), ("minus", 5)),
86 86 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
87 87 ("dagrangepost", 17)),
88 88 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
89 89 ("dagrangepost", 17)),
90 90 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
91 91 "not": (10, ("not", 10)),
92 92 "!": (10, ("not", 10)),
93 93 "and": (5, None, ("and", 5)),
94 94 "&": (5, None, ("and", 5)),
95 95 "or": (4, None, ("or", 4)),
96 96 "|": (4, None, ("or", 4)),
97 97 "+": (4, None, ("or", 4)),
98 98 ",": (2, None, ("list", 2)),
99 99 ")": (0, None, None),
100 100 "symbol": (0, ("symbol",), None),
101 101 "string": (0, ("string",), None),
102 102 "end": (0, None, None),
103 103 }
104 104
105 105 keywords = set(['and', 'or', 'not'])
106 106
def tokenize(program):
    '''
    Parse a revset statement into a stream of tokens

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c in "():,-|&+!~^": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # raw string: no escape processing
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c.isalnum() or c in '._@' or ord(c) > 127:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if not (d.isalnum() or d in "._/@" or ord(d) > 127):
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error"), pos)
        pos += 1
    yield ('end', None, pos)
173 173
174 174 # helpers
175 175
def getstring(x, err):
    """Return the payload of a 'string' or 'symbol' node; else ParseError(err)."""
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
180 180
def getlist(x):
    """Flatten a left-nested ('list', rest, last) parse tree into a list."""
    if not x:
        return []
    out = []
    # walk down the left spine, collecting right-hand elements
    while x[0] == 'list':
        out.append(x[2])
        x = x[1]
        if not x:
            break
    else:
        out.append(x)
    out.reverse()
    return out
187 187
def getargs(x, min, max, err):
    """Return the argument list of x; ParseError(err) unless min <= len <= max.

    A negative max means "no upper bound"."""
    args = getlist(x)
    if len(args) < min or (max >= 0 and len(args) > max):
        raise error.ParseError(err)
    return args
193 193
def getset(repo, subset, x):
    """Evaluate parse tree x against subset via the methods dispatch table."""
    if not x:
        raise error.ParseError(_("missing argument"))
    return methods[x[0]](repo, subset, *x[1:])
198 198
199 199 def _getrevsource(repo, r):
200 200 extra = repo[r].extra()
201 201 for label in ('source', 'transplant_source', 'rebase_source'):
202 202 if label in extra:
203 203 try:
204 204 return repo[extra[label]].rev()
205 205 except error.RepoLookupError:
206 206 pass
207 207 return None
208 208
209 209 # operator methods
210 210
def stringset(repo, subset, x):
    """Resolve a literal revision spec x and intersect it with subset."""
    rev = repo[x].rev()
    # the null rev is only returned when the subset is the whole repo
    if rev == -1 and len(subset) == len(repo):
        return [-1]
    if len(subset) == len(repo) or rev in subset:
        return [rev]
    return []
218 218
def symbolset(repo, subset, x):
    """Resolve a bare symbol; reject names that collide with predicates."""
    if x in symbols:
        raise error.ParseError(_("can't use %s here") % x)
    return stringset(repo, subset, x)
223 223
def rangeset(repo, subset, x, y):
    """Handle 'x:y': revisions between the two endpoints, limited to subset."""
    # endpoints are resolved against subset first, then the whole repo
    left = getset(repo, subset, x)
    if not left:
        left = getset(repo, list(repo), x)

    right = getset(repo, subset, y)
    if not right:
        right = getset(repo, list(repo), y)

    if not left or not right:
        return []
    lo, hi = left[0], right[-1]

    # a descending spec ('5:2') yields revs in descending order
    if lo < hi:
        span = range(lo, hi + 1)
    else:
        span = range(lo, hi - 1, -1)
    members = set(subset)
    return [rev for rev in span if rev in members]
243 243
def dagrange(repo, subset, x, y):
    """Handle 'x::y': changesets on any DAG path from x-set to y-set."""
    if not subset:
        return []
    everything = list(repo)
    between = _revsbetween(repo, getset(repo, everything, x),
                           getset(repo, everything, y))
    members = set(subset)
    return [rev for rev in between if rev in members]
251 251
def andset(repo, subset, x, y):
    # intersection: evaluate y within the subset already narrowed by x
    return getset(repo, getset(repo, subset, x), y)
254 254
def orset(repo, subset, x, y):
    # union preserving subset order: take x's members first, then y's
    # members from what remains
    left = getset(repo, subset, x)
    taken = set(left)
    right = getset(repo, [r for r in subset if r not in taken], y)
    return left + right
260 260
def notset(repo, subset, x):
    # complement of x within subset
    excluded = set(getset(repo, subset, x))
    return [r for r in subset if r not in excluded]
264 264
def listset(repo, subset, a, b):
    # bare 'a, b' lists are only meaningful as function arguments
    raise error.ParseError(_("can't use a list in this context"))
267 267
def func(repo, subset, a, b):
    # dispatch 'name(args)' through the symbols table
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)
    raise error.ParseError(_("not a function: %s") % a[1])
272 272
273 273 # functions
274 274
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # status index 1 == added files
    return checkstatus(repo, subset, pat, 1)
282 282
def ancestor(repo, subset, x):
    """``ancestor(single, single)``
    Greatest common ancestor of the two changesets.
    """
    # i18n: "ancestor" is a keyword
    args = getargs(x, 2, 2, _("ancestor requires two arguments"))
    everything = list(repo)
    first = getset(repo, everything, args[0])
    second = getset(repo, everything, args[1])
    if len(first) != 1 or len(second) != 1:
        # i18n: "ancestor" is a keyword
        raise error.ParseError(_("ancestor arguments must be single revisions"))
    result = [repo[first[0]].ancestor(repo[second[0]]).rev()]

    return [r for r in result if r in subset]
298 298
def _ancestors(repo, subset, x, followfirst=False):
    # shared implementation for ancestors()/_firstancestors(): the
    # argument set plus all its ancestors, restricted to subset
    args = getset(repo, list(repo), x)
    if not args:
        return []
    members = set(_revancestors(repo, args, followfirst)) | set(args)
    return [r for r in subset if r in members]
305 305
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)
311 311
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)
316 316
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ancs = set()
    cl = repo.changelog
    for rev in getset(repo, subset, x):
        # walk n steps up the first-parent chain
        for _step in range(n):
            rev = cl.parentrevs(rev)[0]
        ancs.add(rev)
    return [r for r in subset if r in ancs]
333 333
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    # match case-insensitively as substring/regex per _substringmatcher
    needle = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(needle)
    return [r for r in subset if matcher(encoding.lower(repo[r].user()))]
342 342
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    marked = set(hbisect.get(repo, status))
    return [r for r in subset if r in marked]
359 359
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # Deprecated spelling; delegates to bisect() so old queries keep working.
    return bisect(repo, subset, x)
364 364
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        if kind == 'literal':
            # Exact name: resolve it once, or abort if unknown.
            bmrev = bookmarksmod.listbookmarks(repo).get(bm, None)
            if not bmrev:
                raise util.Abort(_("bookmark '%s' does not exist") % bm)
            target = repo[bmrev].rev()
            return [r for r in subset if r == target]
        else:
            # Pattern: collect the node of every bookmark whose name matches.
            matchrevs = set()
            for name, node in bookmarksmod.listbookmarks(repo).iteritems():
                if matcher(name):
                    matchrevs.add(node)
            if not matchrevs:
                raise util.Abort(_("no bookmarks exist that match '%s'")
                                 % pattern)
            bmrevs = set()
            for node in matchrevs:
                bmrevs.add(repo[node].rev())
            return [r for r in subset if r in bmrevs]

    # No argument: every bookmarked revision.
    bms = set(repo[n].rev()
              for n in bookmarksmod.listbookmarks(repo).values())
    return [r for r in subset if r in bms]
402 402
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return [r for r in subset if matcher(repo[r].branch())]
        else:
            return [r for r in subset if matcher(repo[r].branch())]

    # Revspec argument: select everything on the branches of those revisions.
    revs = getset(repo, list(repo), x)
    names = set()
    for r in revs:
        names.add(repo[r].branch())
    revs = set(revs)
    return [r for r in subset if r in revs or repo[r].branch() in names]
433 433
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    marked = obsmod.getrevs(repo, 'bumped')
    return [r for r in subset if r in marked]
444 444
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # Only a bundle repository's changelog carries this attribute.
        bundlenodes = repo.changelog.bundlenodes
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    bundled = set(repo[n].rev() for n in bundlenodes)
    return [r for r in subset if r in bundled]
457 457
def checkstatus(repo, subset, pat, field):
    # Helper shared by modifies()/removes(): revisions in subset whose
    # repo.status() list ``field`` (0 for modifies, 2 for removes — see
    # callers) contains a file matching ``pat``.
    m = None
    found = []
    hasset = matchmod.patkind(pat) == 'set'
    fname = None
    for r in subset:
        c = repo[r]
        if not m or hasset:
            # 'set' patterns depend on the changectx, so rebuild per revision.
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            if not m.anypats() and len(m.files()) == 1:
                # Plain single-file pattern: remember it for the fast path.
                fname = m.files()[0]
        if fname is not None:
            # Fast path: skip revisions that do not touch the file at all.
            if fname not in c.files():
                continue
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                continue
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                found.append(r)
        else:
            for f in files:
                if m(f):
                    found.append(r)
                    break
    return found
488 488
def _children(repo, narrow, parentset):
    # Members of ``narrow`` having at least one parent in ``parentset``.
    cs = set()
    pr = repo.changelog.parentrevs
    for r in narrow:
        for p in pr(r):
            if p in parentset:
                cs.add(r)
                break
    return cs
497 497
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    parentset = set(getset(repo, list(repo), x))
    kids = _children(repo, subset, parentset)
    return [r for r in subset if r in kids]
505 505
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    res = []
    for r in subset:
        if repo[r].closesbranch():
            res.append(r)
    return res
513 513
def contains(repo, subset, x):
    """``contains(pattern)``
    Revision contains a file matching pattern. See :hg:`help patterns`
    for information about file patterns.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))
    s = []
    if not matchmod.patkind(pat):
        # Literal path: a direct manifest membership test, no matcher needed.
        for r in subset:
            if pat in repo[r]:
                s.append(r)
    else:
        m = None
        # Hoisted loop invariant (previously recomputed every iteration):
        # only 'set' patterns depend on the changectx and force the matcher
        # to be rebuilt per revision — same scheme as checkstatus().
        hasset = matchmod.patkind(pat) == 'set'
        for r in subset:
            c = repo[r]
            if not m or hasset:
                m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    s.append(r)
                    break
    return s
537 537
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # 'convert_revision' records the source identifier for a conversion.
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    res = []
    for r in subset:
        if _matchvalue(r):
            res.append(r)
    return res
559 559
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    res = []
    for r in subset:
        # The matcher tests the first element of the date tuple.
        if dm(repo[r].date()[0]):
            res.append(r)
    return res
568 568
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))
    return [r for r in subset
            if ds in encoding.lower(repo[r].description())]
581 581
def _descendants(repo, subset, x, followfirst=False):
    # Shared implementation for descendants()/_firstdescendants(): members
    # of subset that are descendants of (or equal to) revisions in x.
    # This span contained stale removed-diff lines; this is the kept
    # (post-merge) version of the function.
    args = getset(repo, list(repo), x)
    if not args:
        return []
    s = set(_revdescendants(repo, args, followfirst)) | set(args)
    return [r for r in subset if r in s]
596 588
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    # Thin wrapper around the shared implementation, following both parents.
    return _descendants(repo, subset, x, followfirst=False)
602 594
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Same as ``descendants(set)``, but restricted to first parents.
    return _descendants(repo, subset, x, True)
607 599
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        args = set(getset(repo, list(repo), x))
    else:
        args = set(getall(repo, list(repo), x))

    dests = set()

    # Every candidate destination lives in subset; for each one, walk its
    # source chain backwards and see whether any ancestor-source was given
    # in args. Walking past the immediate source makes transitive
    # transplants/rebases behave like transitive grafts. If a source was
    # itself already selected, the current chain is selected too without
    # walking further.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = []

            lineage.append(r)

            if src in args or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return [r for r in subset if r in dests]
651 643
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    phase = repo._phasecache.phase
    return [r for r in subset if phase(repo, r) == phases.draft]
659 651
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    dead = obsmod.getrevs(repo, 'extinct')
    return [r for r in subset if r in dead]
668 660
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """

    # i18n: "extra" is a keyword
    l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
    # i18n: "extra" is a keyword
    label = getstring(l[0], _('first argument to extra must be a string'))
    value = None

    if len(l) > 1:
        # i18n: "extra" is a keyword
        value = getstring(l[1], _('second argument to extra must be a string'))
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        # Label must exist; value (if given) must satisfy the matcher.
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    res = []
    for r in subset:
        if _matchvalue(r):
            res.append(r)
    return res
695 687
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, ``filelog()`` does not show every changeset
    that affects the requested file(s). See :hg:`help log` for details. For
    a slower, more accurate result, use ``file()``.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    m = matchmod.match(repo.root, repo.getcwd(), [pat], default='relpath',
                       ctx=repo[None])
    linkrevs = set()

    def _collect(f):
        # Record the changelog revision linked to each filelog revision.
        fl = repo.file(f)
        for fr in fl:
            linkrevs.add(fl.linkrev(fr))

    if not matchmod.patkind(pat):
        # Plain paths: look up the named filelogs directly.
        for f in m.files():
            _collect(f)
    else:
        # Real pattern: scan the working directory for matching files.
        for f in repo[None]:
            if m(f):
                _collect(f)

    return [r for r in subset if r in linkrevs]
724 716
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # Argument parsing and validation happen in limit().
    return limit(repo, subset, x)
730 722
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation for follow()/_followfirst().
    l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a filename") % name)
        if x not in c:
            # File not in the working directory's parent: nothing to follow.
            return []
        cx = c[x]
        s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
        # include the revision responsible for the most recent version
        s.add(cx.linkrev())
    else:
        # No filename: ancestors of the working directory's first parent.
        s = set(_revancestors(repo, [c.rev()], followfirst))
        s.add(c.rev())
    return [r for r in subset if r in s]
747 739
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working copy's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    return _follow(repo, subset, x, 'follow')
755 747
def _followfirst(repo, subset, x):
    # ``followfirst([file])``
    # Same as ``follow([file])`` but tracks only the first parent of
    # every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
761 753
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # The subset already enumerates every candidate revision.
    return subset
769 761
770 762 def grep(repo, subset, x):
771 763 """``grep(regex)``
772 764 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
773 765 to ensure special escape characters are handled correctly. Unlike
774 766 ``keyword(string)``, the match is case-sensitive.
775 767 """
776 768 try:
777 769 # i18n: "grep" is a keyword
778 770 gr = re.compile(getstring(x, _("grep requires a string")))
779 771 except re.error, e:
780 772 raise error.ParseError(_('invalid match pattern: %s') % e)
781 773 l = []
782 774 for r in subset:
783 775 c = repo[r]
784 776 for e in c.files() + [c.user(), c.description()]:
785 777 if gr.search(e):
786 778 l.append(r)
787 779 break
788 780 return l
789 781
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    args = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    hasset = False
    rev, default = None, None
    for arg in args:
        # i18n: "_matchfiles" is a keyword
        spec = getstring(arg, _("_matchfiles requires string arguments"))
        prefix, value = spec[:2], spec[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
        if not hasset and matchmod.patkind(value) == 'set':
            hasset = True
    if not default:
        default = 'glob'
    m = None
    result = []
    for r in subset:
        c = repo[r]
        # Rebuild the matcher when a fileset pattern is present and no fixed
        # revision was given: the matcher then depends on the changectx.
        if not m or (hasset and rev is None):
            ctx = c
            if rev is not None:
                ctx = repo[rev or None]
            m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                               exclude=exc, ctx=ctx, default=default)
        for f in c.files():
            if m(f):
                result.append(r)
                break
    return result
852 844
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # Delegate to _matchfiles with a single plain-pattern argument.
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
863 855
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    for b, ls in repo.branchmap().iteritems():
        for h in ls:
            hs.add(repo[h].rev())
    return [r for r in subset if r in hs]
874 866
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    ps = set(parents(repo, subset, x))
    # Keep members that are nobody's parent within the set.
    return [r for r in s if r not in ps]
882 874
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    hiddenrevs = repo.hiddenrevs
    return [r for r in subset if r in hiddenrevs]
890 882
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))
    found = []
    for r in subset:
        c = repo[r]
        haystack = " ".join(c.files() + [c.user(), c.description()])
        if kw in encoding.lower(haystack):
            found.append(r)
    return found
905 897
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        try:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
        except (TypeError, ValueError):
            # i18n: "limit" is a keyword
            raise error.ParseError(_("limit expects a number"))
    ss = set(subset)
    # Truncate first, then keep only members of the subset, in set order.
    os = getset(repo, list(repo), l[0])[:lim]
    return [r for r in os if r in ss]
923 915
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        try:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
        except (TypeError, ValueError):
            # i18n: "last" is a keyword
            raise error.ParseError(_("last expects a number"))
    ss = set(subset)
    # Take the trailing slice, then keep only members of the subset.
    os = getset(repo, list(repo), l[0])[-lim:]
    return [r for r in os if r in ss]
941 933
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, list(repo), x)
    if not candidates:
        return []
    m = max(candidates)
    if m in subset:
        return [m]
    return []
952 944
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs
    # A merge has a real (non -1) second parent.
    return [r for r in subset if parentrevs(r)[1] != -1]
961 953
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    if not subset:
        return []
    cl = repo.changelog
    baserev = min(subset)
    # childcount[i] counts children of revision baserev + i; scanning the
    # changelog once from baserev + 1 tallies every child edge.
    childcount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                childcount[p - baserev] += 1
    return [r for r in subset if childcount[r - baserev] > 1]
978 970
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, list(repo), x)
    if not candidates:
        return []
    m = min(candidates)
    if m in subset:
        return [m]
    return []
989 981
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # Status field 0 selects the modified files.
    return checkstatus(repo, subset, pat, 0)
997 989
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # A full 40-char hex node resolves directly.
        rn = repo[n].rev()
    else:
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)
    return [r for r in subset if r == rn]
1015 1007
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    marked = obsmod.getrevs(repo, 'obsolete')
    return [r for r in subset if r in marked]
1023 1015
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        args = set(getset(repo, list(repo), x))
    else:
        args = set(getall(repo, list(repo), x))

    def _firstsrc(rev):
        # Walk the recorded source chain back to the earliest origin.
        src = _getrevsource(repo, rev)
        if src is None:
            return None
        while True:
            prev = _getrevsource(repo, src)
            if prev is None:
                return src
            src = prev

    o = set(_firstsrc(r) for r in args)
    return [r for r in subset if r in o]
1051 1043
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    import hg # avoid start-up nasties
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # Silence discovery chatter while computing the outgoing set.
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    missing = set(cl.rev(n) for n in outgoing.missing)
    return [r for r in subset if r in missing]
1074 1066
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context.
        p = repo[x].p1().rev()
        return [r for r in subset if r == p]

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[0] for r in getset(repo, list(repo), x))
    return [r for r in subset if r in ps]
1088 1080
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
        except IndexError:
            # The working directory has no second parent.
            return []
        return [r for r in subset if r == p]

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[1] for r in getset(repo, list(repo), x))
    return [r for r in subset if r in ps]
1106 1098
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context.
        ps = tuple(p.rev() for p in repo[x].parents())
        return [r for r in subset if r in ps]

    prevs = set()
    cl = repo.changelog
    for r in getset(repo, list(repo), x):
        prevs.update(cl.parentrevs(r))
    return [r for r in subset if r in prevs]
1120 1112
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, subset, x):
        if n == 0:
            # ^0 selects the revision itself.
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        else:
            prevs = cl.parentrevs(r)
            if len(prevs) > 1:
                ps.add(prevs[1])
    return [r for r in subset if r in ps]
1145 1137
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # Unknown revisions are tolerated: degrade to an empty result.
        return []
1159 1151
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    return [r for r in subset if phase(repo, r) == phases.public]
1167 1159
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' means the current local branch name.
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return [r]
    return []
1202 1194
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # Status field 2 selects the removed files.
    return checkstatus(repo, subset, pat, 2)
1210 1202
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        target = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    return [r for r in subset if r == target]
1224 1216
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, repo.changelog, l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
            # i18n: "matching" is a keyword
            _("matching requires a string "
              "as its second argument")).split()

    # Expand the special 'metadata' field, fold 'author' into 'user', and
    # make 'diff' imply 'files': matching the diff is very costly, so the
    # much cheaper file-list comparison is done first.
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # Not all fields take the same amount of time to be matched; check
    # the selected fields in order of increasing matching cost.
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = sorted(fields, key=fieldkeyfunc)

    # One extractor function per selected field, in matching-cost order.
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True)),
    }
    getfieldfuncs = []
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)

    def getinfo(r):
        # field values of revision r, aligned with getfieldfuncs
        return [f(r) for f in getfieldfuncs]

    matches = set()
    for torev in revs:
        target = getinfo(torev)
        for r in subset:
            for want, getfield in zip(target, getfieldfuncs):
                if want != getfield(r):
                    break
            else:
                matches.add(r)
    return [r for r in subset if r in matches]
1336 1328
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    revs = getset(repo, subset, x)
    if not isinstance(revs, list):
        revs = list(revs)
    revs.reverse()
    return revs
1346 1338
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    # NOTE(review): this span contained removed-side diff residue (an old
    # "if len(subset) == len(repo)" fast path); this is the merged result.
    s = set(getset(repo, repo.changelog, x))
    subset = [r for r in subset if r in s]
    cs = _children(repo, subset, s)
    return [r for r in subset if r not in cs]
1358 1347
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    phasecache = repo._phasecache
    return [r for r in subset if phasecache.phase(repo, r) == phases.secret]
1366 1355
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    args = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(args) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(args[1], _("sort spec must be a string"))

    keys = keys.split()

    def invert(s):
        # complement every byte so lexicographic order is reversed
        return "".join(chr(255 - ord(c)) for c in s)

    entries = []
    for r in getset(repo, subset, args[0]):
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # the revision number breaks ties and is what gets returned
        e.append(r)
        entries.append(e)
    entries.sort()
    return [e[-1] for e in entries]
1422 1411
1423 1412 def _stringmatcher(pattern):
1424 1413 """
1425 1414 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1426 1415 returns the matcher name, pattern, and matcher function.
1427 1416 missing or unknown prefixes are treated as literal matches.
1428 1417
1429 1418 helper for tests:
1430 1419 >>> def test(pattern, *tests):
1431 1420 ... kind, pattern, matcher = _stringmatcher(pattern)
1432 1421 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1433 1422
1434 1423 exact matching (no prefix):
1435 1424 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1436 1425 ('literal', 'abcdefg', [False, False, True])
1437 1426
1438 1427 regex matching ('re:' prefix)
1439 1428 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1440 1429 ('re', 'a.+b', [False, False, True])
1441 1430
1442 1431 force exact matches ('literal:' prefix)
1443 1432 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1444 1433 ('literal', 're:foobar', [False, True])
1445 1434
1446 1435 unknown prefixes are ignored and treated as literals
1447 1436 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1448 1437 ('literal', 'foo:bar', [False, False, True])
1449 1438 """
1450 1439 if pattern.startswith('re:'):
1451 1440 pattern = pattern[3:]
1452 1441 try:
1453 1442 regex = re.compile(pattern)
1454 1443 except re.error, e:
1455 1444 raise error.ParseError(_('invalid regular expression: %s')
1456 1445 % e)
1457 1446 return 're', pattern, regex.search
1458 1447 elif pattern.startswith('literal:'):
1459 1448 pattern = pattern[8:]
1460 1449 return 'literal', pattern, pattern.__eq__
1461 1450
def _substringmatcher(pattern):
    # like _stringmatcher, but a 'literal' pattern matches anywhere in
    # the candidate string instead of requiring equality
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        def matcher(s):
            return pattern in s
    return kind, pattern, matcher
1467 1456
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # every tagged revision, excluding the synthetic 'tip' tag
        s = set(cl.rev(n) for t, n in repo.tagslist() if t != 'tip')
    else:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = _stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise util.Abort(_("tag '%s' does not exist") % pattern)
            s = set([repo[tn].rev()])
        else:
            s = set(cl.rev(n) for t, n in repo.tagslist() if matcher(t))
            if not s:
                raise util.Abort(_("no tags exist that match '%s'") % pattern)
    return [r for r in subset if r in s]
1493 1482
def tagged(repo, subset, x):
    # compatibility alias for tag()
    return tag(repo, subset, x)
1496 1485
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    unstableset = obsmod.getrevs(repo, 'unstable')
    return [r for r in subset if r in unstableset]
1505 1494
1506 1495
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # identical matching rules to author()
    return author(repo, subset, x)
1516 1505
# for internal use
def _list(repo, subset, x):
    # x is a '\0'-separated string of revision identifiers
    s = getstring(x, "internal error")
    if not s:
        return []
    if not isinstance(subset, set):
        subset = set(subset)
    revs = [repo[t].rev() for t in s.split('\0')]
    return [r for r in revs if r in subset]
1526 1515
# Map of revset predicate names (as written in a revset expression) to
# the implementation functions above; 'func' parse nodes dispatch through
# this table.  Names starting with '_' are internal-only predicates.
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "obsolete": obsolete,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "_list": _list,
}
1593 1582
# Map of parse tree node types to the evaluator functions getset()
# dispatches to.
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": symbolset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
1608 1597
def optimize(x, small):
    """Return (weight, tree) for parse tree x: desugar compound operators
    into primitive forms and reorder 'and' operands so the cheaper side
    is evaluated first.  'small' hints that the expression is expected to
    select few revisions."""
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        lweight, ltree = optimize(x[1], True)
        rweight, rtree = optimize(x[2], True)
        weight = min(lweight, rweight)
        if lweight > rweight:
            # put the cheaper operand first
            return weight, (op, rtree, ltree)
        return weight, (op, ltree, rtree)
    elif op == 'or':
        lweight, ltree = optimize(x[1], False)
        rweight, rtree = optimize(x[2], False)
        if rweight < lweight:
            rweight, lweight = lweight, rweight
        return max(lweight, rweight), (op, ltree, rtree)
    elif op == 'not':
        sub = optimize(x[1], not small)
        return sub[0], (op, sub[1])
    elif op == 'parentpost':
        sub = optimize(x[1], small)
        return sub[0], (op, sub[1])
    elif op == 'group':
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        lweight, ltree = optimize(x[1], small)
        rweight, rtree = optimize(x[2], small)
        return lweight + rweight, (op, ltree, rtree)
    elif op == 'func':
        fname = getstring(x[1], _("not a symbol"))
        weight, tree = optimize(x[2], small)
        if fname in ("author branch closed date desc file grep keyword "
                     "outgoing user"):
            wf = 10 # slow
        elif fname in "modifies adds removes":
            wf = 30 # slower
        elif fname == "contains":
            wf = 100 # very slow
        elif fname == "ancestor":
            wf = 1 * smallbonus
        elif fname in "reverse limit first":
            wf = 0
        elif fname in "sort":
            wf = 10 # assume most sorts look at changelog
        else:
            wf = 1
        return wf + weight, (op, x[1], tree)
    return 1, x
1686 1675
1687 1676 _aliasarg = ('func', ('symbol', '_aliasarg'))
1688 1677 def _getaliasarg(tree):
1689 1678 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
1690 1679 return X, None otherwise.
1691 1680 """
1692 1681 if (len(tree) == 3 and tree[:2] == _aliasarg
1693 1682 and tree[2][0] == 'string'):
1694 1683 return tree[2][1]
1695 1684 return None
1696 1685
def _checkaliasarg(tree, known=None):
    """Check tree contains no _aliasarg construct or only ones which
    value is in known. Used to avoid alias placeholders injection.
    """
    if not isinstance(tree, tuple):
        return
    arg = _getaliasarg(tree)
    if arg is not None and (not known or arg not in known):
        raise error.ParseError(_("not a function: %s") % '_aliasarg')
    for subtree in tree:
        _checkaliasarg(subtree, known)
1707 1696
class revsetalias(object):
    """A user-defined revset alias (read from [revsetalias] config by
    findaliases).  'name(arg, ...)' definitions take arguments; plain
    names do not, in which case ``args`` stays None."""
    funcre = re.compile('^([^(]+)\(([^)]+)\)$')
    args = None

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        m = self.funcre.search(name)
        if m:
            self.name = m.group(1)
            self.tree = ('func', ('symbol', m.group(1)))
            self.args = [x.strip() for x in m.group(2).split(',')]
            for arg in self.args:
                # _aliasarg() is an unknown symbol only used to separate
                # alias argument placeholders from regular strings.
                value = value.replace(arg, '_aliasarg(%r)' % (arg,))
        else:
            self.name = name
            self.tree = ('symbol', name)

        self.replacement, pos = parse(value)
        if pos != len(value):
            raise error.ParseError(_('invalid token'), pos)
        # Check for placeholder injection
        _checkaliasarg(self.replacement, self.args)
1736 1725
1737 1726 def _getalias(aliases, tree):
1738 1727 """If tree looks like an unexpanded alias, return it. Return None
1739 1728 otherwise.
1740 1729 """
1741 1730 if isinstance(tree, tuple) and tree:
1742 1731 if tree[0] == 'symbol' and len(tree) == 2:
1743 1732 name = tree[1]
1744 1733 alias = aliases.get(name)
1745 1734 if alias and alias.args is None and alias.tree == tree:
1746 1735 return alias
1747 1736 if tree[0] == 'func' and len(tree) > 1:
1748 1737 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
1749 1738 name = tree[1][1]
1750 1739 alias = aliases.get(name)
1751 1740 if alias and alias.args is not None and alias.tree == tree[:2]:
1752 1741 return alias
1753 1742 return None
1754 1743
def _expandargs(tree, args):
    """Replace _aliasarg instances with the substitution value of the
    same name in args, recursively.
    """
    if not tree or not isinstance(tree, tuple):
        return tree
    name = _getaliasarg(tree)
    if name is not None:
        return args[name]
    return tuple(_expandargs(subtree, args) for subtree in tree)
1765 1754
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is None:
        # no alias at this node: recurse into children
        return tuple(_expandaliases(aliases, subtree, expanding, cache)
                     for subtree in tree)
    if alias in expanding:
        raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                 'detected') % alias.name)
    expanding.append(alias)
    if alias.name not in cache:
        cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                           expanding, cache)
    result = cache[alias.name]
    expanding.pop()
    if alias.args is not None:
        actuals = getlist(tree[2])
        if len(actuals) != len(alias.args):
            raise error.ParseError(
                _('invalid number of arguments: %s') % len(actuals))
        actuals = [_expandaliases(aliases, a, [], cache) for a in actuals]
        result = _expandargs(result, dict(zip(alias.args, actuals)))
    return result
1797 1786
def findaliases(ui, tree):
    # refuse trees that already contain the internal _aliasarg marker
    _checkaliasarg(tree)
    aliases = {}
    for name, value in ui.configitems('revsetalias'):
        alias = revsetalias(name, value)
        aliases[alias.name] = alias
    return _expandaliases(aliases, tree, [], {})
1805 1794
1806 1795 parse = parser.parser(tokenize, elements).parse
1807 1796
def match(ui, spec):
    # compile a revset expression into a function(repo, subset) -> revs
    if not spec:
        raise error.ParseError(_("empty query"))
    tree, pos = parse(spec)
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    if ui:
        tree = findaliases(ui, tree)
    weight, tree = optimize(tree, True)
    def mfunc(repo, subset):
        return getset(repo, subset, tree)
    return mfunc
1820 1809
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        return repr(str(s))

    def argtype(c, arg):
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render a list as a balanced tree of 'or's / a _list() call
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_list('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_list('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    parts = []
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                parts.append(d)
            elif d in 'dsnbr':
                parts.append(argtype(d, args[arg]))
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                parts.append(listexp(list(args[arg]), d))
                arg += 1
            else:
                raise util.Abort('unexpected revspec format character %s' % d)
        else:
            parts.append(c)
        pos += 1

    return ''.join(parts)
1914 1903
def prettyformat(tree):
    """Render a parsed revset tree as an indented multi-line string."""
    def _walk(node, depth, rows):
        if not isinstance(node, tuple) or node[0] in ('string', 'symbol'):
            rows.append((depth, str(node)))
            return
        rows.append((depth, '(%s' % node[0]))
        for child in node[1:]:
            _walk(child, depth + 1, rows)
        # close the paren on the last emitted row
        rows[-1] = (rows[-1][0], rows[-1][1] + ')')

    rows = []
    _walk(tree, 0, rows)
    return '\n'.join('  ' * depth + text for depth, text in rows)
1929 1918
1930 1919 # tell hggettext to extract docstrings from these functions:
1931 1920 i18nfunctions = symbols.values()
@@ -1,1805 +1,1805 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from i18n import _
17 17 import error, osutil, encoding, collections
18 18 import errno, re, shutil, sys, tempfile, traceback
19 19 import os, time, datetime, calendar, textwrap, signal
20 20 import imp, socket, urllib
21 21
22 22 if os.name == 'nt':
23 23 import windows as platform
24 24 else:
25 25 import posix as platform
26 26
27 27 cachestat = platform.cachestat
28 28 checkexec = platform.checkexec
29 29 checklink = platform.checklink
30 30 copymode = platform.copymode
31 31 executablepath = platform.executablepath
32 32 expandglobs = platform.expandglobs
33 33 explainexit = platform.explainexit
34 34 findexe = platform.findexe
35 35 gethgcmd = platform.gethgcmd
36 36 getuser = platform.getuser
37 37 groupmembers = platform.groupmembers
38 38 groupname = platform.groupname
39 39 hidewindow = platform.hidewindow
40 40 isexec = platform.isexec
41 41 isowner = platform.isowner
42 42 localpath = platform.localpath
43 43 lookupreg = platform.lookupreg
44 44 makedir = platform.makedir
45 45 nlinks = platform.nlinks
46 46 normpath = platform.normpath
47 47 normcase = platform.normcase
48 48 openhardlinks = platform.openhardlinks
49 49 oslink = platform.oslink
50 50 parsepatchoutput = platform.parsepatchoutput
51 51 pconvert = platform.pconvert
52 52 popen = platform.popen
53 53 posixfile = platform.posixfile
54 54 quotecommand = platform.quotecommand
55 55 realpath = platform.realpath
56 56 rename = platform.rename
57 57 samedevice = platform.samedevice
58 58 samefile = platform.samefile
59 59 samestat = platform.samestat
60 60 setbinary = platform.setbinary
61 61 setflags = platform.setflags
62 62 setsignalhandler = platform.setsignalhandler
63 63 shellquote = platform.shellquote
64 64 spawndetached = platform.spawndetached
65 65 split = platform.split
66 66 sshargs = platform.sshargs
67 67 statfiles = platform.statfiles
68 68 termwidth = platform.termwidth
69 69 testpid = platform.testpid
70 70 umask = platform.umask
71 71 unlink = platform.unlink
72 72 unlinkpath = platform.unlinkpath
73 73 username = platform.username
74 74
# Python compatibility

_notset = object()

def safehasattr(thing, attr):
    # sentinel-based attribute probe: true only when the lookup yields
    # something other than the unique _notset marker
    return getattr(thing, attr, _notset) is not _notset
81 81
def sha1(s=''):
    '''
    Low-overhead wrapper around Python's SHA support

    >>> f = _fastsha1
    >>> a = sha1()
    >>> a = f()
    >>> a.hexdigest()
    'da39a3ee5e6b4b0d3255bfef95601890afd80709'
    '''

    # rebound to the real hash constructor by _fastsha1 on first use
    return _fastsha1(s)
94 94
95 95 def _fastsha1(s=''):
96 96 # This function will import sha1 from hashlib or sha (whichever is
97 97 # available) and overwrite itself with it on the first call.
98 98 # Subsequent calls will go directly to the imported function.
99 99 if sys.version_info >= (2, 5):
100 100 from hashlib import sha1 as _sha1
101 101 else:
102 102 from sha import sha as _sha1
103 103 global _fastsha1, sha1
104 104 _fastsha1 = sha1 = _sha1
105 105 return _sha1(s)
106 106
try:
    # python 2 has a 'buffer' builtin; keep it
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            # plain slice copy stands in for the missing builtin
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            # memoryview gives a zero-copy view on python 3
            return memoryview(sliceable)[offset:]
116 116
117 117 import subprocess
118 118 closefds = os.name == 'posix'
119 119
def popen2(cmd, env=None, newlines=False):
    """Run cmd through a shell and return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout
130 130
def popen3(cmd, env=None, newlines=False):
    """Like popen2, but also return the child's stderr pipe."""
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout, p.stderr
139 139
def version():
    """Return version information if available."""
    try:
        import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
147 147
148 148 # used by parsedate
149 149 defaultdateformats = (
150 150 '%Y-%m-%d %H:%M:%S',
151 151 '%Y-%m-%d %I:%M:%S%p',
152 152 '%Y-%m-%d %H:%M',
153 153 '%Y-%m-%d %I:%M%p',
154 154 '%Y-%m-%d',
155 155 '%m-%d',
156 156 '%m/%d',
157 157 '%m/%d/%y',
158 158 '%m/%d/%Y',
159 159 '%a %b %d %H:%M:%S %Y',
160 160 '%a %b %d %I:%M:%S%p %Y',
161 161 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
162 162 '%b %d %H:%M:%S %Y',
163 163 '%b %d %I:%M:%S%p %Y',
164 164 '%b %d %H:%M:%S',
165 165 '%b %d %I:%M:%S%p',
166 166 '%b %d %H:%M',
167 167 '%b %d %I:%M%p',
168 168 '%b %d %Y',
169 169 '%b %d',
170 170 '%H:%M:%S',
171 171 '%I:%M:%S%p',
172 172 '%H:%M',
173 173 '%I:%M%p',
174 174 )
175 175
176 176 extendeddateformats = defaultdateformats + (
177 177 "%Y",
178 178 "%Y-%m",
179 179 "%b",
180 180 "%b %Y",
181 181 )
182 182
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    cache = {}
    if func.func_code.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
201 201
try:
    # probe for deque.remove, which python 2.4 lacks
    collections.deque.remove
    deque = collections.deque
except AttributeError:
    class deque(collections.deque):
        def remove(self, val):
            # drop the first item comparing equal to val, if any
            for idx, item in enumerate(self):
                if item == val:
                    del self[idx]
                    break
213 213
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    # bounded memoization: keeps roughly the 20 most recently used
    # entries, evicting the oldest (front of 'order') beyond that
    cache = {}
    order = deque()
    if func.func_code.co_argcount == 1:
        # single-argument variant avoids tuple packing on every call
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                # refresh recency of a hit
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
240 240
241 241 class propertycache(object):
242 242 def __init__(self, func):
243 243 self.func = func
244 244 self.name = func.__name__
245 245 def __get__(self, obj, type=None):
246 246 result = self.func(obj)
247 247 setattr(obj, self.name, result)
248 248 return result
249 249
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    # closefds is a platform flag defined elsewhere in this module
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    # stderr is not redirected, so perr is always None here
    return pout
256 256
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write the input to a temp file the command can read
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # OpenVMS reports success with the low bit set
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        # best-effort cleanup of both temporary files
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
293 293
# Maps filter-command prefixes to their implementations; consulted by
# filter() below.
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
298 298
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # NOTE: shadows the builtin filter() within this module.
    # Dispatch on an explicit 'tempfile:'/'pipe:' prefix; default to a pipe.
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)
305 305
def binary(s):
    """return true if a string is binary data"""
    # empty or None input is never considered binary
    if not s:
        return False
    return '\0' in s
309 309
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)) for x > 0; 0 for x == 0
        n = -1
        while x:
            x >>= 1
            n += 1
        if n < 0:
            return 0
        return n

    pending = []
    size = 0
    for piece in source:
        pending.append(piece)
        size += len(piece)
        if size < min:
            continue
        # grow the threshold before emitting: double it, but jump
        # straight to the accumulated size's power of two if larger,
        # and never exceed max
        if min < max:
            min = min << 1
            grown = 1 << log2(size)
            if grown > min:
                min = grown
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        size = 0
    if pending:
        # trailing data smaller than the threshold
        yield ''.join(pending)
340 340
341 341 Abort = error.Abort
342 342
def always(fn):
    '''predicate that accepts any input'''
    return True

def never(fn):
    '''predicate that rejects any input'''
    return False
348 348
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        # nothing to be relative to: just localize n2
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives (Windows): no relative path exists
            return os.path.join(root, localpath(n2))
        # make n2 absolute too so both sides are comparable
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # strip the common prefix, comparing from the front (hence the
    # reverse / pop-from-the-end dance)
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # go up from n1 to the common ancestor, then down to n2
    return os.sep.join((['..'] * len(a)) + b) or '.'
374 374
375 375 _hgexecutable = None
376 376
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    # safehasattr is defined elsewhere in this module
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze
386 386
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        # resolution order: $HG, the frozen binary itself, the 'hg'
        # script we were started from, then a $PATH search
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
405 405
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # writes the module-level cache read back by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
410 410
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9':
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # the environ={} default is safe here: it is only iterated,
        # never mutated
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or out == sys.__stdout__:
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # caller supplied a file-like object: funnel both stdout and
            # stderr through it, line by line
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            for line in proc.stdout:
                out.write(line)
            proc.wait()
            rc = proc.returncode
        if sys.platform == 'OpenVMS' and rc & 1:
            # OpenVMS signals success with the low bit set
            rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        try:
            # a ui-like object has warn(); anything else is treated as an
            # exception factory and raised
            onerr.warn(errmsg + '\n')
        except AttributeError:
            raise onerr(errmsg)
    return rc
467 467
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a one-frame traceback means the TypeError came from the
            # call itself (bad signature), not from inside func
            frames = traceback.extract_tb(sys.exc_info()[2])
            if len(frames) == 1:
                raise error.SignatureError
            raise

    return check
479 479
def copyfile(src, dest):
    "copy a file, preserving mode and atime/mtime"
    if os.path.islink(src):
        # recreate the symlink rather than copying its target
        try:
            os.unlink(dest)
        except OSError:
            pass
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copymode(src, dest)
        except shutil.Error, inst:
            # translate shutil failures into hg's Abort
            raise Abort(str(inst))
494 494
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible"""

    # default: hardlink only when src and dst live on the same device
    if hardlink is None:
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    num = 0
    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            # recurse; the returned hardlink flag propagates a fallback
            # decision down the rest of the tree
            hardlink, n = copyfiles(srcname, dstname, hardlink)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed once: stop trying for remaining files
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1

    return hardlink, num
522 522
# Names and characters Windows refuses in filenames.
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    '''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    '''
    # validate each path component independently
    for part in path.replace('\\', '/').split('/'):
        if not part:
            continue
        for c in part:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # device names are reserved regardless of extension (con.xml)
        base = part.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        last = part[-1]
        if last in '. ' and part not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
565 565
# pick the host validator: native Windows uses the check above, other
# platforms supply their own (usually a no-op)
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
570 570
def makelock(info, pathname):
    # Preferred: store info as the target of a dangling symlink, which
    # is created atomically.  EEXIST means the lock is already held.
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            raise
        # other OSErrors (e.g. filesystem without symlinks) fall
        # through to the plain-file variant below
    except AttributeError: # no symlink in os
        pass

    # Fallback: exclusive creation of a regular file holding info.
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
583 583
def readlock(pathname):
    # symlink-style lock (see makelock): the info is the link target
    try:
        return os.readlink(pathname)
    except OSError, why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported --
        # both mean we should try the regular-file variant
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
596 596
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no file descriptor: fall back to stat-by-name
        return os.stat(fp.name)
    return os.fstat(fd)
603 603
604 604 # File system features
605 605
def checkcase(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.stat(path)
    d, b = os.path.split(path)
    # derive a differently-cased sibling name for the final component
    folded = b.upper()
    if folded == b:
        folded = b.lower()
    if folded == b:
        return True # no evidence against case sensitivity
    try:
        st2 = os.stat(os.path.join(d, folded))
    except OSError:
        # folded name absent: the filesystem distinguishes cases
        return True
    # identical stat => both names hit the same file => case-insensitive
    return st2 != st1
628 628
# Optional google-re2 bindings.  _re2 is tri-state: None = imported but
# not yet verified usable, False = unavailable; compilere() below may
# promote None to True on first use.
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
634 634
def compilere(pat):
    '''Compile a regular expression, using re2 if possible

    For best performance, use only re2-compatible regexp features.'''
    global _re2
    if _re2 is None:
        # first call after a successful 'import re2': probe that the
        # module actually exposes compile()
        try:
            re2.compile
            _re2 = True
        except ImportError:
            _re2 = False
    if _re2:
        try:
            return re2.compile(pat)
        except re2.error:
            # pattern uses a feature re2 rejects; fall back to re
            pass
    return re.compile(pat)
652 652
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def find(p, contents):
        # return the directory entry whose normcase-ed form equals p
        for n in contents:
            if normcase(n) == p:
                return n
        return None

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Escape the separators for use inside a regex character class.
    # (Previous code did seps.replace('\\', '\\\\') and discarded the
    # result -- strings are immutable -- so a backslash separator was
    # never actually protected and '[^\/]' degenerated to '[^/]'.)
    escaped = re.escape(seps)
    # alternation: runs of non-separators vs runs of separators
    pattern = re.compile(r'([^%s]+)|([%s]+)' % (escaped, escaped))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs pass through unchanged
            result.append(sep)
            continue

        # cache directory listings per directory
        if dir not in _fspathcache:
            _fspathcache[dir] = os.listdir(dir)
        contents = _fspathcache[dir]

        found = find(part, contents)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            contents = os.listdir(dir)
            _fspathcache[dir] = contents
            found = find(part, contents)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
699 699
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        try:
            oslink(f1, f2)
        except OSError:
            # hardlinks not supported here
            return False

        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    finally:
        if fd is not None:
            fd.close()
        # remove both probe files, best effort
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass

    # not reached: every path through the try block returns
    return False
735 735
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # preserves the original's truthiness: None when altsep is absent
    return os.altsep and path.endswith(os.altsep)
739 739
def splitpath(path):
    '''Split path by os.sep.

    os.altsep is deliberately ignored: this is meant as a plain
    equivalent of "path.split(os.sep)".  Run os.path.normpath() on the
    input first if needed.'''
    return path.split(os.sep)
747 747
def gui():
    '''Are we running in a GUI?'''
    if sys.platform == 'darwin':
        if 'SSH_CONNECTION' in os.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        # Windows always has a GUI; X11 needs $DISPLAY
        return os.name == "nt" or os.environ.get("DISPLAY")
762 762
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    # create the temp file next to the original so a later rename stays
    # on the same filesystem
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                # original is missing: the empty temp file is the copy
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a half-written temp file behind
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
801 801
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        # double underscore: name-mangled to _atomictempfile__name
        self.__name = name # permanent name
        # temp copy we actually write to; pre-filled with the original
        # contents unless the mode asks for truncation ('w')
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # commit: atomically replace the original with the temp copy
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # abort: drop the temp copy, leaving the original untouched
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
839 839
def makedirs(name, mode=None):
    """recursive directory creation with parent mode inheritance"""
    try:
        os.mkdir(name)
    except OSError, err:
        if err.errno == errno.EEXIST:
            # already present (possibly created concurrently): done
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # parent is missing: create it recursively, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode)
        os.mkdir(name)
    if mode is not None:
        # override umask-derived permissions with the explicit mode
        os.chmod(name, mode)
856 856
def readfile(path):
    """Return the entire binary contents of path."""
    f = open(path, 'rb')
    try:
        data = f.read()
    finally:
        f.close()
    return data
863 863
def writefile(path, text):
    """Replace the contents of path with text (binary mode)."""
    f = open(path, 'wb')
    try:
        f.write(text)
    finally:
        f.close()
870 870
def appendfile(path, text):
    """Append text to path (binary mode), creating it if missing."""
    f = open(path, 'ab')
    try:
        f.write(text)
    finally:
        f.close()
877 877
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        # split oversized (>1MB) chunks into 256KB pieces so read() never
        # buffers much more than it needs
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = deque()

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        # NOTE: the source text contained both sides of a merge diff here
        # (string += accumulation vs. list + join); resolved to the list
        # accumulation, which is linear instead of quadratic.
        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue with roughly 256KB of pending chunks
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # source exhausted: short read
                    break

            chunk = queue.popleft()
            left -= len(chunk)
            if left < 0:
                # chunk overshoots: keep the tail for the next read
                queue.appendleft(chunk[left:])
                buf.append(chunk[:left])
            else:
                buf.append(chunk)

        return ''.join(buf)
925 925
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data).  Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is not None:
            nbytes = min(limit, size)
        else:
            nbytes = size
        # nbytes == 0 short-circuits: never issue a zero-length read
        data = nbytes and f.read(nbytes)
        if not data:
            break
        if limit:
            limit -= len(data)
        yield data
946 946
def makedate():
    '''return a (unixtime, local-UTC offset in seconds) tuple for now'''
    ct = time.time()
    if ct < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % ct, hint=hint)
    # offset derived from the UTC vs. local interpretation of the same
    # timestamp
    delta = (datetime.datetime.utcfromtimestamp(ct) -
             datetime.datetime.fromtimestamp(ct))
    tz = delta.days * 86400 + delta.seconds
    return ct, tz
956 956
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.  %1 and %2 in the format expand to
    the timezone sign+hours and minutes respectively."""
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format:
        # offset is seconds *away from* UTC, so a positive offset means
        # a timezone west of UTC -> negative sign in the rendered string
        if tz > 0:
            sign = "-"
        else:
            sign = "+"
        minutes = abs(tz) // 60
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    try:
        tm = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        tm = time.gmtime(sys.maxint)
    return time.strftime(format, tm)
978 978
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    # delegates to datestr(); date=None means "now"
    return datestr(date, format='%Y-%m-%d')
982 982
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    defaults is the part -> (biased, today) mapping built by parsedate();
    callers always supply one, so the [] default is never indexed.
    """
    def timezone(string):
        # UTC offset in seconds for a trailing "+HHMM"/"-HHMM", "GMT" or
        # "UTC" token; None when no timezone is present
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            sign = (tz[0] == "+") and 1 or -1
            hours = int(tz[1:3])
            minutes = int(tz[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tz == "GMT" or tz == "UTC":
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset is not None:
        # strip the recognized timezone token before strptime
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1023 1023
def parsedate(date, formats=None, bias={}):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    bias maps field groups (e.g. "HI", "mb") to preferred default
    strings; it is only read, so the mutable {} default is safe.
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()
    try:
        # fast path: "unixtime offset" pair of integers
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1077 1077
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # earliest timestamp matching an underspecified date
        d = dict(mb="1", d="1")
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp matching an underspecified date; probe month
        # lengths from 31 downward until parsedate accepts the day
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        # on or before
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        # on or after
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit "A to B" range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # a single date matched to the precision it was given
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1153 1153
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # successively trim at each marker; '<' keeps the tail, the others
    # keep the head
    for marker, keeptail in (('@', False), ('<', True),
                             (' ', False), ('.', False)):
        pos = user.find(marker)
        if pos >= 0:
            if keeptail:
                user = user[pos + 1:]
            else:
                user = user[:pos]
    return user
1169 1169
def emailuser(user):
    """Return the user portion of an email address."""
    at = user.find('@')
    if at >= 0:
        # drop the domain
        user = user[:at]
    lt = user.find('<')
    if lt >= 0:
        # drop any leading real-name part
        user = user[lt + 1:]
    return user
1179 1179
def email(author):
    '''get email of author.'''
    end = author.find('>')
    if end == -1:
        end = None
    # find() returning -1 plus one yields 0: the whole string when no
    # '<' is present
    start = author.find('<') + 1
    return author[start:end]
1186 1186
1187 1187 def _ellipsis(text, maxlength):
1188 1188 if len(text) <= maxlength:
1189 1189 return text, False
1190 1190 else:
1191 1191 return "%s..." % (text[:maxlength - 3]), True
1192 1192
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    try:
        # use unicode not to split at intermediate multi-byte sequence
        utext, truncated = _ellipsis(text.decode(encoding.encoding),
                                     maxlength)
        if not truncated:
            return text
        return utext.encode(encoding.encoding)
    except (UnicodeDecodeError, UnicodeEncodeError):
        # undecodable input: fall back to byte-wise truncation
        return _ellipsis(text, maxlength)[0]
1204 1204
# Lookup table for bytecount(): (multiplier, divisor, format) triples,
# ordered from the largest unit down; the multiplier selects how many
# significant digits the format string keeps for that magnitude.
_byteunits = (
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1217 1217
def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    # Walk the unit table from largest to smallest and use the first
    # entry whose threshold the value reaches.
    for multiplier, divisor, fmt in _byteunits:
        if nbytes >= divisor * multiplier:
            return fmt % (nbytes / float(divisor))
    # Very small values fall through to the plain-bytes entry.
    return _byteunits[-1][2] % nbytes
1225 1225
def uirepr(s):
    """repr() *s* for display, collapsing doubled backslashes.

    Avoids the double backslash a Windows path picks up from repr().
    """
    text = repr(s)
    return text.replace('\\\\', '\\')
1229 1229
# delay import of textwrap
def MBTextWrapper(**kwargs):
    # NOTE: on first call this function defines the wrapper class, then
    # rebinds the module-level name MBTextWrapper to that class (see the
    # 'global' statement below), so subsequent "calls" construct instances
    # directly without re-running this body.
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def __init__(self, **kwargs):
            textwrap.TextWrapper.__init__(self, **kwargs)

            # for compatibility between 2.4 and 2.6
            if getattr(self, 'drop_whitespace', None) is None:
                self.drop_whitespace = kwargs.get('drop_whitespace', True)

        def _cutdown(self, ucstr, space_left):
            # Split *ucstr* so the first piece occupies at most
            # *space_left* terminal columns (not characters).
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1340 1340
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap *line* to *width* columns; bytes in, bytes out.

    initindent prefixes the first output line, hangindent the rest.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    # Decode everything up front so wrapping is column-aware.
    uline = line.decode(encoding.encoding, encoding.encodingmode)
    uinit = initindent.decode(encoding.encoding, encoding.encodingmode)
    uhang = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=uinit,
                            subsequent_indent=uhang)
    return wrapper.fill(uline).encode(encoding.encoding)
1353 1353
def iterlines(iterator):
    """Yield every line contained in the chunks produced by *iterator*."""
    for chunk in iterator:
        lines = chunk.splitlines()
        for ln in lines:
            yield ln
1358 1358
def expandpath(path):
    """Expand environment variables, then '~' constructs, in *path*."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
1361 1361
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    # mainfrozen() and gethgcmd() come from elsewhere in this module /
    # the platform layer.
    if mainfrozen():
        # Frozen binaries: the interpreter *is* the hg executable.
        return [sys.executable]
    return gethgcmd()
1372 1372
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # NOTE(review): os.wait() returns a (pid, status) tuple, so the
        # 'pid in terminated' membership test below appears to rely on
        # testpid() as the effective liveness check -- confirm upstream.
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD does not exist on all platforms (e.g. Windows).
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # Re-check condfn() after the liveness test to avoid a race
            # where the child validates the condition just before exiting.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # Always restore the previous SIGCHLD handler.
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
1407 1407
# Compatibility shim: provide any()/all() on Python versions (< 2.5)
# where the builtins do not exist; on newer versions this is a no-op.
try:
    any, all = any, all
except NameError:
    def any(iterable):
        # True if at least one element is truthy.
        for i in iterable:
            if i:
                return True
        return False

    def all(iterable):
        # True only if every element is truthy (vacuously True when empty).
        for i in iterable:
            if not i:
                return False
        return True
1422 1422
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda text: text)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        # A two-character prefix means the first char is a regex escape;
        # map the bare character to itself so a doubled prefix collapses.
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        mapping[prefix_char] = prefix_char
    matcher = re.compile(r'%s(%s)' % (prefix, patterns))

    def _replace(match):
        # Strip the one-character prefix, look up the key, post-process.
        return fn(mapping[match.group()[1:]])

    return matcher.sub(_replace, s)
1447 1447
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        pass  # not numeric: fall through to a service-name lookup

    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)
1464 1464
# Strings accepted (case-insensitively) as boolean values by parsebool().
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
1475 1475
_hexdig = '0123456789ABCDEFabcdef'
# Map every two-hex-digit string (any case combination) to its character.
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)

def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    pieces = s.split('%')
    # fastpath: no percent sign at all means nothing to decode
    if len(pieces) == 1:
        return s
    out = pieces[0]
    for piece in pieces[1:]:
        try:
            out += _hextochr[piece[:2]] + piece[2:]
        except KeyError:
            # Not a valid two-digit escape: keep the literal '%'.
            out += '%' + piece
        except UnicodeDecodeError:
            out += unichr(int(piece[:2], 16)) + piece[2:]
    return out
1499 1499
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    # Characters left unescaped by __str__ in user/passwd vs path parts.
    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:"
    _matchscheme = re.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        # _localpath stays True for plain filesystem paths (no scheme).
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                    path = path
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                # rsplit so '@' inside the user part stays with the user.
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        # Only show components that were actually parsed (non-None).
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # Bracketed IPv6 literals must not be percent-quoted.
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        # Render the URL without credentials, then restore them.
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath
1778 1778
def hasscheme(path):
    """Report whether *path* carries a URL scheme prefix."""
    parsed = url(path)
    return bool(parsed.scheme)
1781 1781
def hasdriveletter(path):
    """True if *path* starts with a Windows drive letter ('x:...').

    A falsy *path* is returned unchanged (preserving '' / None), matching
    the original short-circuit behaviour.
    """
    if not path:
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
1784 1784
def urllocalpath(path):
    """Return the local filesystem path for *path*, leaving '?'/'#' alone."""
    parsed = url(path, parsequery=False, parsefragment=False)
    return parsed.localpath()
1787 1787
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # Mask rather than drop, so the URL shape stays recognizable.
        parsed.passwd = '***'
    return str(parsed)
1794 1794
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
1800 1800
def isatty(fd):
    """Return fd.isatty(); False for objects without an isatty method."""
    try:
        result = fd.isatty()
    except AttributeError:
        return False
    return result
@@ -1,479 +1,488 b''
1 1 $ hglog() { hg log --template "{rev} {phaseidx} {desc}\n" $*; }
2 2 $ mkcommit() {
3 3 > echo "$1" > "$1"
4 4 > hg add "$1"
5 5 > message="$1"
6 6 > shift
7 7 > hg ci -m "$message" $*
8 8 > }
9 9
10 10 $ hg init initialrepo
11 11 $ cd initialrepo
12 12
13 13 Cannot change null revision phase
14 14
15 15 $ hg phase --force --secret null
16 16 abort: cannot change null revision phase
17 17 [255]
18 18 $ hg phase null
19 19 -1: public
20 20
21 21 $ mkcommit A
22 22
23 23 New commit are draft by default
24 24
25 25 $ hglog
26 26 0 1 A
27 27
28 28 Following commit are draft too
29 29
30 30 $ mkcommit B
31 31
32 32 $ hglog
33 33 1 1 B
34 34 0 1 A
35 35
36 36 Draft commit are properly created over public one:
37 37
38 38 $ hg phase --public .
39 39 $ hglog
40 40 1 0 B
41 41 0 0 A
42 42
43 43 $ mkcommit C
44 44 $ mkcommit D
45 45
46 46 $ hglog
47 47 3 1 D
48 48 2 1 C
49 49 1 0 B
50 50 0 0 A
51 51
52 52 Test creating changeset as secret
53 53
54 54 $ mkcommit E --config phases.new-commit='secret'
55 55 $ hglog
56 56 4 2 E
57 57 3 1 D
58 58 2 1 C
59 59 1 0 B
60 60 0 0 A
61 61
62 62 Test the secret property is inherited
63 63
64 64 $ mkcommit H
65 65 $ hglog
66 66 5 2 H
67 67 4 2 E
68 68 3 1 D
69 69 2 1 C
70 70 1 0 B
71 71 0 0 A
72 72
73 73 Even on merge
74 74
75 75 $ hg up -q 1
76 76 $ mkcommit "B'"
77 77 created new head
78 78 $ hglog
79 79 6 1 B'
80 80 5 2 H
81 81 4 2 E
82 82 3 1 D
83 83 2 1 C
84 84 1 0 B
85 85 0 0 A
86 86 $ hg merge 4 # E
87 87 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
88 88 (branch merge, don't forget to commit)
89 89 $ hg ci -m "merge B' and E"
90 90 $ hglog
91 91 7 2 merge B' and E
92 92 6 1 B'
93 93 5 2 H
94 94 4 2 E
95 95 3 1 D
96 96 2 1 C
97 97 1 0 B
98 98 0 0 A
99 99
100 100 Test secret changeset are not pushed
101 101
102 102 $ hg init ../push-dest
103 103 $ cat > ../push-dest/.hg/hgrc << EOF
104 104 > [phases]
105 105 > publish=False
106 106 > EOF
107 107 $ hg outgoing ../push-dest --template='{rev} {phase} {desc|firstline}\n'
108 108 comparing with ../push-dest
109 109 searching for changes
110 110 0 public A
111 111 1 public B
112 112 2 draft C
113 113 3 draft D
114 114 6 draft B'
115 115 $ hg outgoing -r 'branch(default)' ../push-dest --template='{rev} {phase} {desc|firstline}\n'
116 116 comparing with ../push-dest
117 117 searching for changes
118 118 0 public A
119 119 1 public B
120 120 2 draft C
121 121 3 draft D
122 122 6 draft B'
123 123
124 124 $ hg push ../push-dest -f # force because we push multiple heads
125 125 pushing to ../push-dest
126 126 searching for changes
127 127 adding changesets
128 128 adding manifests
129 129 adding file changes
130 130 added 5 changesets with 5 changes to 5 files (+1 heads)
131 131 $ hglog
132 132 7 2 merge B' and E
133 133 6 1 B'
134 134 5 2 H
135 135 4 2 E
136 136 3 1 D
137 137 2 1 C
138 138 1 0 B
139 139 0 0 A
140 140 $ cd ../push-dest
141 141 $ hglog
142 142 4 1 B'
143 143 3 1 D
144 144 2 1 C
145 145 1 0 B
146 146 0 0 A
147 147
148 148 (Issue3303)
149 149 Check that remote secret changesets are ignored when checking creation of remote heads
150 150
151 151 We add a secret head into the push destination. This secret head shadows a
152 152 visible head shared between the initial repo and the push destination.
153 153
154 154 $ hg up -q 4 # B'
155 155 $ mkcommit Z --config phases.new-commit=secret
156 156 $ hg phase .
157 157 5: secret
158 158
159 159 # We now try to push a new public changeset that descend from the common public
160 160 # head shadowed by the remote secret head.
161 161
162 162 $ cd ../initialrepo
163 163 $ hg up -q 6 #B'
164 164 $ mkcommit I
165 165 created new head
166 166 $ hg push ../push-dest
167 167 pushing to ../push-dest
168 168 searching for changes
169 169 adding changesets
170 170 adding manifests
171 171 adding file changes
172 172 added 1 changesets with 1 changes to 1 files (+1 heads)
173 173
174 174 :note: The "(+1 heads)" is wrong as we did not add any visible head
175 175
176 176
177 177 Restore condition prior extra insertion.
178 178 $ hg -q --config extensions.mq= strip .
179 179 $ hg up -q 7
180 180 $ cd ..
181 181
182 182 Test secret changeset are not pull
183 183
184 184 $ hg init pull-dest
185 185 $ cd pull-dest
186 186 $ hg pull ../initialrepo
187 187 pulling from ../initialrepo
188 188 requesting all changes
189 189 adding changesets
190 190 adding manifests
191 191 adding file changes
192 192 added 5 changesets with 5 changes to 5 files (+1 heads)
193 193 (run 'hg heads' to see heads, 'hg merge' to merge)
194 194 $ hglog
195 195 4 0 B'
196 196 3 0 D
197 197 2 0 C
198 198 1 0 B
199 199 0 0 A
200 200 $ cd ..
201 201
202 202 But secret can still be bundled explicitly
203 203
204 204 $ cd initialrepo
205 205 $ hg bundle --base '4^' -r 'children(4)' ../secret-bundle.hg
206 206 4 changesets found
207 207 $ cd ..
208 208
209 209 Test secret changeset are not cloned
210 210 (during local clone)
211 211
212 212 $ hg clone -qU initialrepo clone-dest
213 213 $ hglog -R clone-dest
214 214 4 0 B'
215 215 3 0 D
216 216 2 0 C
217 217 1 0 B
218 218 0 0 A
219 219
220 220 Test revset
221 221
222 222 $ cd initialrepo
223 223 $ hglog -r 'public()'
224 224 0 0 A
225 225 1 0 B
226 226 $ hglog -r 'draft()'
227 227 2 1 C
228 228 3 1 D
229 229 6 1 B'
230 230 $ hglog -r 'secret()'
231 231 4 2 E
232 232 5 2 H
233 233 7 2 merge B' and E
234 234
235 235 test that phase are displayed in log at debug level
236 236
237 237 $ hg log --debug
238 238 changeset: 7:17a481b3bccb796c0521ae97903d81c52bfee4af
239 239 tag: tip
240 240 phase: secret
241 241 parent: 6:cf9fe039dfd67e829edf6522a45de057b5c86519
242 242 parent: 4:a603bfb5a83e312131cebcd05353c217d4d21dde
243 243 manifest: 7:5e724ffacba267b2ab726c91fc8b650710deaaa8
244 244 user: test
245 245 date: Thu Jan 01 00:00:00 1970 +0000
246 246 files+: C D E
247 247 extra: branch=default
248 248 description:
249 249 merge B' and E
250 250
251 251
252 252 changeset: 6:cf9fe039dfd67e829edf6522a45de057b5c86519
253 253 phase: draft
254 254 parent: 1:27547f69f25460a52fff66ad004e58da7ad3fb56
255 255 parent: -1:0000000000000000000000000000000000000000
256 256 manifest: 6:ab8bfef2392903058bf4ebb9e7746e8d7026b27a
257 257 user: test
258 258 date: Thu Jan 01 00:00:00 1970 +0000
259 259 files+: B'
260 260 extra: branch=default
261 261 description:
262 262 B'
263 263
264 264
265 265 changeset: 5:a030c6be5127abc010fcbff1851536552e6951a8
266 266 phase: secret
267 267 parent: 4:a603bfb5a83e312131cebcd05353c217d4d21dde
268 268 parent: -1:0000000000000000000000000000000000000000
269 269 manifest: 5:5c710aa854874fe3d5fa7192e77bdb314cc08b5a
270 270 user: test
271 271 date: Thu Jan 01 00:00:00 1970 +0000
272 272 files+: H
273 273 extra: branch=default
274 274 description:
275 275 H
276 276
277 277
278 278 changeset: 4:a603bfb5a83e312131cebcd05353c217d4d21dde
279 279 phase: secret
280 280 parent: 3:b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e
281 281 parent: -1:0000000000000000000000000000000000000000
282 282 manifest: 4:7173fd1c27119750b959e3a0f47ed78abe75d6dc
283 283 user: test
284 284 date: Thu Jan 01 00:00:00 1970 +0000
285 285 files+: E
286 286 extra: branch=default
287 287 description:
288 288 E
289 289
290 290
291 291 changeset: 3:b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e
292 292 phase: draft
293 293 parent: 2:f838bfaca5c7226600ebcfd84f3c3c13a28d3757
294 294 parent: -1:0000000000000000000000000000000000000000
295 295 manifest: 3:6e1f4c47ecb533ffd0c8e52cdc88afb6cd39e20c
296 296 user: test
297 297 date: Thu Jan 01 00:00:00 1970 +0000
298 298 files+: D
299 299 extra: branch=default
300 300 description:
301 301 D
302 302
303 303
304 304 changeset: 2:f838bfaca5c7226600ebcfd84f3c3c13a28d3757
305 305 phase: draft
306 306 parent: 1:27547f69f25460a52fff66ad004e58da7ad3fb56
307 307 parent: -1:0000000000000000000000000000000000000000
308 308 manifest: 2:66a5a01817fdf5239c273802b5b7618d051c89e4
309 309 user: test
310 310 date: Thu Jan 01 00:00:00 1970 +0000
311 311 files+: C
312 312 extra: branch=default
313 313 description:
314 314 C
315 315
316 316
317 317 changeset: 1:27547f69f25460a52fff66ad004e58da7ad3fb56
318 318 parent: 0:4a2df7238c3b48766b5e22fafbb8a2f506ec8256
319 319 parent: -1:0000000000000000000000000000000000000000
320 320 manifest: 1:cb5cbbc1bfbf24cc34b9e8c16914e9caa2d2a7fd
321 321 user: test
322 322 date: Thu Jan 01 00:00:00 1970 +0000
323 323 files+: B
324 324 extra: branch=default
325 325 description:
326 326 B
327 327
328 328
329 329 changeset: 0:4a2df7238c3b48766b5e22fafbb8a2f506ec8256
330 330 parent: -1:0000000000000000000000000000000000000000
331 331 parent: -1:0000000000000000000000000000000000000000
332 332 manifest: 0:007d8c9d88841325f5c6b06371b35b4e8a2b1a83
333 333 user: test
334 334 date: Thu Jan 01 00:00:00 1970 +0000
335 335 files+: A
336 336 extra: branch=default
337 337 description:
338 338 A
339 339
340 340
341 341
342 (Issue3707)
343 test invalid phase name
344
345 $ mkcommit I --config phases.new-commit='babar'
346 transaction abort!
347 rollback completed
348 abort: phases.new-commit: not a valid phase name ('babar')
349 [255]
350
342 351 Test phase command
343 352 ===================
344 353
345 354 initial picture
346 355
347 356 $ cat >> $HGRCPATH << EOF
348 357 > [extensions]
349 358 > hgext.graphlog=
350 359 > EOF
351 360 $ hg log -G --template "{rev} {phase} {desc}\n"
352 361 @ 7 secret merge B' and E
353 362 |\
354 363 | o 6 draft B'
355 364 | |
356 365 +---o 5 secret H
357 366 | |
358 367 o | 4 secret E
359 368 | |
360 369 o | 3 draft D
361 370 | |
362 371 o | 2 draft C
363 372 |/
364 373 o 1 public B
365 374 |
366 375 o 0 public A
367 376
368 377
369 378 display changesets phase
370 379
371 380 (mixing -r and plain rev specification)
372 381
373 382 $ hg phase 1::4 -r 7
374 383 1: public
375 384 2: draft
376 385 3: draft
377 386 4: secret
378 387 7: secret
379 388
380 389
381 390 move changeset forward
382 391
383 392 (with -r option)
384 393
385 394 $ hg phase --public -r 2
386 395 $ hg log -G --template "{rev} {phase} {desc}\n"
387 396 @ 7 secret merge B' and E
388 397 |\
389 398 | o 6 draft B'
390 399 | |
391 400 +---o 5 secret H
392 401 | |
393 402 o | 4 secret E
394 403 | |
395 404 o | 3 draft D
396 405 | |
397 406 o | 2 public C
398 407 |/
399 408 o 1 public B
400 409 |
401 410 o 0 public A
402 411
403 412
404 413 move changeset backward
405 414
406 415 (without -r option)
407 416
408 417 $ hg phase --draft --force 2
409 418 $ hg log -G --template "{rev} {phase} {desc}\n"
410 419 @ 7 secret merge B' and E
411 420 |\
412 421 | o 6 draft B'
413 422 | |
414 423 +---o 5 secret H
415 424 | |
416 425 o | 4 secret E
417 426 | |
418 427 o | 3 draft D
419 428 | |
420 429 o | 2 draft C
421 430 |/
422 431 o 1 public B
423 432 |
424 433 o 0 public A
425 434
426 435
427 436 move changeset forward and backward
428 437
429 438 $ hg phase --draft --force 1::4
430 439 $ hg log -G --template "{rev} {phase} {desc}\n"
431 440 @ 7 secret merge B' and E
432 441 |\
433 442 | o 6 draft B'
434 443 | |
435 444 +---o 5 secret H
436 445 | |
437 446 o | 4 draft E
438 447 | |
439 448 o | 3 draft D
440 449 | |
441 450 o | 2 draft C
442 451 |/
443 452 o 1 draft B
444 453 |
445 454 o 0 public A
446 455
447 456 test partial failure
448 457
449 458 $ hg phase --public 7
450 459 $ hg phase --draft '5 or 7'
451 460 cannot move 1 changesets to a more permissive phase, use --force
452 461 phase changed for 1 changesets
453 462 [1]
454 463 $ hg log -G --template "{rev} {phase} {desc}\n"
455 464 @ 7 public merge B' and E
456 465 |\
457 466 | o 6 public B'
458 467 | |
459 468 +---o 5 draft H
460 469 | |
461 470 o | 4 public E
462 471 | |
463 472 o | 3 public D
464 473 | |
465 474 o | 2 public C
466 475 |/
467 476 o 1 public B
468 477 |
469 478 o 0 public A
470 479
471 480
472 481 test complete failure
473 482
474 483 $ hg phase --draft 7
475 484 cannot move 1 changesets to a more permissive phase, use --force
476 485 no phases changed
477 486 [1]
478 487
479 488 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now