##// END OF EJS Templates
util: make spawndetached() handle subprocess early terminations...
Patrick Mezard -
r10344:9501cde4 default
parent child Browse files
Show More
@@ -1,1178 +1,1180 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import hex, nullid, nullrev, short
8 from node import hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, errno, re, glob, tempfile, time
10 import os, sys, errno, re, glob, tempfile, time
11 import mdiff, bdiff, util, templater, patch, error, encoding, templatekw
11 import mdiff, bdiff, util, templater, patch, error, encoding, templatekw
12 import match as _match
12 import match as _match
13
13
revrangesep = ':'  # separator between start and end in a revision range spec
15
15
def findpossible(cmd, table, strict=False):
    """Map each command name matching cmd to (aliases, table entry).

    An exact alias match always counts; unless strict, a prefix of an
    alias matches as well.  Debug commands (or their aliases) are
    returned only when no regular command matched.
    """
    regular = {}
    debug = {}
    for key in table.keys():
        names = key.lstrip("^").split("|")
        hit = None
        if cmd in names:
            hit = cmd
        elif not strict:
            for name in names:
                if name.startswith(cmd):
                    hit = name
                    break
        if hit is None:
            continue
        entry = (names, table[key])
        if names[0].startswith("debug") or hit.startswith("debug"):
            debug[hit] = entry
        else:
            regular[hit] = entry

    if regular:
        return regular
    if debug:
        return debug
    return regular
44
44
def findcmd(cmd, table, strict=True):
    """Resolve a command string to its (aliases, table entry) pair.

    Raises error.AmbiguousCommand when several commands match and
    error.UnknownCommand when none do.
    """
    matches = findpossible(cmd, table, strict)

    # an exact match wins over any prefix matches
    if cmd in matches:
        return matches[cmd]

    if len(matches) > 1:
        raise error.AmbiguousCommand(cmd, sorted(matches))

    for entry in matches.values():
        return entry

    raise error.UnknownCommand(cmd)
61
61
def bail_if_changed(repo):
    """Abort if the working directory is mid-merge or has pending changes."""
    if repo.dirstate.parents()[1] != nullid:
        raise util.Abort(_('outstanding uncommitted merge'))
    # modified, added, removed, deleted -- any of them blocks the operation
    changes = repo.status()[:4]
    if any(changes):
        raise util.Abort(_("outstanding uncommitted changes"))
68
68
def logmessage(opts):
    """Get the commit log message from the -m/--message or -l/--logfile option.

    Returns the message string, or None when neither option was given.
    Raises util.Abort when both options are given or the log file cannot
    be read.  A logfile of '-' reads the message from stdin.
    """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise util.Abort(_('options --message and --logfile are mutually '
                           'exclusive'))
    if not message and logfile:
        try:
            if logfile == '-':
                message = sys.stdin.read()
            else:
                # close the file explicitly instead of leaking the
                # descriptor until garbage collection
                fp = open(logfile)
                try:
                    message = fp.read()
                finally:
                    fp.close()
        except IOError as inst:
            raise util.Abort(_("can't read commit message '%s': %s") %
                             (logfile, inst.strerror))
    return message
87
87
def loglimit(opts):
    """Return the -l/--limit option as a positive int, or None when unset."""
    raw = opts.get('limit')
    if not raw:
        return None
    try:
        value = int(raw)
    except ValueError:
        raise util.Abort(_('limit must be a positive integer'))
    if value <= 0:
        raise util.Abort(_('limit must be positive'))
    return value
101
101
def remoteui(src, opts):
    """Build a ui object for remote interaction from a ui or repo plus opts."""
    if hasattr(src, 'baseui'):
        # a repository: start from its base ui so repo-specific
        # configuration is dropped, but read options from repo.ui
        dst = src.baseui.copy()
        src = src.ui
    else:
        # a plain ui object: keep all global options
        dst = src.copy()

    # ssh-specific options; command-line values win over configuration
    for opt in ('ssh', 'remotecmd'):
        value = opts.get(opt) or src.config('ui', opt)
        if value:
            dst.setconfig("ui", opt, value)

    # bundle-specific options
    root = src.config('bundle', 'mainreporoot')
    if root:
        dst.setconfig('bundle', 'mainreporoot', root)

    # authentication settings are copied wholesale
    for key, val in src.configitems('auth'):
        dst.setconfig('auth', key, val)

    return dst
126
126
def revpair(repo, revs):
    '''return pair of nodes, given list of revisions. second item can
    be None, meaning use working dir.'''

    def tonode(val, defval):
        # an empty spec (but not the integer 0) falls back to defval
        if not val and val != 0 and defval is not None:
            val = defval
        return repo.lookup(val)

    if not revs:
        return repo.dirstate.parents()[0], None

    if len(revs) == 1:
        spec = revs[0]
        if revrangesep in spec:
            first, second = spec.split(revrangesep, 1)
            return tonode(first, 0), tonode(second, len(repo) - 1)
        return tonode(spec, None), None

    if len(revs) == 2:
        if revrangesep in revs[0] or revrangesep in revs[1]:
            raise util.Abort(_('too many revisions specified'))
        return tonode(revs[0], None), tonode(revs[1], None)

    raise util.Abort(_('too many revisions specified'))
154
154
def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def torev(val, defval):
        # an empty spec (but not the integer 0) falls back to defval
        if not val and val != 0 and defval is not None:
            return defval
        return repo.changelog.rev(repo.lookup(val))

    seen = set()
    result = []

    def add(rev):
        # keep first-seen order while dropping duplicates
        if rev not in seen:
            seen.add(rev)
            result.append(rev)

    for spec in revs:
        if revrangesep in spec:
            first, second = spec.split(revrangesep, 1)
            first = torev(first, 0)
            second = torev(second, len(repo) - 1)
            step = first > second and -1 or 1
            for rev in xrange(first, second + step, step):
                add(rev)
        else:
            add(torev(spec, None))

    return result
183
183
def make_filename(repo, pat, node,
                  total=None, seqno=None, revwidth=None, pathname=None):
    """Expand %-escapes in an output filename pattern.

    Supported escapes: %% literal percent, %b repo basename; %H, %h, %R
    and %r node/revision forms (only when node is given); %N total and
    %n sequence number (padded to the width of total when both are
    given); %s, %d and %p basename, dirname and full form of pathname.
    Raises util.Abort on an unknown escape.
    """
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }

    try:
        if node:
            expander.update({
                'H': lambda: hex(node),
                'R': lambda: str(repo.changelog.rev(node)),
                'h': lambda: short(node),
                'r': lambda: str(repo.changelog.rev(node)).zfill(revwidth or 0),
                })
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # pad the sequence number to the width of the total
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        pieces = []
        i = 0
        end = len(pat)
        while i < end:
            ch = pat[i]
            if ch == '%':
                i += 1
                ch = expander[pat[i]]()
            pieces.append(ch)
            i += 1
        return ''.join(pieces)
    except KeyError as inst:
        raise util.Abort(_("invalid format spec '%%%s' in output filename") %
                         inst.args[0])
228
228
def make_file(repo, pat, node=None,
              total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
    """Open the file named by expanding pat, or pass a stream through.

    An empty pat or '-' yields stdout/stdin depending on mode, and a
    file-like object given as pat is returned as-is when its direction
    matches mode.  Otherwise the pattern is expanded via make_filename
    and opened with the requested mode.
    """
    writable = 'w' in mode or 'a' in mode

    if not pat or pat == '-':
        if writable:
            return sys.stdout
        return sys.stdin
    if writable and hasattr(pat, 'write'):
        return pat
    if 'r' in mode and hasattr(pat, 'read'):
        return pat
    name = make_filename(repo, pat, node, total, seqno, revwidth, pathname)
    return open(name, mode)
243
243
def expandpats(pats):
    """Expand plain (kind-less) patterns with shell-style globbing.

    When util.expandglobs is false the shell has already expanded
    globs, so pats is returned unchanged (as a fresh list).
    """
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for pat in pats:
        kind, name = _match._patsplit(pat, None)
        if kind is None:
            try:
                matches = glob.glob(name)
            except re.error:
                # a name the glob machinery cannot parse is kept verbatim
                matches = [name]
            if matches:
                expanded.extend(matches)
                continue
        expanded.append(pat)
    return expanded
260
260
def match(repo, pats=None, opts=None, globbed=False, default='relpath'):
    """Build a matcher for pats relative to the repo's working directory.

    Warnings about bad files are routed through repo.ui.  The None
    sentinels replace the previous mutable default arguments ([], {})
    so one shared list/dict is not reused across calls; behavior is
    otherwise unchanged.
    """
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])
    m = _match.match(repo.root, repo.getcwd(), pats,
                     opts.get('include'), opts.get('exclude'), default)
    def badfn(f, msg):
        repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    return m
270
270
def matchall(repo):
    """Return a matcher that matches every file in the repository."""
    return _match.always(repo.root, repo.getcwd())
273
273
def matchfiles(repo, files):
    """Return a matcher that matches exactly the given list of files."""
    return _match.exact(repo.root, repo.getcwd(), files)
276
276
def findrenames(repo, added, removed, threshold):
    '''find renamed files -- yields (before, after, score) tuples

    Compares every added file against every removed file still present
    in the parent revision ('.') and keeps, per added file, the removed
    file with the best similarity score at or above threshold.
    '''
    # added file -> (best removed candidate, best score so far)
    copies = {}
    ctx = repo['.']
    for r in removed:
        if r not in ctx:
            continue
        fctx = ctx.filectx(r)

        def score(text):
            # similarity in [0.0, 1.0]: 1.0 is identical content,
            # otherwise the fraction of bytes on matching lines
            if not len(text):
                return 0.0
            if not fctx.cmp(text):
                return 1.0
            if threshold == 1.0:
                # only exact matches wanted; skip the expensive diff
                return 0.0
            orig = fctx.data()
            # bdiff.blocks() returns blocks of matching lines
            # count the number of bytes in each
            equal = 0
            alines = mdiff.splitnewlines(text)
            matches = bdiff.blocks(text, orig)
            for x1, x2, y1, y2 in matches:
                for line in alines[x1:x2]:
                    equal += len(line)

            lengths = len(text) + len(orig)
            return equal * 2.0 / lengths

        # O(len(added) * len(removed)) comparison; threshold acts as
        # the initial "best score" so weaker matches are discarded
        for a in added:
            bestscore = copies.get(a, (None, threshold))[1]
            myscore = score(repo.wread(a))
            if myscore >= bestscore:
                copies[a] = (r, myscore)

    for dest, v in copies.iteritems():
        source, score = v
        yield source, dest, score
315
315
def addremove(repo, pats=None, opts=None, dry_run=None, similarity=None):
    """Schedule unknown files for addition and missing files for removal.

    When similarity > 0, files removed and added in the same pass are
    compared via findrenames and recorded as copies when similar
    enough.  dry_run and similarity fall back to the corresponding
    entries in opts when not given explicitly; dry_run reports without
    modifying the dirstate.
    """
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    added, unknown, deleted, removed = [], [], [], []
    audit_path = util.path_auditor(repo.root)
    m = match(repo, pats, opts)
    for abs in repo.walk(m):
        target = repo.wjoin(abs)
        good = True
        try:
            audit_path(abs)
        except Exception:
            # path failed auditing (e.g. escapes the repo); treat it as
            # unusable -- but unlike the previous bare except, do not
            # swallow KeyboardInterrupt/SystemExit
            good = False
        rel = m.rel(abs)
        exact = m.exact(abs)
        if good and abs not in repo.dirstate:
            unknown.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
        elif repo.dirstate[abs] != 'r' and (not good or not util.lexists(target)
            or (os.path.isdir(target) and not os.path.islink(target))):
            deleted.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
        # for finding renames
        elif repo.dirstate[abs] == 'r':
            removed.append(abs)
        elif repo.dirstate[abs] == 'a':
            added.append(abs)
    if not dry_run:
        repo.remove(deleted)
        repo.add(unknown)
    if similarity > 0:
        for old, new, score in findrenames(repo, added + unknown,
                                           removed + deleted, similarity):
            if repo.ui.verbose or not m.exact(old) or not m.exact(new):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (m.rel(old), m.rel(new), score * 100))
            if not dry_run:
                repo.copy(old, new)
360
360
def copy(ui, repo, pats, opts, rename=False):
    '''Copy (or, with rename=True, move) files matching pats to the last
    pattern, which names the destination.  Returns the number of files
    that could not be copied.'''
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    # abstarget -> abssrc, used to detect two sources colliding on one target
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")

    # expand one source pattern into (abs, rel, exact) source triples,
    # warning about exactly-named files that are unmanaged or removed
    def walkpat(pat):
        srcs = []
        m = match(repo, [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in '?r':
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # perform one copy; returns True on failure (counted by the caller)
    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        abstarget = util.canonpath(repo.root, cwd, otarget)
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.exists(target)
        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            # --after only records copies of files that already exist
            if not exists:
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                util.copyfile(src, target)
            except IOError, inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working copy\n') % relsrc)
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        origsrc = repo.dirstate.copied(abssrc) or abssrc
        if abstarget == origsrc: # copying back a copy?
            if state not in 'mn' and not dryrun:
                repo.dirstate.normallookup(abstarget)
        else:
            if repo.dirstate[origsrc] == 'a' and origsrc == abssrc:
                if not ui.quiet:
                    ui.warn(_("%s has not been committed yet, so no copy "
                              "data will be stored for %s.\n")
                            % (repo.pathto(origsrc, cwd), reltarget))
                if repo.dirstate[abstarget] in '?r' and not dryrun:
                    repo.add([abstarget])
            elif not dryrun:
                repo.copy(origsrc, abstarget)

        if rename and not dryrun:
            repo.remove([abssrc], not after)

    # build a target-path function for the normal (not --after) case
    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = util.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # build a target-path function for the --after case, where the
    # filesystem move already happened and we must guess the layout
    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if _match.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = util.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many sources already exist under dest
                    # when stripped at this length -- higher is better
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.exists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res


    pats = expandpats(pats)
    if not pats:
        raise util.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise util.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or _match.patkind(pats[0]):
            raise util.Abort(_('with multiple sources, destination must be an '
                               'existing directory'))
        if util.endswithsep(dest):
            raise util.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise util.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors
565
565
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    opts: command options; opts['daemon'] requests a detached run, while
    opts['daemon_pipefds'] marks the re-spawned, already-detaching child
    invocation (it holds the lock file path used for the handshake).
    parentfn: if given, called in the parent with the child's pid after a
    successful detached start; its result is returned.
    initfn: called before the pid file is written and runfn is invoked.
    runfn: the actual service body; its return value is returned.
    logfile: if given, the daemonized child's stdout/stderr are appended
    to this file instead of being discarded.
    runargs: command line used to re-spawn; defaults to the current one.
    appendpid: append to (rather than overwrite) the pid file.
    '''

    if opts['daemon'] and not opts['daemon_pipefds']:
        # Parent side: signal child process startup with file removal.
        # The child removes lockpath once it is up and running.
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-pipefds=%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    del runargs[i:i + 2]
                    break
            def condfn():
                # True once the child has removed the lock file;
                # rundetached() polls this so it can also detect a child
                # that terminated before ever signalling startup.
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise util.Abort(_('child process failed to start'))
        finally:
            try:
                os.unlink(lockpath)
            except OSError, e:
                # ENOENT is expected: the child normally removed it already
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if opts['pid_file']:
        mode = appendpid and 'a' or 'w'
        fp = open(opts['pid_file'], mode)
        fp.write(str(os.getpid()) + '\n')
        fp.close()

    if opts['daemon_pipefds']:
        # Child side: detach from the session, then tell the parent we
        # are alive by removing the lock file it created.
        lockpath = opts['daemon_pipefds']
        try:
            os.setsid()
        except AttributeError:
            # os.setsid is unavailable on some platforms (e.g. Windows)
            pass
        os.unlink(lockpath)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        # Redirect stdio: stdin from the null device, stdout/stderr to
        # the log file, or to the null device when no logfile was given.
        nullfd = os.open(util.nulldev, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
635
637
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, patch, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.patch = patch
        self.diffopts = diffopts
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, rev):
        '''Emit any buffered header/hunk for rev.

        Returns 1 if a hunk had been buffered for rev, 0 otherwise.
        A header identical to the previously emitted one is skipped.
        '''
        if rev in self.header:
            h = self.header.pop(rev)
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
        if rev in self.hunk:
            self.ui.write(self.hunk.pop(rev))
            return 1
        return 0

    def close(self):
        # emit the footer accumulated by subclasses, if any
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, **props):
        if not self.buffered:
            self._show(ctx, copies, props)
            return
        # buffered mode: capture the output for a later flush(rev)
        self.ui.pushbuffer()
        self._show(ctx, copies, props)
        self.hunk[ctx.rev()] = self.ui.popbuffer()

    def _show(self, ctx, copies, props):
        '''show a single changeset or file revision'''
        cnode = ctx.node()
        rev = ctx.rev()

        if self.ui.quiet:
            # quiet mode: only "rev:shortnode"
            self.ui.write("%d:%s\n" % (rev, short(cnode)))
            return

        cl = self.repo.changelog
        datestr = util.datestr(ctx.date())

        # full nodes in debug mode, 12-char prefixes otherwise
        tohex = self.ui.debugflag and hex or short

        meaningful = [(p, tohex(cl.node(p)))
                      for p in self._meaningful_parentrevs(cl, rev)]

        self.ui.write(_("changeset: %d:%s\n") % (rev, tohex(cnode)))

        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            self.ui.write(_("branch: %s\n") % encoding.tolocal(branch))
        for tag in self.repo.nodetags(cnode):
            self.ui.write(_("tag: %s\n") % tag)
        for p in meaningful:
            self.ui.write(_("parent: %d:%s\n") % p)

        if self.ui.debugflag:
            mnode = ctx.manifestnode()
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)))
        self.ui.write(_("user: %s\n") % ctx.user())
        self.ui.write(_("date: %s\n") % datestr)

        if self.ui.debugflag:
            # modified/added/removed relative to the first parent
            files = self.repo.status(cl.parents(cnode)[0], cnode)[:3]
            for key, value in zip([_("files:"), _("files+:"), _("files-:")],
                                  files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)))
        elif ctx.files() and self.ui.verbose:
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()))
        if copies and self.ui.verbose:
            self.ui.write(_("copies: %s\n") %
                          ' '.join('%s (%s)' % c for c in copies))

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')))

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"))
                self.ui.write(description)
                self.ui.write("\n\n")
            else:
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0])
        self.ui.write("\n")

        self.showpatch(cnode)

    def showpatch(self, node):
        # emit a diff against the first parent when patch output was asked for
        if not self.patch:
            return
        prev = self.repo.changelog.parents(node)[0]
        chunks = patch.diff(self.repo, prev, node, match=self.patch,
                            opts=patch.diffopts(self.ui, self.diffopts))
        for chunk in chunks:
            self.ui.write(chunk)
        self.ui.write("\n")

    def _meaningful_parentrevs(self, log, rev):
        """Return list of meaningful (or all if debug) parentrevs for rev.

        For merges (two non-nullrev revisions) both parents are meaningful.
        Otherwise the first parent revision is considered meaningful if it
        is not the preceding revision.
        """
        parents = log.parentrevs(rev)
        if self.ui.debugflag or parents[1] != nullrev:
            return parents
        if parents[0] >= rev - 1:
            return []
        return [parents[0]]
765
767
766
768
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
        # full node in debug mode, 12-char prefix otherwise
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        self.cache = {}

    def use_template(self, t):
        '''set template string to use'''
        self.t.cache['changeset'] = t

    def _meaningful_parentrevs(self, ctx):
        """Return list of meaningful (or all if debug) parentrevs for rev.
        """
        parents = ctx.parents()
        if len(parents) > 1:
            # a merge: both parents are meaningful
            return parents
        if self.ui.debugflag:
            # debug mode shows the null parent too
            return [parents[0], self.repo['null']]
        if parents[0].rev() >= ctx.rev() - 1:
            # linear history: the preceding revision is implied
            return []
        return parents

    def _show(self, ctx, copies, props):
        '''show a single changeset or file revision'''

        showlist = templatekw.showlist

        # showparents() behaviour depends on ui trace level which
        # causes unexpected behaviours at templating level and makes
        # it harder to extract it in a standalone function. Its
        # behaviour cannot be changed so leave it here for now.
        def showparents(**args):
            ctx = args['ctx']
            parents = [[('rev', p.rev()), ('node', p.hex())]
                       for p in self._meaningful_parentrevs(ctx)]
            return showlist('parent', parents, **args)

        props = props.copy()
        props.update(templatekw.keywords)
        props['parents'] = showparents
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # find correct templates for current mode

        # later entries win: a mode-specific template (e.g.
        # 'changeset_debug') overrides the generic one when its ui flag
        # is set and the map file defines it
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
            ]

        types = {'header': '', 'footer':'', 'changeset': 'changeset'}
        for mode, postfix in tmplmodes:
            for type in types:
                cur = postfix and ('%s_%s' % (type, postfix)) or type
                if mode and cur in self.t:
                    types[type] = cur

        try:

            # write header
            if types['header']:
                h = templater.stringify(self.t(types['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = types['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx.node())

            if types['footer']:
                # the footer is rendered only once; the inherited close()
                # writes it out at the end
                if not self.footer:
                    self.footer = templater.stringify(self.t(types['footer'],
                                                             **props))

        except KeyError, inst:
            # a template referenced a key missing from the map file
            msg = _("%s: no key named '%s'")
            raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError, inst:
            raise util.Abort(_('%s: %s') % (self.t.mapfile, inst.args[0]))
866
868
867 def show_changeset(ui, repo, opts, buffered=False, matchfn=False):
869 def show_changeset(ui, repo, opts, buffered=False, matchfn=False):
868 """show one changeset using template or regular display.
870 """show one changeset using template or regular display.
869
871
870 Display format will be the first non-empty hit of:
872 Display format will be the first non-empty hit of:
871 1. option 'template'
873 1. option 'template'
872 2. option 'style'
874 2. option 'style'
873 3. [ui] setting 'logtemplate'
875 3. [ui] setting 'logtemplate'
874 4. [ui] setting 'style'
876 4. [ui] setting 'style'
875 If all of these values are either the unset or the empty string,
877 If all of these values are either the unset or the empty string,
876 regular display via changeset_printer() is done.
878 regular display via changeset_printer() is done.
877 """
879 """
878 # options
880 # options
879 patch = False
881 patch = False
880 if opts.get('patch'):
882 if opts.get('patch'):
881 patch = matchfn or matchall(repo)
883 patch = matchfn or matchall(repo)
882
884
883 tmpl = opts.get('template')
885 tmpl = opts.get('template')
884 style = None
886 style = None
885 if tmpl:
887 if tmpl:
886 tmpl = templater.parsestring(tmpl, quoted=False)
888 tmpl = templater.parsestring(tmpl, quoted=False)
887 else:
889 else:
888 style = opts.get('style')
890 style = opts.get('style')
889
891
890 # ui settings
892 # ui settings
891 if not (tmpl or style):
893 if not (tmpl or style):
892 tmpl = ui.config('ui', 'logtemplate')
894 tmpl = ui.config('ui', 'logtemplate')
893 if tmpl:
895 if tmpl:
894 tmpl = templater.parsestring(tmpl)
896 tmpl = templater.parsestring(tmpl)
895 else:
897 else:
896 style = util.expandpath(ui.config('ui', 'style', ''))
898 style = util.expandpath(ui.config('ui', 'style', ''))
897
899
898 if not (tmpl or style):
900 if not (tmpl or style):
899 return changeset_printer(ui, repo, patch, opts, buffered)
901 return changeset_printer(ui, repo, patch, opts, buffered)
900
902
901 mapfile = None
903 mapfile = None
902 if style and not tmpl:
904 if style and not tmpl:
903 mapfile = style
905 mapfile = style
904 if not os.path.split(mapfile)[0]:
906 if not os.path.split(mapfile)[0]:
905 mapname = (templater.templatepath('map-cmdline.' + mapfile)
907 mapname = (templater.templatepath('map-cmdline.' + mapfile)
906 or templater.templatepath(mapfile))
908 or templater.templatepath(mapfile))
907 if mapname:
909 if mapname:
908 mapfile = mapname
910 mapfile = mapname
909
911
910 try:
912 try:
911 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
913 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
912 except SyntaxError, inst:
914 except SyntaxError, inst:
913 raise util.Abort(inst.args[0])
915 raise util.Abort(inst.args[0])
914 if tmpl:
916 if tmpl:
915 t.use_template(tmpl)
917 t.use_template(tmpl)
916 return t
918 return t
917
919
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""
    datematch = util.matchdate(date)
    matcher = matchall(repo)
    hits = {}

    def record(ctx, fns):
        # remember every changeset whose date satisfies the spec
        when = ctx.date()
        if datematch(when[0]):
            hits[ctx.rev()] = when

    # walkchangerevs calls record() on each changeset in a window before
    # yielding it, so hits is populated by the time membership is tested
    for ctx in walkchangerevs(repo, matcher, {'rev': None}, record):
        rev = ctx.rev()
        if rev in hits:
            ui.status(_("Found revision %s from %s\n") %
                      (rev, util.datestr(hits[rev])))
            return str(rev)

    raise util.Abort(_("revision matching date not found"))
938
940
939 def walkchangerevs(repo, match, opts, prepare):
941 def walkchangerevs(repo, match, opts, prepare):
940 '''Iterate over files and the revs in which they changed.
942 '''Iterate over files and the revs in which they changed.
941
943
942 Callers most commonly need to iterate backwards over the history
944 Callers most commonly need to iterate backwards over the history
943 in which they are interested. Doing so has awful (quadratic-looking)
945 in which they are interested. Doing so has awful (quadratic-looking)
944 performance, so we use iterators in a "windowed" way.
946 performance, so we use iterators in a "windowed" way.
945
947
946 We walk a window of revisions in the desired order. Within the
948 We walk a window of revisions in the desired order. Within the
947 window, we first walk forwards to gather data, then in the desired
949 window, we first walk forwards to gather data, then in the desired
948 order (usually backwards) to display it.
950 order (usually backwards) to display it.
949
951
950 This function returns an iterator yielding contexts. Before
952 This function returns an iterator yielding contexts. Before
951 yielding each context, the iterator will first call the prepare
953 yielding each context, the iterator will first call the prepare
952 function on each context in the window in forward order.'''
954 function on each context in the window in forward order.'''
953
955
954 def increasing_windows(start, end, windowsize=8, sizelimit=512):
956 def increasing_windows(start, end, windowsize=8, sizelimit=512):
955 if start < end:
957 if start < end:
956 while start < end:
958 while start < end:
957 yield start, min(windowsize, end - start)
959 yield start, min(windowsize, end - start)
958 start += windowsize
960 start += windowsize
959 if windowsize < sizelimit:
961 if windowsize < sizelimit:
960 windowsize *= 2
962 windowsize *= 2
961 else:
963 else:
962 while start > end:
964 while start > end:
963 yield start, min(windowsize, start - end - 1)
965 yield start, min(windowsize, start - end - 1)
964 start -= windowsize
966 start -= windowsize
965 if windowsize < sizelimit:
967 if windowsize < sizelimit:
966 windowsize *= 2
968 windowsize *= 2
967
969
968 follow = opts.get('follow') or opts.get('follow_first')
970 follow = opts.get('follow') or opts.get('follow_first')
969
971
970 if not len(repo):
972 if not len(repo):
971 return []
973 return []
972
974
973 if follow:
975 if follow:
974 defrange = '%s:0' % repo['.'].rev()
976 defrange = '%s:0' % repo['.'].rev()
975 else:
977 else:
976 defrange = '-1:0'
978 defrange = '-1:0'
977 revs = revrange(repo, opts['rev'] or [defrange])
979 revs = revrange(repo, opts['rev'] or [defrange])
978 wanted = set()
980 wanted = set()
979 slowpath = match.anypats() or (match.files() and opts.get('removed'))
981 slowpath = match.anypats() or (match.files() and opts.get('removed'))
980 fncache = {}
982 fncache = {}
981 change = util.cachefunc(repo.changectx)
983 change = util.cachefunc(repo.changectx)
982
984
983 if not slowpath and not match.files():
985 if not slowpath and not match.files():
984 # No files, no patterns. Display all revs.
986 # No files, no patterns. Display all revs.
985 wanted = set(revs)
987 wanted = set(revs)
986 copies = []
988 copies = []
987
989
988 if not slowpath:
990 if not slowpath:
989 # Only files, no patterns. Check the history of each file.
991 # Only files, no patterns. Check the history of each file.
990 def filerevgen(filelog, node):
992 def filerevgen(filelog, node):
991 cl_count = len(repo)
993 cl_count = len(repo)
992 if node is None:
994 if node is None:
993 last = len(filelog) - 1
995 last = len(filelog) - 1
994 else:
996 else:
995 last = filelog.rev(node)
997 last = filelog.rev(node)
996 for i, window in increasing_windows(last, nullrev):
998 for i, window in increasing_windows(last, nullrev):
997 revs = []
999 revs = []
998 for j in xrange(i - window, i + 1):
1000 for j in xrange(i - window, i + 1):
999 n = filelog.node(j)
1001 n = filelog.node(j)
1000 revs.append((filelog.linkrev(j),
1002 revs.append((filelog.linkrev(j),
1001 follow and filelog.renamed(n)))
1003 follow and filelog.renamed(n)))
1002 for rev in reversed(revs):
1004 for rev in reversed(revs):
1003 # only yield rev for which we have the changelog, it can
1005 # only yield rev for which we have the changelog, it can
1004 # happen while doing "hg log" during a pull or commit
1006 # happen while doing "hg log" during a pull or commit
1005 if rev[0] < cl_count:
1007 if rev[0] < cl_count:
1006 yield rev
1008 yield rev
1007 def iterfiles():
1009 def iterfiles():
1008 for filename in match.files():
1010 for filename in match.files():
1009 yield filename, None
1011 yield filename, None
1010 for filename_node in copies:
1012 for filename_node in copies:
1011 yield filename_node
1013 yield filename_node
1012 minrev, maxrev = min(revs), max(revs)
1014 minrev, maxrev = min(revs), max(revs)
1013 for file_, node in iterfiles():
1015 for file_, node in iterfiles():
1014 filelog = repo.file(file_)
1016 filelog = repo.file(file_)
1015 if not len(filelog):
1017 if not len(filelog):
1016 if node is None:
1018 if node is None:
1017 # A zero count may be a directory or deleted file, so
1019 # A zero count may be a directory or deleted file, so
1018 # try to find matching entries on the slow path.
1020 # try to find matching entries on the slow path.
1019 if follow:
1021 if follow:
1020 raise util.Abort(
1022 raise util.Abort(
1021 _('cannot follow nonexistent file: "%s"') % file_)
1023 _('cannot follow nonexistent file: "%s"') % file_)
1022 slowpath = True
1024 slowpath = True
1023 break
1025 break
1024 else:
1026 else:
1025 continue
1027 continue
1026 for rev, copied in filerevgen(filelog, node):
1028 for rev, copied in filerevgen(filelog, node):
1027 if rev <= maxrev:
1029 if rev <= maxrev:
1028 if rev < minrev:
1030 if rev < minrev:
1029 break
1031 break
1030 fncache.setdefault(rev, [])
1032 fncache.setdefault(rev, [])
1031 fncache[rev].append(file_)
1033 fncache[rev].append(file_)
1032 wanted.add(rev)
1034 wanted.add(rev)
1033 if follow and copied:
1035 if follow and copied:
1034 copies.append(copied)
1036 copies.append(copied)
1035 if slowpath:
1037 if slowpath:
1036 if follow:
1038 if follow:
1037 raise util.Abort(_('can only follow copies/renames for explicit '
1039 raise util.Abort(_('can only follow copies/renames for explicit '
1038 'filenames'))
1040 'filenames'))
1039
1041
1040 # The slow path checks files modified in every changeset.
1042 # The slow path checks files modified in every changeset.
1041 def changerevgen():
1043 def changerevgen():
1042 for i, window in increasing_windows(len(repo) - 1, nullrev):
1044 for i, window in increasing_windows(len(repo) - 1, nullrev):
1043 for j in xrange(i - window, i + 1):
1045 for j in xrange(i - window, i + 1):
1044 yield change(j)
1046 yield change(j)
1045
1047
1046 for ctx in changerevgen():
1048 for ctx in changerevgen():
1047 matches = filter(match, ctx.files())
1049 matches = filter(match, ctx.files())
1048 if matches:
1050 if matches:
1049 fncache[ctx.rev()] = matches
1051 fncache[ctx.rev()] = matches
1050 wanted.add(ctx.rev())
1052 wanted.add(ctx.rev())
1051
1053
1052 class followfilter(object):
1054 class followfilter(object):
1053 def __init__(self, onlyfirst=False):
1055 def __init__(self, onlyfirst=False):
1054 self.startrev = nullrev
1056 self.startrev = nullrev
1055 self.roots = set()
1057 self.roots = set()
1056 self.onlyfirst = onlyfirst
1058 self.onlyfirst = onlyfirst
1057
1059
1058 def match(self, rev):
1060 def match(self, rev):
1059 def realparents(rev):
1061 def realparents(rev):
1060 if self.onlyfirst:
1062 if self.onlyfirst:
1061 return repo.changelog.parentrevs(rev)[0:1]
1063 return repo.changelog.parentrevs(rev)[0:1]
1062 else:
1064 else:
1063 return filter(lambda x: x != nullrev,
1065 return filter(lambda x: x != nullrev,
1064 repo.changelog.parentrevs(rev))
1066 repo.changelog.parentrevs(rev))
1065
1067
1066 if self.startrev == nullrev:
1068 if self.startrev == nullrev:
1067 self.startrev = rev
1069 self.startrev = rev
1068 return True
1070 return True
1069
1071
1070 if rev > self.startrev:
1072 if rev > self.startrev:
1071 # forward: all descendants
1073 # forward: all descendants
1072 if not self.roots:
1074 if not self.roots:
1073 self.roots.add(self.startrev)
1075 self.roots.add(self.startrev)
1074 for parent in realparents(rev):
1076 for parent in realparents(rev):
1075 if parent in self.roots:
1077 if parent in self.roots:
1076 self.roots.add(rev)
1078 self.roots.add(rev)
1077 return True
1079 return True
1078 else:
1080 else:
1079 # backwards: all parents
1081 # backwards: all parents
1080 if not self.roots:
1082 if not self.roots:
1081 self.roots.update(realparents(self.startrev))
1083 self.roots.update(realparents(self.startrev))
1082 if rev in self.roots:
1084 if rev in self.roots:
1083 self.roots.remove(rev)
1085 self.roots.remove(rev)
1084 self.roots.update(realparents(rev))
1086 self.roots.update(realparents(rev))
1085 return True
1087 return True
1086
1088
1087 return False
1089 return False
1088
1090
1089 # it might be worthwhile to do this in the iterator if the rev range
1091 # it might be worthwhile to do this in the iterator if the rev range
1090 # is descending and the prune args are all within that range
1092 # is descending and the prune args are all within that range
1091 for rev in opts.get('prune', ()):
1093 for rev in opts.get('prune', ()):
1092 rev = repo.changelog.rev(repo.lookup(rev))
1094 rev = repo.changelog.rev(repo.lookup(rev))
1093 ff = followfilter()
1095 ff = followfilter()
1094 stop = min(revs[0], revs[-1])
1096 stop = min(revs[0], revs[-1])
1095 for x in xrange(rev, stop - 1, -1):
1097 for x in xrange(rev, stop - 1, -1):
1096 if ff.match(x):
1098 if ff.match(x):
1097 wanted.discard(x)
1099 wanted.discard(x)
1098
1100
1099 def iterate():
1101 def iterate():
1100 if follow and not match.files():
1102 if follow and not match.files():
1101 ff = followfilter(onlyfirst=opts.get('follow_first'))
1103 ff = followfilter(onlyfirst=opts.get('follow_first'))
1102 def want(rev):
1104 def want(rev):
1103 return ff.match(rev) and rev in wanted
1105 return ff.match(rev) and rev in wanted
1104 else:
1106 else:
1105 def want(rev):
1107 def want(rev):
1106 return rev in wanted
1108 return rev in wanted
1107
1109
1108 for i, window in increasing_windows(0, len(revs)):
1110 for i, window in increasing_windows(0, len(revs)):
1109 change = util.cachefunc(repo.changectx)
1111 change = util.cachefunc(repo.changectx)
1110 nrevs = [rev for rev in revs[i:i + window] if want(rev)]
1112 nrevs = [rev for rev in revs[i:i + window] if want(rev)]
1111 for rev in sorted(nrevs):
1113 for rev in sorted(nrevs):
1112 fns = fncache.get(rev)
1114 fns = fncache.get(rev)
1113 ctx = change(rev)
1115 ctx = change(rev)
1114 if not fns:
1116 if not fns:
1115 def fns_generator():
1117 def fns_generator():
1116 for f in ctx.files():
1118 for f in ctx.files():
1117 if match(f):
1119 if match(f):
1118 yield f
1120 yield f
1119 fns = fns_generator()
1121 fns = fns_generator()
1120 prepare(ctx, fns)
1122 prepare(ctx, fns)
1121 for rev in nrevs:
1123 for rev in nrevs:
1122 yield change(rev)
1124 yield change(rev)
1123 return iterate()
1125 return iterate()
1124
1126
1125 def commit(ui, repo, commitfunc, pats, opts):
1127 def commit(ui, repo, commitfunc, pats, opts):
1126 '''commit the specified files or all outstanding changes'''
1128 '''commit the specified files or all outstanding changes'''
1127 date = opts.get('date')
1129 date = opts.get('date')
1128 if date:
1130 if date:
1129 opts['date'] = util.parsedate(date)
1131 opts['date'] = util.parsedate(date)
1130 message = logmessage(opts)
1132 message = logmessage(opts)
1131
1133
1132 # extract addremove carefully -- this function can be called from a command
1134 # extract addremove carefully -- this function can be called from a command
1133 # that doesn't support addremove
1135 # that doesn't support addremove
1134 if opts.get('addremove'):
1136 if opts.get('addremove'):
1135 addremove(repo, pats, opts)
1137 addremove(repo, pats, opts)
1136
1138
1137 return commitfunc(ui, repo, message, match(repo, pats, opts), opts)
1139 return commitfunc(ui, repo, message, match(repo, pats, opts), opts)
1138
1140
1139 def commiteditor(repo, ctx, subs):
1141 def commiteditor(repo, ctx, subs):
1140 if ctx.description():
1142 if ctx.description():
1141 return ctx.description()
1143 return ctx.description()
1142 return commitforceeditor(repo, ctx, subs)
1144 return commitforceeditor(repo, ctx, subs)
1143
1145
1144 def commitforceeditor(repo, ctx, subs):
1146 def commitforceeditor(repo, ctx, subs):
1145 edittext = []
1147 edittext = []
1146 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1148 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1147 if ctx.description():
1149 if ctx.description():
1148 edittext.append(ctx.description())
1150 edittext.append(ctx.description())
1149 edittext.append("")
1151 edittext.append("")
1150 edittext.append("") # Empty line between message and comments.
1152 edittext.append("") # Empty line between message and comments.
1151 edittext.append(_("HG: Enter commit message."
1153 edittext.append(_("HG: Enter commit message."
1152 " Lines beginning with 'HG:' are removed."))
1154 " Lines beginning with 'HG:' are removed."))
1153 edittext.append(_("HG: Leave message empty to abort commit."))
1155 edittext.append(_("HG: Leave message empty to abort commit."))
1154 edittext.append("HG: --")
1156 edittext.append("HG: --")
1155 edittext.append(_("HG: user: %s") % ctx.user())
1157 edittext.append(_("HG: user: %s") % ctx.user())
1156 if ctx.p2():
1158 if ctx.p2():
1157 edittext.append(_("HG: branch merge"))
1159 edittext.append(_("HG: branch merge"))
1158 if ctx.branch():
1160 if ctx.branch():
1159 edittext.append(_("HG: branch '%s'")
1161 edittext.append(_("HG: branch '%s'")
1160 % encoding.tolocal(ctx.branch()))
1162 % encoding.tolocal(ctx.branch()))
1161 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1163 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1162 edittext.extend([_("HG: added %s") % f for f in added])
1164 edittext.extend([_("HG: added %s") % f for f in added])
1163 edittext.extend([_("HG: changed %s") % f for f in modified])
1165 edittext.extend([_("HG: changed %s") % f for f in modified])
1164 edittext.extend([_("HG: removed %s") % f for f in removed])
1166 edittext.extend([_("HG: removed %s") % f for f in removed])
1165 if not added and not modified and not removed:
1167 if not added and not modified and not removed:
1166 edittext.append(_("HG: no files changed"))
1168 edittext.append(_("HG: no files changed"))
1167 edittext.append("")
1169 edittext.append("")
1168 # run editor in the repository root
1170 # run editor in the repository root
1169 olddir = os.getcwd()
1171 olddir = os.getcwd()
1170 os.chdir(repo.root)
1172 os.chdir(repo.root)
1171 text = repo.ui.edit("\n".join(edittext), ctx.user())
1173 text = repo.ui.edit("\n".join(edittext), ctx.user())
1172 text = re.sub("(?m)^HG:.*\n", "", text)
1174 text = re.sub("(?m)^HG:.*\n", "", text)
1173 os.chdir(olddir)
1175 os.chdir(olddir)
1174
1176
1175 if not text.strip():
1177 if not text.strip():
1176 raise util.Abort(_("empty commit message"))
1178 raise util.Abort(_("empty commit message"))
1177
1179
1178 return text
1180 return text
@@ -1,1310 +1,1344 b''
1 # util.py - Mercurial utility functions and platform specfic implementations
1 # util.py - Mercurial utility functions and platform specfic implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specfic implementations.
10 """Mercurial utility functions and platform specfic implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from i18n import _
16 from i18n import _
17 import error, osutil, encoding
17 import error, osutil, encoding
18 import cStringIO, errno, re, shutil, sys, tempfile, traceback
18 import cStringIO, errno, re, shutil, sys, tempfile, traceback
19 import os, stat, time, calendar, textwrap
19 import os, stat, time, calendar, textwrap, signal
20 import imp
20 import imp
21
21
22 # Python compatibility
22 # Python compatibility
23
23
24 def sha1(s):
24 def sha1(s):
25 return _fastsha1(s)
25 return _fastsha1(s)
26
26
27 def _fastsha1(s):
27 def _fastsha1(s):
28 # This function will import sha1 from hashlib or sha (whichever is
28 # This function will import sha1 from hashlib or sha (whichever is
29 # available) and overwrite itself with it on the first call.
29 # available) and overwrite itself with it on the first call.
30 # Subsequent calls will go directly to the imported function.
30 # Subsequent calls will go directly to the imported function.
31 try:
31 try:
32 from hashlib import sha1 as _sha1
32 from hashlib import sha1 as _sha1
33 except ImportError:
33 except ImportError:
34 from sha import sha as _sha1
34 from sha import sha as _sha1
35 global _fastsha1, sha1
35 global _fastsha1, sha1
36 _fastsha1 = sha1 = _sha1
36 _fastsha1 = sha1 = _sha1
37 return _sha1(s)
37 return _sha1(s)
38
38
39 import subprocess
39 import subprocess
40 closefds = os.name == 'posix'
40 closefds = os.name == 'posix'
41
41
42 def popen2(cmd, env=None, newlines=False):
42 def popen2(cmd, env=None, newlines=False):
43 # Setting bufsize to -1 lets the system decide the buffer size.
43 # Setting bufsize to -1 lets the system decide the buffer size.
44 # The default for bufsize is 0, meaning unbuffered. This leads to
44 # The default for bufsize is 0, meaning unbuffered. This leads to
45 # poor performance on Mac OS X: http://bugs.python.org/issue4194
45 # poor performance on Mac OS X: http://bugs.python.org/issue4194
46 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
46 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
47 close_fds=closefds,
47 close_fds=closefds,
48 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
48 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
49 universal_newlines=newlines,
49 universal_newlines=newlines,
50 env=env)
50 env=env)
51 return p.stdin, p.stdout
51 return p.stdin, p.stdout
52
52
53 def popen3(cmd, env=None, newlines=False):
53 def popen3(cmd, env=None, newlines=False):
54 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
54 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
55 close_fds=closefds,
55 close_fds=closefds,
56 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
56 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
57 stderr=subprocess.PIPE,
57 stderr=subprocess.PIPE,
58 universal_newlines=newlines,
58 universal_newlines=newlines,
59 env=env)
59 env=env)
60 return p.stdin, p.stdout, p.stderr
60 return p.stdin, p.stdout, p.stderr
61
61
62 def version():
62 def version():
63 """Return version information if available."""
63 """Return version information if available."""
64 try:
64 try:
65 import __version__
65 import __version__
66 return __version__.version
66 return __version__.version
67 except ImportError:
67 except ImportError:
68 return 'unknown'
68 return 'unknown'
69
69
70 # used by parsedate
70 # used by parsedate
71 defaultdateformats = (
71 defaultdateformats = (
72 '%Y-%m-%d %H:%M:%S',
72 '%Y-%m-%d %H:%M:%S',
73 '%Y-%m-%d %I:%M:%S%p',
73 '%Y-%m-%d %I:%M:%S%p',
74 '%Y-%m-%d %H:%M',
74 '%Y-%m-%d %H:%M',
75 '%Y-%m-%d %I:%M%p',
75 '%Y-%m-%d %I:%M%p',
76 '%Y-%m-%d',
76 '%Y-%m-%d',
77 '%m-%d',
77 '%m-%d',
78 '%m/%d',
78 '%m/%d',
79 '%m/%d/%y',
79 '%m/%d/%y',
80 '%m/%d/%Y',
80 '%m/%d/%Y',
81 '%a %b %d %H:%M:%S %Y',
81 '%a %b %d %H:%M:%S %Y',
82 '%a %b %d %I:%M:%S%p %Y',
82 '%a %b %d %I:%M:%S%p %Y',
83 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
83 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
84 '%b %d %H:%M:%S %Y',
84 '%b %d %H:%M:%S %Y',
85 '%b %d %I:%M:%S%p %Y',
85 '%b %d %I:%M:%S%p %Y',
86 '%b %d %H:%M:%S',
86 '%b %d %H:%M:%S',
87 '%b %d %I:%M:%S%p',
87 '%b %d %I:%M:%S%p',
88 '%b %d %H:%M',
88 '%b %d %H:%M',
89 '%b %d %I:%M%p',
89 '%b %d %I:%M%p',
90 '%b %d %Y',
90 '%b %d %Y',
91 '%b %d',
91 '%b %d',
92 '%H:%M:%S',
92 '%H:%M:%S',
93 '%I:%M:%S%p',
93 '%I:%M:%S%p',
94 '%H:%M',
94 '%H:%M',
95 '%I:%M%p',
95 '%I:%M%p',
96 )
96 )
97
97
98 extendeddateformats = defaultdateformats + (
98 extendeddateformats = defaultdateformats + (
99 "%Y",
99 "%Y",
100 "%Y-%m",
100 "%Y-%m",
101 "%b",
101 "%b",
102 "%b %Y",
102 "%b %Y",
103 )
103 )
104
104
105 def cachefunc(func):
105 def cachefunc(func):
106 '''cache the result of function calls'''
106 '''cache the result of function calls'''
107 # XXX doesn't handle keywords args
107 # XXX doesn't handle keywords args
108 cache = {}
108 cache = {}
109 if func.func_code.co_argcount == 1:
109 if func.func_code.co_argcount == 1:
110 # we gain a small amount of time because
110 # we gain a small amount of time because
111 # we don't need to pack/unpack the list
111 # we don't need to pack/unpack the list
112 def f(arg):
112 def f(arg):
113 if arg not in cache:
113 if arg not in cache:
114 cache[arg] = func(arg)
114 cache[arg] = func(arg)
115 return cache[arg]
115 return cache[arg]
116 else:
116 else:
117 def f(*args):
117 def f(*args):
118 if args not in cache:
118 if args not in cache:
119 cache[args] = func(*args)
119 cache[args] = func(*args)
120 return cache[args]
120 return cache[args]
121
121
122 return f
122 return f
123
123
124 def lrucachefunc(func):
124 def lrucachefunc(func):
125 '''cache most recent results of function calls'''
125 '''cache most recent results of function calls'''
126 cache = {}
126 cache = {}
127 order = []
127 order = []
128 if func.func_code.co_argcount == 1:
128 if func.func_code.co_argcount == 1:
129 def f(arg):
129 def f(arg):
130 if arg not in cache:
130 if arg not in cache:
131 if len(cache) > 20:
131 if len(cache) > 20:
132 del cache[order.pop(0)]
132 del cache[order.pop(0)]
133 cache[arg] = func(arg)
133 cache[arg] = func(arg)
134 else:
134 else:
135 order.remove(arg)
135 order.remove(arg)
136 order.append(arg)
136 order.append(arg)
137 return cache[arg]
137 return cache[arg]
138 else:
138 else:
139 def f(*args):
139 def f(*args):
140 if args not in cache:
140 if args not in cache:
141 if len(cache) > 20:
141 if len(cache) > 20:
142 del cache[order.pop(0)]
142 del cache[order.pop(0)]
143 cache[args] = func(*args)
143 cache[args] = func(*args)
144 else:
144 else:
145 order.remove(args)
145 order.remove(args)
146 order.append(args)
146 order.append(args)
147 return cache[args]
147 return cache[args]
148
148
149 return f
149 return f
150
150
151 class propertycache(object):
151 class propertycache(object):
152 def __init__(self, func):
152 def __init__(self, func):
153 self.func = func
153 self.func = func
154 self.name = func.__name__
154 self.name = func.__name__
155 def __get__(self, obj, type=None):
155 def __get__(self, obj, type=None):
156 result = self.func(obj)
156 result = self.func(obj)
157 setattr(obj, self.name, result)
157 setattr(obj, self.name, result)
158 return result
158 return result
159
159
160 def pipefilter(s, cmd):
160 def pipefilter(s, cmd):
161 '''filter string S through command CMD, returning its output'''
161 '''filter string S through command CMD, returning its output'''
162 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
162 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
163 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
163 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
164 pout, perr = p.communicate(s)
164 pout, perr = p.communicate(s)
165 return pout
165 return pout
166
166
167 def tempfilter(s, cmd):
167 def tempfilter(s, cmd):
168 '''filter string S through a pair of temporary files with CMD.
168 '''filter string S through a pair of temporary files with CMD.
169 CMD is used as a template to create the real command to be run,
169 CMD is used as a template to create the real command to be run,
170 with the strings INFILE and OUTFILE replaced by the real names of
170 with the strings INFILE and OUTFILE replaced by the real names of
171 the temporary files generated.'''
171 the temporary files generated.'''
172 inname, outname = None, None
172 inname, outname = None, None
173 try:
173 try:
174 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
174 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
175 fp = os.fdopen(infd, 'wb')
175 fp = os.fdopen(infd, 'wb')
176 fp.write(s)
176 fp.write(s)
177 fp.close()
177 fp.close()
178 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
178 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
179 os.close(outfd)
179 os.close(outfd)
180 cmd = cmd.replace('INFILE', inname)
180 cmd = cmd.replace('INFILE', inname)
181 cmd = cmd.replace('OUTFILE', outname)
181 cmd = cmd.replace('OUTFILE', outname)
182 code = os.system(cmd)
182 code = os.system(cmd)
183 if sys.platform == 'OpenVMS' and code & 1:
183 if sys.platform == 'OpenVMS' and code & 1:
184 code = 0
184 code = 0
185 if code:
185 if code:
186 raise Abort(_("command '%s' failed: %s") %
186 raise Abort(_("command '%s' failed: %s") %
187 (cmd, explain_exit(code)))
187 (cmd, explain_exit(code)))
188 return open(outname, 'rb').read()
188 return open(outname, 'rb').read()
189 finally:
189 finally:
190 try:
190 try:
191 if inname:
191 if inname:
192 os.unlink(inname)
192 os.unlink(inname)
193 except:
193 except:
194 pass
194 pass
195 try:
195 try:
196 if outname:
196 if outname:
197 os.unlink(outname)
197 os.unlink(outname)
198 except:
198 except:
199 pass
199 pass
200
200
201 filtertable = {
201 filtertable = {
202 'tempfile:': tempfilter,
202 'tempfile:': tempfilter,
203 'pipe:': pipefilter,
203 'pipe:': pipefilter,
204 }
204 }
205
205
206 def filter(s, cmd):
206 def filter(s, cmd):
207 "filter a string through a command that transforms its input to its output"
207 "filter a string through a command that transforms its input to its output"
208 for name, fn in filtertable.iteritems():
208 for name, fn in filtertable.iteritems():
209 if cmd.startswith(name):
209 if cmd.startswith(name):
210 return fn(s, cmd[len(name):].lstrip())
210 return fn(s, cmd[len(name):].lstrip())
211 return pipefilter(s, cmd)
211 return pipefilter(s, cmd)
212
212
213 def binary(s):
213 def binary(s):
214 """return true if a string is binary data"""
214 """return true if a string is binary data"""
215 return bool(s and '\0' in s)
215 return bool(s and '\0' in s)
216
216
217 def increasingchunks(source, min=1024, max=65536):
217 def increasingchunks(source, min=1024, max=65536):
218 '''return no less than min bytes per chunk while data remains,
218 '''return no less than min bytes per chunk while data remains,
219 doubling min after each chunk until it reaches max'''
219 doubling min after each chunk until it reaches max'''
220 def log2(x):
220 def log2(x):
221 if not x:
221 if not x:
222 return 0
222 return 0
223 i = 0
223 i = 0
224 while x:
224 while x:
225 x >>= 1
225 x >>= 1
226 i += 1
226 i += 1
227 return i - 1
227 return i - 1
228
228
229 buf = []
229 buf = []
230 blen = 0
230 blen = 0
231 for chunk in source:
231 for chunk in source:
232 buf.append(chunk)
232 buf.append(chunk)
233 blen += len(chunk)
233 blen += len(chunk)
234 if blen >= min:
234 if blen >= min:
235 if min < max:
235 if min < max:
236 min = min << 1
236 min = min << 1
237 nmin = 1 << log2(blen)
237 nmin = 1 << log2(blen)
238 if nmin > min:
238 if nmin > min:
239 min = nmin
239 min = nmin
240 if min > max:
240 if min > max:
241 min = max
241 min = max
242 yield ''.join(buf)
242 yield ''.join(buf)
243 blen = 0
243 blen = 0
244 buf = []
244 buf = []
245 if buf:
245 if buf:
246 yield ''.join(buf)
246 yield ''.join(buf)
247
247
248 Abort = error.Abort
248 Abort = error.Abort
249
249
250 def always(fn):
250 def always(fn):
251 return True
251 return True
252
252
253 def never(fn):
253 def never(fn):
254 return False
254 return False
255
255
256 def pathto(root, n1, n2):
256 def pathto(root, n1, n2):
257 '''return the relative path from one place to another.
257 '''return the relative path from one place to another.
258 root should use os.sep to separate directories
258 root should use os.sep to separate directories
259 n1 should use os.sep to separate directories
259 n1 should use os.sep to separate directories
260 n2 should use "/" to separate directories
260 n2 should use "/" to separate directories
261 returns an os.sep-separated path.
261 returns an os.sep-separated path.
262
262
263 If n1 is a relative path, it's assumed it's
263 If n1 is a relative path, it's assumed it's
264 relative to root.
264 relative to root.
265 n2 should always be relative to root.
265 n2 should always be relative to root.
266 '''
266 '''
267 if not n1:
267 if not n1:
268 return localpath(n2)
268 return localpath(n2)
269 if os.path.isabs(n1):
269 if os.path.isabs(n1):
270 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
270 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
271 return os.path.join(root, localpath(n2))
271 return os.path.join(root, localpath(n2))
272 n2 = '/'.join((pconvert(root), n2))
272 n2 = '/'.join((pconvert(root), n2))
273 a, b = splitpath(n1), n2.split('/')
273 a, b = splitpath(n1), n2.split('/')
274 a.reverse()
274 a.reverse()
275 b.reverse()
275 b.reverse()
276 while a and b and a[-1] == b[-1]:
276 while a and b and a[-1] == b[-1]:
277 a.pop()
277 a.pop()
278 b.pop()
278 b.pop()
279 b.reverse()
279 b.reverse()
280 return os.sep.join((['..'] * len(a)) + b) or '.'
280 return os.sep.join((['..'] * len(a)) + b) or '.'
281
281
282 def canonpath(root, cwd, myname):
282 def canonpath(root, cwd, myname):
283 """return the canonical path of myname, given cwd and root"""
283 """return the canonical path of myname, given cwd and root"""
284 if endswithsep(root):
284 if endswithsep(root):
285 rootsep = root
285 rootsep = root
286 else:
286 else:
287 rootsep = root + os.sep
287 rootsep = root + os.sep
288 name = myname
288 name = myname
289 if not os.path.isabs(name):
289 if not os.path.isabs(name):
290 name = os.path.join(root, cwd, name)
290 name = os.path.join(root, cwd, name)
291 name = os.path.normpath(name)
291 name = os.path.normpath(name)
292 audit_path = path_auditor(root)
292 audit_path = path_auditor(root)
293 if name != rootsep and name.startswith(rootsep):
293 if name != rootsep and name.startswith(rootsep):
294 name = name[len(rootsep):]
294 name = name[len(rootsep):]
295 audit_path(name)
295 audit_path(name)
296 return pconvert(name)
296 return pconvert(name)
297 elif name == root:
297 elif name == root:
298 return ''
298 return ''
299 else:
299 else:
300 # Determine whether `name' is in the hierarchy at or beneath `root',
300 # Determine whether `name' is in the hierarchy at or beneath `root',
301 # by iterating name=dirname(name) until that causes no change (can't
301 # by iterating name=dirname(name) until that causes no change (can't
302 # check name == '/', because that doesn't work on windows). For each
302 # check name == '/', because that doesn't work on windows). For each
303 # `name', compare dev/inode numbers. If they match, the list `rel'
303 # `name', compare dev/inode numbers. If they match, the list `rel'
304 # holds the reversed list of components making up the relative file
304 # holds the reversed list of components making up the relative file
305 # name we want.
305 # name we want.
306 root_st = os.stat(root)
306 root_st = os.stat(root)
307 rel = []
307 rel = []
308 while True:
308 while True:
309 try:
309 try:
310 name_st = os.stat(name)
310 name_st = os.stat(name)
311 except OSError:
311 except OSError:
312 break
312 break
313 if samestat(name_st, root_st):
313 if samestat(name_st, root_st):
314 if not rel:
314 if not rel:
315 # name was actually the same as root (maybe a symlink)
315 # name was actually the same as root (maybe a symlink)
316 return ''
316 return ''
317 rel.reverse()
317 rel.reverse()
318 name = os.path.join(*rel)
318 name = os.path.join(*rel)
319 audit_path(name)
319 audit_path(name)
320 return pconvert(name)
320 return pconvert(name)
321 dirname, basename = os.path.split(name)
321 dirname, basename = os.path.split(name)
322 rel.append(basename)
322 rel.append(basename)
323 if dirname == name:
323 if dirname == name:
324 break
324 break
325 name = dirname
325 name = dirname
326
326
327 raise Abort('%s not under root' % myname)
327 raise Abort('%s not under root' % myname)
328
328
329 _hgexecutable = None
329 _hgexecutable = None
330
330
331 def main_is_frozen():
331 def main_is_frozen():
332 """return True if we are a frozen executable.
332 """return True if we are a frozen executable.
333
333
334 The code supports py2exe (most common, Windows only) and tools/freeze
334 The code supports py2exe (most common, Windows only) and tools/freeze
335 (portable, not much used).
335 (portable, not much used).
336 """
336 """
337 return (hasattr(sys, "frozen") or # new py2exe
337 return (hasattr(sys, "frozen") or # new py2exe
338 hasattr(sys, "importers") or # old py2exe
338 hasattr(sys, "importers") or # old py2exe
339 imp.is_frozen("__main__")) # tools/freeze
339 imp.is_frozen("__main__")) # tools/freeze
340
340
341 def hgexecutable():
341 def hgexecutable():
342 """return location of the 'hg' executable.
342 """return location of the 'hg' executable.
343
343
344 Defaults to $HG or 'hg' in the search path.
344 Defaults to $HG or 'hg' in the search path.
345 """
345 """
346 if _hgexecutable is None:
346 if _hgexecutable is None:
347 hg = os.environ.get('HG')
347 hg = os.environ.get('HG')
348 if hg:
348 if hg:
349 set_hgexecutable(hg)
349 set_hgexecutable(hg)
350 elif main_is_frozen():
350 elif main_is_frozen():
351 set_hgexecutable(sys.executable)
351 set_hgexecutable(sys.executable)
352 else:
352 else:
353 exe = find_exe('hg') or os.path.basename(sys.argv[0])
353 exe = find_exe('hg') or os.path.basename(sys.argv[0])
354 set_hgexecutable(exe)
354 set_hgexecutable(exe)
355 return _hgexecutable
355 return _hgexecutable
356
356
357 def set_hgexecutable(path):
357 def set_hgexecutable(path):
358 """set location of the 'hg' executable"""
358 """set location of the 'hg' executable"""
359 global _hgexecutable
359 global _hgexecutable
360 _hgexecutable = path
360 _hgexecutable = path
361
361
362 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
362 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
363 '''enhanced shell command execution.
363 '''enhanced shell command execution.
364 run with environment maybe modified, maybe in different dir.
364 run with environment maybe modified, maybe in different dir.
365
365
366 if command fails and onerr is None, return status. if ui object,
366 if command fails and onerr is None, return status. if ui object,
367 print error message and return status, else raise onerr object as
367 print error message and return status, else raise onerr object as
368 exception.'''
368 exception.'''
369 def py2shell(val):
369 def py2shell(val):
370 'convert python object into string that is useful to shell'
370 'convert python object into string that is useful to shell'
371 if val is None or val is False:
371 if val is None or val is False:
372 return '0'
372 return '0'
373 if val is True:
373 if val is True:
374 return '1'
374 return '1'
375 return str(val)
375 return str(val)
376 origcmd = cmd
376 origcmd = cmd
377 if os.name == 'nt':
377 if os.name == 'nt':
378 cmd = '"%s"' % cmd
378 cmd = '"%s"' % cmd
379 env = dict(os.environ)
379 env = dict(os.environ)
380 env.update((k, py2shell(v)) for k, v in environ.iteritems())
380 env.update((k, py2shell(v)) for k, v in environ.iteritems())
381 env['HG'] = hgexecutable()
381 env['HG'] = hgexecutable()
382 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
382 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
383 env=env, cwd=cwd)
383 env=env, cwd=cwd)
384 if sys.platform == 'OpenVMS' and rc & 1:
384 if sys.platform == 'OpenVMS' and rc & 1:
385 rc = 0
385 rc = 0
386 if rc and onerr:
386 if rc and onerr:
387 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
387 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
388 explain_exit(rc)[0])
388 explain_exit(rc)[0])
389 if errprefix:
389 if errprefix:
390 errmsg = '%s: %s' % (errprefix, errmsg)
390 errmsg = '%s: %s' % (errprefix, errmsg)
391 try:
391 try:
392 onerr.warn(errmsg + '\n')
392 onerr.warn(errmsg + '\n')
393 except AttributeError:
393 except AttributeError:
394 raise onerr(errmsg)
394 raise onerr(errmsg)
395 return rc
395 return rc
396
396
397 def checksignature(func):
397 def checksignature(func):
398 '''wrap a function with code to check for calling errors'''
398 '''wrap a function with code to check for calling errors'''
399 def check(*args, **kwargs):
399 def check(*args, **kwargs):
400 try:
400 try:
401 return func(*args, **kwargs)
401 return func(*args, **kwargs)
402 except TypeError:
402 except TypeError:
403 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
403 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
404 raise error.SignatureError
404 raise error.SignatureError
405 raise
405 raise
406
406
407 return check
407 return check
408
408
409 # os.path.lexists is not available on python2.3
409 # os.path.lexists is not available on python2.3
410 def lexists(filename):
410 def lexists(filename):
411 "test whether a file with this name exists. does not follow symlinks"
411 "test whether a file with this name exists. does not follow symlinks"
412 try:
412 try:
413 os.lstat(filename)
413 os.lstat(filename)
414 except:
414 except:
415 return False
415 return False
416 return True
416 return True
417
417
418 def unlink(f):
418 def unlink(f):
419 """unlink and remove the directory if it is empty"""
419 """unlink and remove the directory if it is empty"""
420 os.unlink(f)
420 os.unlink(f)
421 # try removing directories that might now be empty
421 # try removing directories that might now be empty
422 try:
422 try:
423 os.removedirs(os.path.dirname(f))
423 os.removedirs(os.path.dirname(f))
424 except OSError:
424 except OSError:
425 pass
425 pass
426
426
427 def copyfile(src, dest):
427 def copyfile(src, dest):
428 "copy a file, preserving mode and atime/mtime"
428 "copy a file, preserving mode and atime/mtime"
429 if os.path.islink(src):
429 if os.path.islink(src):
430 try:
430 try:
431 os.unlink(dest)
431 os.unlink(dest)
432 except:
432 except:
433 pass
433 pass
434 os.symlink(os.readlink(src), dest)
434 os.symlink(os.readlink(src), dest)
435 else:
435 else:
436 try:
436 try:
437 shutil.copyfile(src, dest)
437 shutil.copyfile(src, dest)
438 shutil.copystat(src, dest)
438 shutil.copystat(src, dest)
439 except shutil.Error, inst:
439 except shutil.Error, inst:
440 raise Abort(str(inst))
440 raise Abort(str(inst))
441
441
442 def copyfiles(src, dst, hardlink=None):
442 def copyfiles(src, dst, hardlink=None):
443 """Copy a directory tree using hardlinks if possible"""
443 """Copy a directory tree using hardlinks if possible"""
444
444
445 if hardlink is None:
445 if hardlink is None:
446 hardlink = (os.stat(src).st_dev ==
446 hardlink = (os.stat(src).st_dev ==
447 os.stat(os.path.dirname(dst)).st_dev)
447 os.stat(os.path.dirname(dst)).st_dev)
448
448
449 if os.path.isdir(src):
449 if os.path.isdir(src):
450 os.mkdir(dst)
450 os.mkdir(dst)
451 for name, kind in osutil.listdir(src):
451 for name, kind in osutil.listdir(src):
452 srcname = os.path.join(src, name)
452 srcname = os.path.join(src, name)
453 dstname = os.path.join(dst, name)
453 dstname = os.path.join(dst, name)
454 copyfiles(srcname, dstname, hardlink)
454 copyfiles(srcname, dstname, hardlink)
455 else:
455 else:
456 if hardlink:
456 if hardlink:
457 try:
457 try:
458 os_link(src, dst)
458 os_link(src, dst)
459 except (IOError, OSError):
459 except (IOError, OSError):
460 hardlink = False
460 hardlink = False
461 shutil.copy(src, dst)
461 shutil.copy(src, dst)
462 else:
462 else:
463 shutil.copy(src, dst)
463 shutil.copy(src, dst)
464
464
class path_auditor(object):
    '''ensure that a filesystem path contains no banned components.
    the following properties of a path are checked:

    - under top-level .hg
    - starts at the root of a windows drive
    - contains ".."
    - traverses a symlink (e.g. a/symlink_here/b)
    - inside a nested repository'''

    def __init__(self, root):
        # paths that already passed a full audit (skip re-checking)
        self.audited = set()
        # directory prefixes already known to be safe
        self.auditeddir = set()
        self.root = root

    def __call__(self, path):
        """Audit path; raise Abort on any banned component."""
        if path in self.audited:
            return
        normpath = os.path.normcase(path)
        parts = splitpath(normpath)
        # reject drive-absolute paths, a leading .hg / empty component
        # (absolute path), and any '..' traversal
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise Abort(_("path contains illegal component: %s") % path)
        # cheap substring test first; only then scan the components
        if '.hg' in path.lower():
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise Abort(_('path %r is inside repo %r') % (path, base))
        def check(prefix):
            # verify one directory prefix: must not be a symlink and
            # must not contain a nested repository (.hg subdirectory)
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError, err:
                # EINVAL can be raised as invalid path syntax under win32.
                # They must be ignored for patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise Abort(_('path %r traverses symbolic link %r') %
                                (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    raise Abort(_('path %r is inside repo %r') %
                                (path, prefix))
        parts.pop()
        prefixes = []
        # walk the directory prefixes from longest to shortest, stopping
        # at the first one already known to be good
        while parts:
            prefix = os.sep.join(parts)
            if prefix in self.auditeddir:
                break
            check(prefix)
            prefixes.append(prefix)
            parts.pop()

        self.audited.add(path)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)
527
527
def nlinks(pathname):
    """Return number of hardlinks for the given file."""
    st = os.lstat(pathname)
    return st.st_nlink
531
531
if hasattr(os, 'link'):
    os_link = os.link
else:
    # fallback for platforms whose os module lacks link()
    def os_link(src, dst):
        raise OSError(0, _("Hardlinks not supported"))
537
537
def lookup_reg(key, name=None, scope=None):
    """Registry lookup stub: always returns None.

    NOTE(review): the platform star-import further down presumably
    replaces this with a real implementation on Windows -- confirm.
    """
    return None
540
540
def hidewindow():
    """Hide current shell window.

    Used to hide the window opened when starting asynchronous
    child process under Windows, unneeded on other systems.
    """
    # no-op default; NOTE(review): likely overridden by the platform
    # star-import below on Windows -- confirm
    pass
548
548
# pull in platform-specific implementations; these star-imports may
# override the generic fallbacks defined above
if os.name == 'nt':
    from windows import *
else:
    from posix import *
553
553
def makelock(info, pathname):
    """Create a lock file at pathname whose content is info.

    Prefers a symlink (atomic, works over NFS); where os has no
    symlink, falls back to exclusive creation of a regular file.
    """
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        # only "already locked" is fatal; other symlink failures fall
        # through to the regular-file path below
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    # O_EXCL makes creation fail if the lock already exists
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
566
566
def readlock(pathname):
    """Return the contents of a lock file created by makelock()."""
    try:
        return os.readlink(pathname)
    except OSError, why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported --
        # both mean we should read it as a plain file
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    return posixfile(pathname).read()
576
576
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # not a real file object: stat it by name instead
        return os.stat(fp.name)
    return os.fstat(fd)
583
583
584 # File system features
584 # File system features
585
585
def checkcase(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.  Returns True (case-sensitive) when the
    case-folded sibling cannot be stat'ed.
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    p2 = os.path.join(d, b.upper())
    if path == p2:
        p2 = os.path.join(d, b.lower())
    try:
        s2 = os.stat(p2)
        if s2 == s1:
            # the folded sibling is the very same file: case-insensitive
            return False
        return True
    except OSError:
        # was a bare 'except:', which also swallowed KeyboardInterrupt
        # and SystemExit; only a stat failure means "sibling missing"
        return True
605
605
# cache of directory -> os.listdir(directory) used by fspath()
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name is either relative to root, or it is an absolute path starting
    with root. Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    Returns None if the path does not exist.
    '''
    # If name is absolute, make it relative
    if name.lower().startswith(root.lower()):
        l = len(root)
        if name[l] == os.sep or name[l] == os.altsep:
            l = l + 1
        name = name[l:]

    if not os.path.exists(os.path.join(root, name)):
        return None

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # (fix: the result of replace() was previously discarded -- strings
    # are immutable -- leaving '\' unescaped in the character classes)
    seps = seps.replace('\\', '\\\\')
    pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normcase(os.path.normpath(root))
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = os.listdir(dir)
        contents = _fspathcache[dir]

        # find the directory entry matching this component case-insensitively
        lpart = part.lower()
        lenp = len(part)
        for n in contents:
            if lenp == len(n) and n.lower() == lpart:
                result.append(n)
                break
        else:
            # Cannot happen, as the file exists!
            result.append(part)
        dir = os.path.join(dir, lpart)

    return ''.join(result)
653
653
def checkexec(path):
    """
    Check whether the given path is on a filesystem with UNIX-like exec flags

    Requires a directory (like /foo/.hg)
    """

    # VFAT on some Linux versions can flip mode but it doesn't persist
    # a FS remount. Frequently we can detect it if files are created
    # with exec bit on.

    try:
        EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        fh, fn = tempfile.mkstemp("", "", path)
        try:
            os.close(fh)
            m = os.stat(fn).st_mode & 0777
            # a fresh file should not come into existence executable
            new_file_has_exec = m & EXECFLAGS
            os.chmod(fn, m ^ EXECFLAGS)
            # flipping the exec bits must actually stick
            exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
        finally:
            os.unlink(fn)
    except (IOError, OSError):
        # we don't care, the user probably won't be able to commit anyway
        return False
    return not (new_file_has_exec or exec_flags_cannot_flip)
680
680
def checklink(path):
    """check whether the given path is on a symlink-capable filesystem"""
    # mktemp is not racy here: symlink creation fails if the name
    # already exists
    probe = tempfile.mktemp(dir=path)
    try:
        os.symlink(".", probe)
        os.unlink(probe)
    except (OSError, AttributeError):
        # OSError: filesystem refused; AttributeError: no os.symlink
        return False
    return True
692
692
def needbinarypatch():
    """return True if patches should be applied in binary mode by default."""
    # only Windows needs binary mode to keep line endings intact
    binary_oses = ('nt',)
    return os.name in binary_oses
696
696
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # os.altsep is None on posix, short-circuiting to a falsy result
    return os.altsep and path.endswith(os.altsep)
700
700
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    separator = os.sep
    pieces = path.split(separator)
    return pieces
708
708
def gui():
    '''Are we running in a GUI?'''
    if os.name == "nt":
        return True
    if os.name == "mac":
        return True
    # elsewhere, the presence of $DISPLAY is the signal
    return os.environ.get("DISPLAY")
712
712
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want.  If the original file already exists, just copy
    # its mode.  Otherwise, manually obey umask.
    try:
        st_mode = os.lstat(name).st_mode & 0777
    except OSError, inst:
        if inst.errno != errno.ENOENT:
            raise
        st_mode = createmode
        if st_mode is None:
            st_mode = ~umask
        st_mode &= 0666
    os.chmod(temp, st_mode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                # the source does not exist: an empty temp file will do
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:
        # best effort: never leave the temporary file behind on failure
        try: os.unlink(temp)
        except: pass
        raise
    return temp
760
760
class atomictempfile(object):
    """file-like object that atomically updates a file

    All writes will be redirected to a temporary copy of the original
    file. When rename is called, the copy is renamed to the original
    name, making the changes visible.
    """
    def __init__(self, name, mode, createmode):
        # final destination path
        self.__name = name
        # _fp stays None until the temp file is open, so __del__ is
        # safe even if mktempcopy below raises
        self._fp = None
        self.temp = mktempcopy(name, emptyok=('w' in mode),
                               createmode=createmode)
        self._fp = posixfile(self.temp, mode)

    def __getattr__(self, name):
        # delegate all other file operations to the temporary file
        return getattr(self._fp, name)

    def rename(self):
        """Commit: atomically move the temporary copy over the target."""
        if not self._fp.closed:
            self._fp.close()
        rename(self.temp, localpath(self.__name))

    def __del__(self):
        # abort path: if rename() was never called, discard the temp file
        if not self._fp:
            return
        if not self._fp.closed:
            try:
                os.unlink(self.temp)
            except: pass
            self._fp.close()
791
791
def makedirs(name, mode=None):
    """recursive directory creation with parent mode inheritance"""
    try:
        os.mkdir(name)
        if mode is not None:
            os.chmod(name, mode)
        return
    except OSError, err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT:
            raise
    # a parent is missing: create ancestors first, then retry
    parent = os.path.abspath(os.path.dirname(name))
    makedirs(parent, mode)
    makedirs(name, mode)
807
807
class opener(object):
    """Open files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    """
    def __init__(self, base, audit=True):
        # base: directory all relative paths are resolved against
        self.base = base
        if audit:
            self.audit_path = path_auditor(base)
        else:
            # 'always' accepts any path unchecked
            self.audit_path = always
        self.createmode = None

    @propertycache
    def _can_symlink(self):
        # lazily computed and cached: probing the filesystem is expensive
        return checklink(self.base)

    def _fixfilemode(self, name):
        # apply the configured creation mode (minus exec bits) to a
        # freshly created file
        if self.createmode is None:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        """Open path (relative to base), creating parent directories
        as needed; on write, break up existing hardlinks (COW)."""
        self.audit_path(path)
        f = os.path.join(self.base, path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ("r", "rb"):
            try:
                nlink = nlinks(f)
            except OSError:
                # file does not exist yet; make sure its directory does
                nlink = 0
                d = os.path.dirname(f)
                if not os.path.isdir(d):
                    makedirs(d, self.createmode)
            if atomictemp:
                return atomictempfile(f, mode, self.createmode)
            if nlink > 1:
                # hardlinked elsewhere: replace with a private copy
                rename(mktempcopy(f), f)
        fp = posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        """Create a symlink at dst pointing to src; where symlinks are
        unsupported, write src as the file's contents instead."""
        self.audit_path(dst)
        linkname = os.path.join(self.base, dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        dirname = os.path.dirname(linkname)
        if not os.path.exists(dirname):
            makedirs(dirname, self.createmode)

        if self._can_symlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            f = self(dst, "w")
            f.write(src)
            f.close()
            self._fixfilemode(dst)
879
879
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        self.iter = iter(in_iter)
        # leftover bytes from the previous read
        self.buf = ''
        self.targetsize = 2**16

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        if l > len(self.buf) and self.iter:
            # Clamp to a multiple of self.targetsize
            targetsize = max(l, self.targetsize)
            collector = cStringIO.StringIO()
            collector.write(self.buf)
            collected = len(self.buf)
            for chunk in self.iter:
                collector.write(chunk)
                collected += len(chunk)
                if collected >= targetsize:
                    break
            if collected < targetsize:
                # the source ran dry; mark it so we never iterate again
                self.iter = False
            self.buf = collector.getvalue()
        if len(self.buf) == l:
            s, self.buf = str(self.buf), ''
        else:
            # buffer() keeps a zero-copy view of the remainder
            s, self.buf = self.buf[:l], buffer(self.buf, l)
        return s
913
913
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    remaining = limit
    while True:
        nbytes = size
        if remaining is not None:
            nbytes = min(remaining, size)
        # a zero budget short-circuits the read entirely
        chunk = nbytes and f.read(nbytes)
        if not chunk:
            break
        if remaining is not None:
            remaining -= len(chunk)
        yield chunk
934
934
def makedate():
    """Return the current local time as a (unixtime, tzoffset) tuple."""
    now = time.localtime()
    # pick the DST-adjusted zone offset when DST is in effect
    if time.daylight and now[8] == 1:
        tz = time.altzone
    else:
        tz = time.timezone
    return time.mktime(now), tz
942
942
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    when, offset = date or makedate()
    if "%1" in format or "%2" in format:
        # %1 expands to the signed hours, %2 to the minutes of the
        # UTC offset (offset is seconds *west* of UTC, hence the signs)
        if offset > 0:
            zonesign = "-"
        else:
            zonesign = "+"
        zoneminutes = abs(offset) // 60
        format = format.replace("%1", "%c%02d" % (zonesign, zoneminutes // 60))
        format = format.replace("%2", "%02d" % (zoneminutes % 60))
    return time.strftime(format, time.gmtime(float(when) - offset))
956
956
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into ISO 8601 date."""
    return datestr(date, format='%Y-%m-%d')
960
960
def strdate(string, format, defaults=()):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    defaults maps strptime field groups (e.g. 'd', 'HI') to fallback
    string values appended when the format lacks that field.  The
    default is now an immutable empty tuple instead of a mutable []
    (iterating either yields nothing, so behavior is unchanged).
    """
    def timezone(string):
        # recognize a trailing numeric (+/-HHMM) or symbolic (GMT/UTC)
        # time zone; return seconds west of UTC, or None if absent
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            sign = (tz[0] == "+") and 1 or -1
            hours = int(tz[1:3])
            minutes = int(tz[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tz == "GMT" or tz == "UTC":
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset is not None:
        # strip the recognized zone before strptime sees the string
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    for part in defaults:
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
996
996
def parsedate(date, formats=None, defaults=None):
    """parse a localized date/time string and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    NOTE(review): a caller-supplied 'defaults' dict is filled in place
    with the missing field fallbacks -- confirm callers expect that.
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()
    try:
        # fast path: "unixtime offset" pair of integers
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        if not defaults:
            defaults = {}
        now = makedate()
        for part in "d mb yY HI M S".split():
            if part not in defaults:
                if part[0] in "HMS":
                    # missing time-of-day fields default to zero
                    defaults[part] = "00"
                else:
                    # missing date fields default to today
                    defaults[part] = datestr(now, "%" + part[0])

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r ') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1042
1042
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    """

    def lower(date):
        # missing fields snap to the earliest possible moment
        d = dict(mb="1", d="1")
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # Missing fields snap to the latest possible moment.  Month
        # lengths are tried from longest to shortest until one parses
        # (e.g. "31" is invalid for February).
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in "31 30 29".split():
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            # Narrowed from a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt; parse failures raise
            # ValueError/OverflowError/Abort, all Exception subclasses.
            except Exception:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()
    if date[0] == "<":
        # on or before the given date
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        # on or after the given date
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # a single date matched to the accuracy provided
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1092
1092
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the mail domain (everything from the first '@')
    user = user.split('@', 1)[0]
    # for "Name <addr>" forms, keep only what follows the '<'
    if '<' in user:
        user = user.split('<', 1)[1]
    # truncate at the first space, then at the first dot
    user = user.split(' ', 1)[0]
    user = user.split('.', 1)[0]
    return user
1108
1108
def email(author):
    '''get email of author.'''
    # slice between the first '<' and the first '>'; when '<' is absent
    # find() yields -1 and start becomes 0 (whole string), and when '>'
    # is absent we slice to the end
    start = author.find('<') + 1
    stop = author.find('>')
    if stop == -1:
        return author[start:]
    return author[start:stop]
1115
1115
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    if len(text) > maxlength:
        # reserve three characters for the literal "..." suffix
        text = text[:maxlength - 3] + "..."
    return text
1122
1122
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, recursively.'''
    def errhandler(err):
        # Only propagate walk errors for the root path itself; errors in
        # subdirectories are silently skipped.
        if err.filename == path:
            raise err
    if followsym and hasattr(os.path, 'samestat'):
        def _add_dir_if_not_there(dirlst, dirname):
            # Record dirname's stat in dirlst and return True if it was
            # not already present; samestat catches symlink aliases of the
            # same directory, guarding against symlink cycles.
            match = False
            samestat = os.path.samestat
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # platform cannot compare directory identities, so following
        # symlinks could recurse forever; disable it
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        _add_dir_if_not_there(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if _add_dir_if_not_there(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk the link target with the shared seen_dirs
                        # so each real directory is visited only once
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            # in-place assignment so os.walk only descends into the
            # surviving (non-link, unseen) directories
            dirs[:] = newdirs
1169
1169
# Cache for rcpath(); None until the hgrc search path is first computed.
_rcpath = None
1171
1171
def os_rcpath():
    '''return default os-specific hgrc search path'''
    # system-wide configuration files first, then per-user ones,
    # all with normalized path separators
    paths = list(system_rcpath())
    paths.extend(user_rcpath())
    return [os.path.normpath(p) for p in paths]
1178
1178
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    # return the cached value once it has been computed
    if _rcpath is not None:
        return _rcpath
    if 'HGRCPATH' not in os.environ:
        _rcpath = os_rcpath()
        return _rcpath
    result = []
    for entry in os.environ['HGRCPATH'].split(os.pathsep):
        if not entry:
            # empty entries mean "look nowhere extra", not cwd
            continue
        entry = expandpath(entry)
        if not os.path.isdir(entry):
            result.append(entry)
            continue
        # a directory contributes every *.rc file it contains
        for name, kind in osutil.listdir(entry):
            if name.endswith('.rc'):
                result.append(os.path.join(entry, name))
    _rcpath = result
    return _rcpath
1202
1202
def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    # (threshold multiplier, divisor, format) triples, scanned from the
    # largest unit/coarsest precision down; the first match decides how
    # many decimals are shown
    units = (
        (100, 1 << 30, _('%.0f GB')),
        (10, 1 << 30, _('%.1f GB')),
        (1, 1 << 30, _('%.2f GB')),
        (100, 1 << 20, _('%.0f MB')),
        (10, 1 << 20, _('%.1f MB')),
        (1, 1 << 20, _('%.2f MB')),
        (100, 1 << 10, _('%.0f KB')),
        (10, 1 << 10, _('%.1f KB')),
        (1, 1 << 10, _('%.2f KB')),
        (1, 1, _('%.0f bytes')),
    )

    for mult, divisor, fmt in units:
        if nbytes >= divisor * mult:
            return fmt % (nbytes / float(divisor))
    # nbytes < 1 byte: fall through to the plain bytes format
    return units[-1][2] % nbytes
1223
1223
def drop_scheme(scheme, path):
    """Strip 'scheme:' (and any '//' authority part) from path."""
    prefix = scheme + ':'
    if not path.startswith(prefix):
        return path
    path = path[len(prefix):]
    if not path.startswith('//'):
        return path
    if scheme != 'file':
        # non-file URL: just drop the '//' marker
        return path[2:]
    i = path.find('/', 2)
    if i == -1:
        # 'file://host' with no path component
        return ''
    # On Windows, absolute paths are rooted at the current drive
    # root. On POSIX they are rooted at the file system root.
    if os.name == 'nt':
        droot = os.path.splitdrive(os.getcwd())[0] + '/'
        return os.path.join(droot, path[i + 1:])
    return path[i:]
1243
1243
def uirepr(s):
    # repr() doubles every backslash, which makes Windows paths
    # unreadable in user-facing output; collapse them back to one.
    r = repr(s)
    return r.replace('\\\\', '\\')
1247
1247
def termwidth():
    # Return the terminal width in columns, or 80 if it cannot be
    # determined.  An explicit COLUMNS environment variable (set by some
    # shells) takes precedence over the tty's reported size.
    if 'COLUMNS' in os.environ:
        try:
            return int(os.environ['COLUMNS'])
        except ValueError:
            pass
    try:
        import termios, array, fcntl
        for dev in (sys.stdout, sys.stdin):
            try:
                try:
                    fd = dev.fileno()
                except AttributeError:
                    # stream was replaced by an object without a real fd
                    continue
                if not os.isatty(fd):
                    continue
                # TIOCGWINSZ fills a struct winsize of four unsigned
                # shorts (rows, cols, xpixel, ypixel); index 1 is cols.
                arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
                return array.array('h', arri)[1]
            except ValueError:
                pass
            except IOError, e:
                if e[0] == errno.EINVAL:
                    # this device does not support the ioctl; try the next
                    pass
                else:
                    raise
    except ImportError:
        # termios/fcntl are unavailable (e.g. on Windows); fall through
        pass
    # sensible default when the width cannot be determined
    return 80
1276
1276
def wrap(line, hangindent, width=None):
    """Wrap line to width columns, indenting continuation lines by
    hangindent spaces."""
    if width is None:
        width = termwidth() - 2
    if width <= hangindent:
        # adjust for weird terminal size
        width = max(78, hangindent + 1)
    padding = '\n' + ' ' * hangindent
    wrapwidth = width - hangindent
    # To avoid corrupting multi-byte characters in line, we must wrap
    # a Unicode string instead of a bytestring.
    try:
        u = line.decode(encoding.encoding)
        wrapped = textwrap.wrap(u, width=wrapwidth)
        return padding.join(wrapped).encode(encoding.encoding)
    except UnicodeDecodeError:
        # undecodable input: wrap the raw bytestring as a best effort
        return padding.join(textwrap.wrap(line, width=wrapwidth))
1292
1292
def iterlines(iterator):
    """Yield the individual lines of every chunk in iterator."""
    for chunk in iterator:
        lines = chunk.splitlines()
        for singleline in lines:
            yield singleline
1297
1297
def expandpath(path):
    """Expand environment variables, then '~'/'~user', in path."""
    return os.path.expanduser(os.path.expandvars(path))
1300
1300
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if main_is_frozen():
        # frozen/standalone build: the executable itself is the hg command
        return [sys.executable]
    # otherwise ask the platform layer how hg was invoked
    return gethgcmd()
1311
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # os.wait() returns a (pid, status) tuple; keep only the pid so
        # the 'pid in terminated' membership test below can actually
        # match (storing the tuple made that test always false).
        # NOTE(review): assumes the only child that can die here is the
        # one we spawned -- confirm no other children may be reaped.
        terminated.add(os.wait()[0])
    prevhandler = None
    if hasattr(signal, 'SIGCHLD'):
        prevhandler = signal.signal(signal.SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # Re-check condfn() after the liveness test to avoid a race
            # where the child validates the condition and exits quickly.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # restore the previous SIGCHLD handler (Unix only)
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
@@ -1,7 +1,7 b''
1 % fail
1 % fail
2 abort: inotify-server: cannot start: .hg/inotify.sock is a broken symlink
2 abort: inotify-server: cannot start: .hg/inotify.sock is a broken symlink
3 inotify-client: could not talk to new inotify server: No such file or directory
3 inotify-client: could not start inotify server: child process failed to start
4 abort: inotify-server: cannot start: .hg/inotify.sock is a broken symlink
4 abort: inotify-server: cannot start: .hg/inotify.sock is a broken symlink
5 % inserve
5 % inserve
6 % status
6 % status
7 ? hg.pid
7 ? hg.pid
General Comments 0
You need to be logged in to leave comments. Login now