##// END OF EJS Templates
util: replace util.nulldev with os.devnull...
Ross Lagerwall -
r17391:fc24c104 stable
parent child Browse files
Show More
@@ -1,445 +1,445
1 1 # common.py - common code for the convert extension
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import base64, errno
9 9 import os
10 10 import cPickle as pickle
11 11 from mercurial import util
12 12 from mercurial.i18n import _
13 13
14 14 propertycache = util.propertycache
15 15
def encodeargs(args):
    """Pickle *args* and return it as a single-line base64 string.

    Inverse of decodeargs(); used to smuggle arbitrary arguments
    through an environment/command-line safe text representation.
    """
    payload = pickle.dumps(args)
    # base64.encodestring wraps its output with newlines every 76
    # characters; collapse everything back onto a single line.
    return ''.join(base64.encodestring(payload).splitlines())
24 24
def decodeargs(s):
    """Inverse of encodeargs(): base64-decode *s* and unpickle it."""
    return pickle.loads(base64.decodestring(s))
28 28
class MissingTool(Exception):
    # Raised by checktool() when a required external program is not
    # found and abort=False was requested (otherwise util.Abort is used).
    pass
31 31
def checktool(exe, name=None, abort=True):
    """Verify that the external program *exe* can be found on the path.

    *name* is the human-readable tool name for the error message
    (defaults to *exe*).  On failure, raise util.Abort when *abort*
    is true, MissingTool otherwise.
    """
    name = name or exe
    if util.findexe(exe):
        return
    if abort:
        raise util.Abort(_('cannot find required "%s" tool') % name)
    raise MissingTool(_('cannot find required "%s" tool') % name)
37 37
class NoRepo(Exception):
    # Raised by converter_source.__init__ / converter_sink.__init__ when
    # the given path is not a valid repository for that converter type.
    pass
40 40
41 41 SKIPREV = 'SKIP'
42 42
class commit(object):
    """Value object describing a single changeset to be converted.

    author and date fall back to 'unknown' / '0 0' when empty.
    parents is a list of parent revision identifiers, rev the native
    revision id in the source, extra a dict of additional changeset
    metadata, and sortkey an opaque ordering key.
    """
    def __init__(self, author, date, desc, parents, branch=None, rev=None,
                 extra=None, sortkey=None):
        self.author = author or 'unknown'
        self.date = date or '0 0'
        self.desc = desc
        self.parents = parents
        self.branch = branch
        self.rev = rev
        # The old signature used a mutable default (extra={}), so every
        # commit created without 'extra' shared one dict and a mutation
        # leaked into all of them.  Use a None sentinel instead; passing
        # a dict explicitly behaves exactly as before.
        if extra is None:
            extra = {}
        self.extra = extra
        self.sortkey = sortkey
54 54
class converter_source(object):
    """Conversion source interface"""

    def __init__(self, ui, path=None, rev=None):
        """Initialize conversion source (or raise NoRepo("message")
        exception if path is not a valid repository)"""
        self.ui = ui
        self.path = path
        self.rev = rev

        # encoding assumed for strings coming out of the source; used
        # as the default by recode() and overridden by subclasses
        self.encoding = 'utf-8'

    def before(self):
        # optional setup hook for subclasses (no-op by default)
        pass

    def after(self):
        # optional teardown hook for subclasses (no-op by default)
        pass

    def setrevmap(self, revmap):
        """set the map of already-converted revisions"""
        pass

    def getheads(self):
        """Return a list of this repository's heads"""
        raise NotImplementedError

    def getfile(self, name, rev):
        """Return a pair (data, mode) where data is the file content
        as a string and mode one of '', 'x' or 'l'. rev is the
        identifier returned by a previous call to getchanges(). Raise
        IOError to indicate that name was deleted in rev.
        """
        raise NotImplementedError

    def getchanges(self, version):
        """Returns a tuple of (files, copies).

        files is a sorted list of (filename, id) tuples for all files
        changed between version and its first parent returned by
        getcommit(). id is the source revision id of the file.

        copies is a dictionary of dest: source
        """
        raise NotImplementedError

    def getcommit(self, version):
        """Return the commit object for version"""
        raise NotImplementedError

    def gettags(self):
        """Return the tags as a dictionary of name: revision

        Tag names must be UTF-8 strings.
        """
        raise NotImplementedError

    def recode(self, s, encoding=None):
        """Convert string *s* to a UTF-8 byte string.

        Tries *encoding* (defaulting to self.encoding), then latin-1,
        and finally decodes with *encoding* using 'replace' so that a
        result is always returned rather than raising UnicodeError.
        """
        if not encoding:
            encoding = self.encoding or 'utf-8'

        if isinstance(s, unicode):
            return s.encode("utf-8")
        try:
            return s.decode(encoding).encode("utf-8")
        except UnicodeError:
            try:
                return s.decode("latin-1").encode("utf-8")
            except UnicodeError:
                # lossy last resort: undecodable bytes become U+FFFD
                return s.decode(encoding, "replace").encode("utf-8")

    def getchangedfiles(self, rev, i):
        """Return the files changed by rev compared to parent[i].

        i is an index selecting one of the parents of rev. The return
        value should be the list of files that are different in rev and
        this parent.

        If rev has no parents, i is None.

        This function is only needed to support --filemap
        """
        raise NotImplementedError

    def converted(self, rev, sinkrev):
        '''Notify the source that a revision has been converted.'''
        pass

    def hasnativeorder(self):
        """Return true if this source has a meaningful, native revision
        order. For instance, Mercurial revisions are store sequentially
        while there is no such global ordering with Darcs.
        """
        return False

    def lookuprev(self, rev):
        """If rev is a meaningful revision reference in source, return
        the referenced identifier in the same format used by getcommit().
        return None otherwise.
        """
        return None

    def getbookmarks(self):
        """Return the bookmarks as a dictionary of name: revision

        Bookmark names are to be UTF-8 strings.
        """
        return {}
162 162
class converter_sink(object):
    """Conversion sink (target) interface"""

    def __init__(self, ui, path):
        """Initialize conversion sink (or raise NoRepo("message")
        exception if path is not a valid repository)

        created is a list of paths to remove if a fatal error occurs
        later"""
        self.ui = ui
        self.path = path
        self.created = []

    def getheads(self):
        """Return a list of this repository's heads"""
        raise NotImplementedError

    def revmapfile(self):
        """Path to a file that will contain lines
        source_rev_id sink_rev_id
        mapping equivalent revision identifiers for each system."""
        raise NotImplementedError

    def authorfile(self):
        """Path to a file that will contain lines
        srcauthor=dstauthor
        mapping equivalent authors identifiers for each system."""
        return None

    def putcommit(self, files, copies, parents, commit, source, revmap):
        """Create a revision with all changed files listed in 'files'
        and having listed parents. 'commit' is a commit object
        containing at a minimum the author, date, and message for this
        changeset. 'files' is a list of (path, version) tuples,
        'copies' is a dictionary mapping destinations to sources,
        'source' is the source repository, and 'revmap' is a mapfile
        of source revisions to converted revisions. Only getfile() and
        lookuprev() should be called on 'source'.

        Note that the sink repository is not told to update itself to
        a particular revision (or even what that revision would be)
        before it receives the file data.
        """
        raise NotImplementedError

    def puttags(self, tags):
        """Put tags into sink.

        tags: {tagname: sink_rev_id, ...} where tagname is an UTF-8 string.
        Return a pair (tag_revision, tag_parent_revision), or (None, None)
        if nothing was changed.
        """
        raise NotImplementedError

    def setbranch(self, branch, pbranches):
        """Set the current branch name. Called before the first putcommit
        on the branch.
        branch: branch name for subsequent commits
        pbranches: (converted parent revision, parent branch) tuples"""
        pass

    def setfilemapmode(self, active):
        """Tell the destination that we're using a filemap

        Some converter_sources (svn in particular) can claim that a file
        was changed in a revision, even if there was no change. This method
        tells the destination that we're using a filemap and that it should
        filter empty revisions.
        """
        pass

    def before(self):
        # optional setup hook for subclasses (no-op by default)
        pass

    def after(self):
        # optional teardown hook for subclasses (no-op by default)
        pass

    def putbookmarks(self, bookmarks):
        """Put bookmarks into sink.

        bookmarks: {bookmarkname: sink_rev_id, ...}
        where bookmarkname is an UTF-8 string.
        """
        pass

    def hascommit(self, rev):
        """Return True if the sink contains rev"""
        raise NotImplementedError
251 251
class commandline(object):
    """Helper base class for converters that drive an external
    command-line tool (stored in self.command)."""
    def __init__(self, ui, command):
        self.ui = ui
        self.command = command

    def prerun(self):
        # hook invoked just before an external command is spawned
        pass

    def postrun(self):
        # hook invoked after an external command's pipe was created
        pass

    def _cmdline(self, cmd, closestdin, *args, **kwargs):
        """Build the full shell command string 'self.command cmd ...'.

        Keyword arguments become options: a single-letter key k yields
        '-k value', a longer key yields '--key=value' (with '_' mapped
        to '-').  A value that cannot be string-concatenated (e.g.
        True) turns the option into a bare flag.
        """
        cmdline = [self.command, cmd] + list(args)
        for k, v in kwargs.iteritems():
            if len(k) == 1:
                cmdline.append('-' + k)
            else:
                cmdline.append('--' + k.replace('_', '-'))
            try:
                if len(k) == 1:
                    cmdline.append('' + v)
                else:
                    cmdline[-1] += '=' + v
            except TypeError:
                # non-string value: the option takes no argument
                pass
        cmdline = [util.shellquote(arg) for arg in cmdline]
        if not self.ui.debugflag:
            # hide the tool's stderr unless --debug is in effect
            cmdline += ['2>', os.devnull]
        if closestdin:
            # redirect stdin from the null device
            cmdline += ['<', os.devnull]
        cmdline = ' '.join(cmdline)
        return cmdline

    def _run(self, cmd, *args, **kwargs):
        # run with stdin redirected from the null device
        return self._dorun(util.popen, cmd, True, *args, **kwargs)

    def _run2(self, cmd, *args, **kwargs):
        # run via util.popen2, leaving stdin attached (closestdin=False)
        return self._dorun(util.popen2, cmd, False, *args, **kwargs)

    def _dorun(self, openfunc, cmd, closestdin, *args, **kwargs):
        cmdline = self._cmdline(cmd, closestdin, *args, **kwargs)
        self.ui.debug('running: %s\n' % (cmdline,))
        self.prerun()
        try:
            return openfunc(cmdline)
        finally:
            self.postrun()

    def run(self, cmd, *args, **kwargs):
        """Run the command; return (output string, exit status)."""
        fp = self._run(cmd, *args, **kwargs)
        output = fp.read()
        self.ui.debug(output)
        return output, fp.close()

    def runlines(self, cmd, *args, **kwargs):
        """Like run(), but return the output as a list of lines."""
        fp = self._run(cmd, *args, **kwargs)
        output = fp.readlines()
        self.ui.debug(''.join(output))
        return output, fp.close()

    def checkexit(self, status, output=''):
        """Raise util.Abort (showing *output* if any) on non-zero status."""
        if status:
            if output:
                self.ui.warn(_('%s error:\n') % self.command)
                self.ui.warn(output)
            msg = util.explainexit(status)[0]
            raise util.Abort('%s %s' % (self.command, msg))

    def run0(self, cmd, *args, **kwargs):
        """Run the command, abort on failure, return its output."""
        output, status = self.run(cmd, *args, **kwargs)
        self.checkexit(status, output)
        return output

    def runlines0(self, cmd, *args, **kwargs):
        """Run the command, abort on failure, return its output lines."""
        output, status = self.runlines(cmd, *args, **kwargs)
        self.checkexit(status, ''.join(output))
        return output

    @propertycache
    def argmax(self):
        # Longest command line we will attempt, computed once per instance.
        # POSIX requires at least 4096 bytes for ARG_MAX
        argmax = 4096
        try:
            argmax = os.sysconf("SC_ARG_MAX")
        except (AttributeError, ValueError):
            pass

        # Windows shells impose their own limits on command line length,
        # down to 2047 bytes for cmd.exe under Windows NT/2k and 2500 bytes
        # for older 4nt.exe. See http://support.microsoft.com/kb/830473 for
        # details about cmd.exe limitations.

        # Since ARG_MAX is for command line _and_ environment, lower our limit
        # (and make happy Windows shells while doing this).
        return argmax // 2 - 1

    def limit_arglist(self, arglist, cmd, closestdin, *args, **kwargs):
        """Yield chunks of arglist that each fit within self.argmax
        when appended to the command built by _cmdline()."""
        cmdlen = len(self._cmdline(cmd, closestdin, *args, **kwargs))
        limit = self.argmax - cmdlen
        bytes = 0
        fl = []
        for fn in arglist:
            # +3 accounts for quoting and the separating space
            b = len(fn) + 3
            if bytes + b < limit or len(fl) == 0:
                fl.append(fn)
                bytes += b
            else:
                yield fl
                fl = [fn]
                bytes = b
        if fl:
            yield fl

    def xargs(self, arglist, cmd, *args, **kwargs):
        """Run cmd repeatedly, xargs-style, over chunks of arglist."""
        for l in self.limit_arglist(arglist, cmd, True, *args, **kwargs):
            self.run0(cmd, *(list(args) + l), **kwargs)
368 368
class mapfile(dict):
    """A dict mirrored by a file of 'key value' lines (e.g. the revmap).

    The file is read once at construction; every assignment is appended
    to the file and flushed immediately.  self.order keeps keys in
    first-seen order.  Keys may contain spaces (values may not): lines
    are split with rsplit so only the last word is the value.
    """
    def __init__(self, ui, path):
        super(mapfile, self).__init__()
        self.ui = ui
        self.path = path
        # append handle, opened lazily on first assignment
        self.fp = None
        self.order = []
        self._read()

    def _read(self):
        # load existing entries; a missing file simply means an empty map
        if not self.path:
            return
        try:
            fp = open(self.path, 'r')
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
            return
        for i, line in enumerate(fp):
            line = line.splitlines()[0].rstrip()
            if not line:
                # Ignore blank lines
                continue
            try:
                key, value = line.rsplit(' ', 1)
            except ValueError:
                raise util.Abort(
                    _('syntax error in %s(%d): key/value pair expected')
                    % (self.path, i + 1))
            if key not in self:
                self.order.append(key)
            # later lines for the same key overwrite earlier ones
            super(mapfile, self).__setitem__(key, value)
        fp.close()

    def __setitem__(self, key, value):
        # write-through: append the pair to the backing file first
        if self.fp is None:
            try:
                self.fp = open(self.path, 'a')
            except IOError, err:
                raise util.Abort(_('could not open map file %r: %s') %
                                 (self.path, err.strerror))
        self.fp.write('%s %s\n' % (key, value))
        self.fp.flush()
        super(mapfile, self).__setitem__(key, value)

    def close(self):
        if self.fp:
            self.fp.close()
            self.fp = None
418 418
419 419 def parsesplicemap(path):
420 420 """Parse a splicemap, return a child/parents dictionary."""
421 421 if not path:
422 422 return {}
423 423 m = {}
424 424 try:
425 425 fp = open(path, 'r')
426 426 for i, line in enumerate(fp):
427 427 line = line.splitlines()[0].rstrip()
428 428 if not line:
429 429 # Ignore blank lines
430 430 continue
431 431 try:
432 432 child, parents = line.split(' ', 1)
433 433 parents = parents.replace(',', ' ').split()
434 434 except ValueError:
435 435 raise util.Abort(_('syntax error in %s(%d): child parent1'
436 436 '[,parent2] expected') % (path, i + 1))
437 437 pp = []
438 438 for p in parents:
439 439 if p not in pp:
440 440 pp.append(p)
441 441 m[child] = pp
442 442 except IOError, e:
443 443 if e.errno != errno.ENOENT:
444 444 raise
445 445 return m
@@ -1,338 +1,338
1 1 # gnuarch.py - GNU Arch support for the convert extension
2 2 #
3 3 # Copyright 2008, 2009 Aleix Conchillo Flaque <aleix@member.fsf.org>
4 4 # and others
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from common import NoRepo, commandline, commit, converter_source
10 10 from mercurial.i18n import _
11 11 from mercurial import encoding, util
12 12 import os, shutil, tempfile, stat
13 13 from email.Parser import Parser
14 14
class gnuarch_source(converter_source, commandline):
    """Conversion source reading a GNU Arch (baz/tla) working tree."""

    class gnuarch_rev(object):
        # Mutable record accumulating everything learned about one Arch
        # revision while history is scanned and changesets are replayed.
        def __init__(self, rev):
            self.rev = rev
            self.summary = ''
            self.date = None
            self.author = ''
            # revision this one continues (branch/tag origin), if any
            self.continuationof = None
            self.add_files = []
            self.mod_files = []
            self.del_files = []
            self.ren_files = {}
            self.ren_dirs = {}

    def __init__(self, ui, path, rev=None):
        super(gnuarch_source, self).__init__(ui, path, rev=rev)

        # every Arch tree has a top-level '{arch}' control directory
        if not os.path.exists(os.path.join(path, '{arch}')):
            raise NoRepo(_("%s does not look like a GNU Arch repository")
                         % path)

        # Could use checktool, but we want to check for baz or tla.
        self.execmd = None
        if util.findexe('baz'):
            self.execmd = 'baz'
        else:
            if util.findexe('tla'):
                self.execmd = 'tla'
            else:
                raise util.Abort(_('cannot find a GNU Arch tool'))

        commandline.__init__(self, ui, self.execmd)

        self.path = os.path.realpath(path)
        self.tmppath = None

        self.treeversion = None
        self.lastrev = None
        self.changes = {}        # rev id -> gnuarch_rev
        self.parents = {}        # rev id (or None) -> list of parent revs
        self.tags = {}
        self.catlogparser = Parser()
        self.encoding = encoding.encoding
        self.archives = []

    def before(self):
        # Get registered archives
        self.archives = [i.rstrip('\n')
                         for i in self.runlines0('archives', '-n')]

        if self.execmd == 'tla':
            output = self.run0('tree-version', self.path)
        else:
            output = self.run0('tree-version', '-d', self.path)
        self.treeversion = output.strip()

        # Get name of temporary directory
        version = self.treeversion.split('/')
        self.tmppath = os.path.join(tempfile.gettempdir(),
                                    'hg-%s' % version[1])

        # Generate parents dictionary
        self.parents[None] = []
        treeversion = self.treeversion
        child = None
        while treeversion:
            self.ui.status(_('analyzing tree version %s...\n') % treeversion)

            archive = treeversion.split('/')[0]
            if archive not in self.archives:
                self.ui.status(_('tree analysis stopped because it points to '
                                 'an unregistered archive %s...\n') % archive)
                break

            # Get the complete list of revisions for that tree version
            output, status = self.runlines('revisions', '-r', '-f', treeversion)
            self.checkexit(status, 'failed retrieveing revisions for %s'
                           % treeversion)

            # No new iteration unless a revision has a continuation-of header
            treeversion = None

            for l in output:
                rev = l.strip()
                self.changes[rev] = self.gnuarch_rev(rev)
                self.parents[rev] = []

                # Read author, date and summary
                catlog, status = self.run('cat-log', '-d', self.path, rev)
                if status:
                    # fall back to the archive copy of the log
                    catlog = self.run0('cat-archive-log', rev)
                self._parsecatlog(catlog, rev)

                # Populate the parents map
                self.parents[child].append(rev)

                # Keep track of the current revision as the child of the next
                # revision scanned
                child = rev

                # Check if we have to follow the usual incremental history
                # or if we have to 'jump' to a different treeversion given
                # by the continuation-of header.
                if self.changes[rev].continuationof:
                    treeversion = '--'.join(
                        self.changes[rev].continuationof.split('--')[:-1])
                    break

                # If we reached a base-0 revision w/o any continuation-of
                # header, it means the tree history ends here.
                if rev[-6:] == 'base-0':
                    break

    def after(self):
        self.ui.debug('cleaning up %s\n' % self.tmppath)
        shutil.rmtree(self.tmppath, ignore_errors=True)

    def getheads(self):
        # the revisions collected under the None key in before()
        return self.parents[None]

    def getfile(self, name, rev):
        # getchanges() must have been called for rev right before
        if rev != self.lastrev:
            raise util.Abort(_('internal calling inconsistency'))

        # Raise IOError if necessary (i.e. deleted files).
        if not os.path.lexists(os.path.join(self.tmppath, name)):
            raise IOError

        return self._getfile(name, rev)

    def getchanges(self, rev):
        self._update(rev)
        changes = []
        copies = {}

        for f in self.changes[rev].add_files:
            changes.append((f, rev))

        for f in self.changes[rev].mod_files:
            changes.append((f, rev))

        for f in self.changes[rev].del_files:
            changes.append((f, rev))

        # a renamed file contributes both endpoints plus a copy record
        for src in self.changes[rev].ren_files:
            to = self.changes[rev].ren_files[src]
            changes.append((src, rev))
            changes.append((to, rev))
            copies[to] = src

        # renamed directories are expanded to their contained files
        for src in self.changes[rev].ren_dirs:
            to = self.changes[rev].ren_dirs[src]
            chgs, cps = self._rendirchanges(src, to)
            changes += [(f, rev) for f in chgs]
            copies.update(cps)

        self.lastrev = rev
        return sorted(set(changes)), copies

    def getcommit(self, rev):
        changes = self.changes[rev]
        return commit(author=changes.author, date=changes.date,
                      desc=changes.summary, parents=self.parents[rev], rev=rev)

    def gettags(self):
        return self.tags

    def _execute(self, cmd, *args, **kwargs):
        # run the Arch tool via os.system, discarding all of its output;
        # returns the os.system() status
        cmdline = [self.execmd, cmd]
        cmdline += args
        cmdline = [util.shellquote(arg) for arg in cmdline]
        cmdline += ['>', os.devnull, '2>', os.devnull]
        cmdline = util.quotecommand(' '.join(cmdline))
        self.ui.debug(cmdline, '\n')
        return os.system(cmdline)

    def _update(self, rev):
        # bring self.tmppath to rev, either incrementally or from scratch
        self.ui.debug('applying revision %s...\n' % rev)
        changeset, status = self.runlines('replay', '-d', self.tmppath,
                                          rev)
        if status:
            # Something went wrong while merging (baz or tla
            # issue?), get latest revision and try from there
            shutil.rmtree(self.tmppath, ignore_errors=True)
            self._obtainrevision(rev)
        else:
            old_rev = self.parents[rev][0]
            self.ui.debug('computing changeset between %s and %s...\n'
                          % (old_rev, rev))
            self._parsechangeset(changeset, rev)

    def _getfile(self, name, rev):
        # return (data, mode) as expected by converter_source.getfile()
        mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
        if stat.S_ISLNK(mode):
            data = os.readlink(os.path.join(self.tmppath, name))
            mode = mode and 'l' or ''
        else:
            data = open(os.path.join(self.tmppath, name), 'rb').read()
            mode = (mode & 0111) and 'x' or ''
        return data, mode

    def _exclude(self, name):
        # True for paths touching internal GNU Arch control files
        exclude = ['{arch}', '.arch-ids', '.arch-inventory']
        for exc in exclude:
            if name.find(exc) != -1:
                return True
        return False

    def _readcontents(self, path):
        # recursively list files under path, skipping Arch control files
        files = []
        contents = os.listdir(path)
        while len(contents) > 0:
            c = contents.pop()
            p = os.path.join(path, c)
            # os.walk could be used, but here we avoid internal GNU
            # Arch files and directories, thus saving a lot time.
            if not self._exclude(p):
                if os.path.isdir(p):
                    contents += [os.path.join(c, f) for f in os.listdir(p)]
                else:
                    files.append(c)
        return files

    def _rendirchanges(self, src, dest):
        # expand a directory rename into per-file changes and copies
        changes = []
        copies = {}
        files = self._readcontents(os.path.join(self.tmppath, dest))
        for f in files:
            s = os.path.join(src, f)
            d = os.path.join(dest, f)
            changes.append(s)
            changes.append(d)
            copies[d] = s
        return changes, copies

    def _obtainrevision(self, rev):
        # check out rev from scratch and record its full file list
        self.ui.debug('obtaining revision %s...\n' % rev)
        output = self._execute('get', rev, self.tmppath)
        self.checkexit(output)
        self.ui.debug('analyzing revision %s...\n' % rev)
        files = self._readcontents(self.tmppath)
        self.changes[rev].add_files += files

    def _stripbasepath(self, path):
        # drop a leading './' from changeset paths
        if path.startswith('./'):
            return path[2:]
        return path

    def _parsecatlog(self, data, rev):
        # extract author, date, summary and continuation-of from a
        # cat-log message (RFC822-style headers parsed by email.Parser)
        try:
            catlog = self.catlogparser.parsestr(data)

            # Commit date
            self.changes[rev].date = util.datestr(
                util.strdate(catlog['Standard-date'],
                             '%Y-%m-%d %H:%M:%S'))

            # Commit author
            self.changes[rev].author = self.recode(catlog['Creator'])

            # Commit description
            self.changes[rev].summary = '\n\n'.join((catlog['Summary'],
                                                     catlog.get_payload()))
            self.changes[rev].summary = self.recode(self.changes[rev].summary)

            # Commit revision origin when dealing with a branch or tag
            if 'Continuation-of' in catlog:
                self.changes[rev].continuationof = self.recode(
                    catlog['Continuation-of'])
        except Exception:
            raise util.Abort(_('could not parse cat-log of %s') % rev)

    def _parsechangeset(self, data, rev):
        # classify each line of a 'replay' changeset listing into the
        # add/del/mod/rename buckets of self.changes[rev]
        for l in data:
            l = l.strip()
            # Added file (ignore added directory)
            if l.startswith('A') and not l.startswith('A/'):
                file = self._stripbasepath(l[1:].strip())
                if not self._exclude(file):
                    self.changes[rev].add_files.append(file)
            # Deleted file (ignore deleted directory)
            elif l.startswith('D') and not l.startswith('D/'):
                file = self._stripbasepath(l[1:].strip())
                if not self._exclude(file):
                    self.changes[rev].del_files.append(file)
            # Modified binary file
            elif l.startswith('Mb'):
                file = self._stripbasepath(l[2:].strip())
                if not self._exclude(file):
                    self.changes[rev].mod_files.append(file)
            # Modified link
            elif l.startswith('M->'):
                file = self._stripbasepath(l[3:].strip())
                if not self._exclude(file):
                    self.changes[rev].mod_files.append(file)
            # Modified file
            elif l.startswith('M'):
                file = self._stripbasepath(l[1:].strip())
                if not self._exclude(file):
                    self.changes[rev].mod_files.append(file)
            # Renamed file (or link)
            elif l.startswith('=>'):
                files = l[2:].strip().split(' ')
                if len(files) == 1:
                    # fall back to tab-separated paths
                    files = l[2:].strip().split('\t')
                src = self._stripbasepath(files[0])
                dst = self._stripbasepath(files[1])
                if not self._exclude(src) and not self._exclude(dst):
                    self.changes[rev].ren_files[src] = dst
            # Conversion from file to link or from link to file (modified)
            elif l.startswith('ch'):
                file = self._stripbasepath(l[2:].strip())
                if not self._exclude(file):
                    self.changes[rev].mod_files.append(file)
            # Renamed directory
            elif l.startswith('/>'):
                dirs = l[2:].strip().split(' ')
                if len(dirs) == 1:
                    dirs = l[2:].strip().split('\t')
                src = self._stripbasepath(dirs[0])
                dst = self._stripbasepath(dirs[1])
                if not self._exclude(src) and not self._exclude(dst):
                    self.changes[rev].ren_dirs[src] = dst
@@ -1,1931 +1,1931
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import os, sys, errno, re, tempfile
11 11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 12 import match as matchmod
13 13 import subrepo, context, repair, bookmarks, graphmod, revset
14 14
def parsealiases(cmd):
    """Return the list of alias names encoded in a command table key."""
    # a key looks like "^log|history"; the leading '^' only marks the
    # command as "important" for help output and is not part of any name
    names = cmd.lstrip("^")
    return names.split("|")
17 17
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    normal = {}
    debug = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        candidates = [cmd]
    else:
        candidates = table.keys()

    for key in candidates:
        aliases = parsealiases(key)
        match = None
        if cmd in aliases:
            match = cmd
        elif not strict:
            # first alias (in declaration order) that has cmd as a prefix
            prefixed = [a for a in aliases if a.startswith(cmd)]
            if prefixed:
                match = prefixed[0]
        if match is None:
            continue
        entry = (aliases, table[key])
        if aliases[0].startswith("debug") or match.startswith("debug"):
            debug[match] = entry
        else:
            normal[match] = entry

    # debug commands are only offered when nothing normal matched
    return normal or debug
53 53
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string.

    Raises error.AmbiguousCommand when several commands match and
    error.UnknownCommand when none do.
    """
    choice = findpossible(cmd, table, strict)

    # exact name (or exact alias) wins outright
    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        clist = choice.keys()
        clist.sort()
        raise error.AmbiguousCommand(cmd, clist)

    if choice:
        # a single prefix match is unambiguous
        # NOTE: relies on Python 2 dict.values() returning a list
        return choice.values()[0]

    raise error.UnknownCommand(cmd)
70 70
def findrepo(p):
    """Walk upward from p looking for a directory containing '.hg'.

    Returns the repository root path, or None if the filesystem root is
    reached without finding one.
    """
    while not os.path.isdir(os.path.join(p, ".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            # dirname() is a fixed point only at the filesystem root
            return None
        p = parent
    return p
78 78
def bailifchanged(repo):
    """Abort if the working directory has uncommitted state.

    Checks, in order: an in-progress merge (second parent set),
    modified/added/removed/deleted files, and dirty subrepositories.
    Raises util.Abort on the first problem found; returns None otherwise.
    """
    if repo.dirstate.p2() != nullid:
        raise util.Abort(_('outstanding uncommitted merge'))
    # only the first four status fields matter; unknown/ignored are fine
    modified, added, removed, deleted = repo.status()[:4]
    if modified or added or removed or deleted:
        raise util.Abort(_("outstanding uncommitted changes"))
    ctx = repo[None]
    for s in ctx.substate:
        if ctx.sub(s).dirty():
            raise util.Abort(_("uncommitted changes in subrepo %s") % s)
89 89
def logmessage(ui, opts):
    """ get the log message according to -m and -l option

    Returns the message string, or None/'' when neither option was given.
    Raises util.Abort when both are given or the logfile is unreadable.
    """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise util.Abort(_('options --message and --logfile are mutually '
                           'exclusive'))
    if not message and logfile:
        try:
            if logfile == '-':
                # read the message from stdin
                message = ui.fin.read()
            else:
                # normalize line endings while reading the file
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError, inst:
            raise util.Abort(_("can't read commit message '%s': %s") %
                             (logfile, inst.strerror))
    return message
108 108
def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    raw = opts.get('limit')
    if not raw:
        # absent, None, 0 or empty string all mean "no limit"
        return None
    try:
        limit = int(raw)
    except ValueError:
        raise util.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise util.Abort(_('limit must be positive'))
    return limit
122 122
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand %-escapes in an output filename pattern.

    Supported escapes (when the corresponding argument is available):
    %H/%h/%R/%r node hashes and revision numbers, %m sanitized first
    description line, %b repo basename, %N/%n total/sequence number,
    %s/%d/%p basename/dirname/full pathname, %% a literal percent.
    Raises util.Abort on an escape with no registered expander.
    """
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(repo.changelog.rev(node)),
        'h': lambda: short(node),
        # replace every non-word character of the description with '_'
        'm': lambda: re.sub('[^\w]', '_', str(desc))
        }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }

    try:
        if node:
            expander.update(node_expander)
        # NOTE(review): duplicated "if node:" guard; kept as-is, behavior
        # is identical to merging the two bodies
        if node:
            expander['r'] = (lambda:
                    str(repo.changelog.rev(node)).zfill(revwidth or 0))
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # zero-pad the sequence number to the width of the total
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i]
            if c == '%':
                # consume the escape character following '%'
                i += 1
                c = pat[i]
                c = expander[c]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError, inst:
        raise util.Abort(_("invalid format spec '%%%s' in output filename") %
                         inst.args[0])
168 168
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', pathname=None):
    """Return a file object for the expanded output pattern ``pat``.

    '-' or an empty pattern maps to the ui's stdout/stdin (depending on
    mode); an object that already looks like a file is returned as-is;
    otherwise the pattern is expanded via makefilename() and opened.
    """
    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        fp = writable and repo.ui.fout or repo.ui.fin
        if util.safehasattr(fp, 'fileno'):
            # duplicate the descriptor so the caller may close it freely
            return os.fdopen(os.dup(fp.fileno()), mode)
        else:
            # if this fp can't be duped properly, return
            # a dummy object that can be closed
            class wrappedfileobj(object):
                noop = lambda x: None
                def __init__(self, f):
                    self.f = f
                def __getattr__(self, attr):
                    if attr == 'close':
                        # swallow close() so the real stream stays open
                        return self.noop
                    else:
                        return getattr(self.f, attr)

            return wrappedfileobj(fp)
    if util.safehasattr(pat, 'write') and writable:
        return pat
    if util.safehasattr(pat, 'read') and 'r' in mode:
        return pat
    return open(makefilename(repo, pat, node, desc, total, seqno, revwidth,
                             pathname),
                mode)
199 199
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog

    opts['changelog'] and opts['manifest'] are mutually exclusive and
    incompatible with a filename.  Without a repository, file_ must name
    an on-disk revlog data file ('.d'/'.i'); its index is opened directly.
    Raises util.Abort or error.CommandError on invalid combinations.
    """
    cl = opts['changelog']
    mf = opts['manifest']
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl or mf:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest '
                    'without a repository')
    if msg:
        raise util.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.changelog
        elif mf:
            r = repo.manifest
        elif file_:
            filelog = repo.file(file_)
            # an empty filelog means the file is not tracked; fall through
            if len(filelog):
                r = filelog
    if not r:
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise util.Abort(_("revlog '%s' not found") % file_)
        # open the bare revlog: swap the 2-char extension for ".i"
        r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
234 234
def copy(ui, repo, pats, opts, rename=False):
    """Copy or rename (when rename=True) working-directory files.

    Returns True when any individual copy failed, False otherwise.
    The order of filesystem and dirstate operations below is load-bearing;
    do not reorder.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # expand one source pattern into (abs, rel, exact) tuples,
        # warning about unmanaged/removed files only on exact matches
        srcs = []
        badstates = after and '?' or '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # perform one copy/rename; returns True on failure, None otherwise
        abstarget = scmutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                # case-only rename on a case-insensitive filesystem
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            # --after: only record the copy, never touch the filesystem
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # two-step rename through a temp name so a
                    # case-insensitive filesystem actually changes the case
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError, inst:
                if inst.errno == errno.ENOENT:
                    # best-effort: source vanished, note it and carry on
                    ui.warn(_('%s: deleted in working copy\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = scmutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = scmutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many sources already exist under dest
                    # with this prefix stripped; the higher score wins
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res


    pats = scmutil.expandpats(pats)
    if not pats:
        raise util.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise util.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise util.Abort(_('with multiple sources, destination must be an '
                               'existing directory'))
        if util.endswithsep(dest):
            raise util.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise util.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
460 460
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    With --daemon the current process re-spawns itself detached and waits
    for the child to signal readiness by deleting a lock file; the child
    (entered via --daemon-pipefds) deletes the lock, detaches from the
    terminal and redirects its stdio to os.devnull/logfile.
    '''

    if opts['daemon'] and not opts['daemon_pipefds']:
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-pipefds=%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    # separate-argument form: drop the flag and its value
                    del runargs[i:i + 2]
                    break
            def condfn():
                # child startup is complete once it removed the lock file
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise util.Abort(_('child process failed to start'))
        finally:
            try:
                os.unlink(lockpath)
            except OSError, e:
                # the child normally removed it already; only ENOENT is ok
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if opts['pid_file']:
        mode = appendpid and 'a' or 'w'
        fp = open(opts['pid_file'], mode)
        fp.write(str(os.getpid()) + '\n')
        fp.close()

    if opts['daemon_pipefds']:
        # we are the detached child: tell the parent we are up, then
        # fully detach from the controlling terminal
        lockpath = opts['daemon_pipefds']
        try:
            os.setsid()
        except AttributeError:
            # not available on this platform (e.g. Windows)
            pass
        os.unlink(lockpath)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        nullfd = os.open(os.devnull, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        # close the originals unless dup2 already reused their numbers
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
532 532
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None):
    '''export changesets as hg patches.

    One patch per revision in revs is written either to fp or to a file
    named by expanding template (see makefilename escapes).
    '''

    total = len(revs)
    revwidth = max([len(str(rev)) for rev in revs])

    def single(rev, seqno, fp):
        # write the patch for one revision
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            # diff against the second parent instead of the first
            parents.reverse()
        prev = (parents and parents[0]) or nullid

        shouldclose = False
        if not fp:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='ab')
            if fp != template:
                shouldclose = True
        if fp != sys.stdout and util.safehasattr(fp, 'name'):
            repo.ui.note("%s\n" % fp.name)

        fp.write("# HG changeset patch\n")
        fp.write("# User %s\n" % ctx.user())
        fp.write("# Date %d %d\n" % ctx.date())
        if branch and branch != 'default':
            fp.write("# Branch %s\n" % branch)
        fp.write("# Node ID %s\n" % hex(node))
        fp.write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            fp.write("# Parent %s\n" % hex(parents[1]))
        fp.write(ctx.description().rstrip())
        fp.write("\n\n")

        for chunk in patch.diff(repo, prev, node, opts=opts):
            fp.write(chunk)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
580 580
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   listsubrepos=False):
    '''show diff or diffstat.

    Output goes to the ui (with labels) or, when fp is given, to fp
    (labels dropped).  With listsubrepos, subrepository diffs follow.
    '''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            # fp has no label support; discard keyword args
            fp.write(s)

    if stat:
        # diffstat needs zero lines of context
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
623 623
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, patch, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.patch = patch
        self.diffopts = diffopts
        # per-revision buffered output, flushed in caller-chosen order
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, rev):
        """Emit buffered output for rev; return 1 if a hunk was written."""
        if rev in self.header:
            h = self.header[rev]
            # suppress consecutive duplicate headers
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        """Emit the footer, if any, at the end of the log run."""
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        """Show one changeset, buffering the output when requested."""
        if self.buffered:
            self.ui.pushbuffer()
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()

        if self.ui.quiet:
            # quiet mode: just "rev:shortnode"
            self.ui.write("%d:%s\n" % (rev, short(changenode)),
                          label='log.node')
            return

        log = self.repo.changelog
        date = util.datestr(ctx.date())

        # full hashes in debug mode, short ones otherwise
        hexfunc = self.ui.debugflag and hex or short

        parents = [(p, hexfunc(log.node(p)))
                   for p in self._meaningful_parentrevs(log, rev)]

        self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
                      label='log.changeset')

        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')
        for bookmark in self.repo.nodebookmarks(changenode):
            self.ui.write(_("bookmark: %s\n") % bookmark,
                          label='log.bookmark')
        for tag in self.repo.nodetags(changenode):
            self.ui.write(_("tag: %s\n") % tag,
                          label='log.tag')
        if self.ui.debugflag and ctx.phase():
            self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
                          label='log.phase')
        for parent in parents:
            self.ui.write(_("parent: %d:%s\n") % parent,
                          label='log.parent')

        if self.ui.debugflag:
            mnode = ctx.manifestnode()
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # debug: full modified/added/removed breakdown vs. the parent
            files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
            for key, value in zip([_("files:"), _("files+:"), _("files-:")],
                                  files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(changenode, matchfn)

    def showpatch(self, node, matchfn):
        """Show the patch and/or diffstat for node when configured."""
        if not matchfn:
            matchfn = self.patch
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffopts(self.ui, self.diffopts)
            prev = self.repo.changelog.parents(node)[0]
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")

    def _meaningful_parentrevs(self, log, rev):
        """Return list of meaningful (or all if debug) parentrevs for rev.

        For merges (two non-nullrev revisions) both parents are meaningful.
        Otherwise the first parent revision is considered meaningful if it
        is not the preceding revision.
        """
        parents = log.parentrevs(rev)
        if not self.ui.debugflag and parents[1] == nullrev:
            if parents[0] >= rev - 1:
                parents = []
            else:
                parents = [parents[0]]
        return parents
782 782
783 783
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
        # full node in debug mode, 12-char prefix otherwise
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        self.cache = {}

    def use_template(self, t):
        '''set template string to use'''
        self.t.cache['changeset'] = t

    def _meaningful_parentrevs(self, ctx):
        """Return list of meaningful (or all if debug) parentrevs for rev.
        """
        parents = ctx.parents()
        if len(parents) > 1:
            return parents
        if self.ui.debugflag:
            return [parents[0], self.repo['null']]
        if parents[0].rev() >= ctx.rev() - 1:
            return []
        return parents

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''

        showlist = templatekw.showlist

        # showparents() behaviour depends on ui trace level which
        # causes unexpected behaviours at templating level and makes
        # it harder to extract it in a standalone function. Its
        # behaviour cannot be changed so leave it here for now.
        def showparents(**args):
            ctx = args['ctx']
            parents = [[('rev', p.rev()), ('node', p.hex())]
                       for p in self._meaningful_parentrevs(ctx)]
            return showlist('parent', parents, **args)

        props = props.copy()
        props.update(templatekw.keywords)
        props['parents'] = showparents
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # find correct templates for current mode

        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
            ]

        # later (more specific) modes override earlier ones
        types = {'header': '', 'footer':'', 'changeset': 'changeset'}
        for mode, postfix in tmplmodes:
            for type in types:
                cur = postfix and ('%s_%s' % (type, postfix)) or type
                if mode and cur in self.t:
                    types[type] = cur

        try:

            # write header
            if types['header']:
                h = templater.stringify(self.t(types['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    if self.lastheader != h:
                        self.lastheader = h
                        self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = types['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx.node(), matchfn)

            if types['footer']:
                if not self.footer:
                    self.footer = templater.stringify(self.t(types['footer'],
                                                      **props))

        except KeyError, inst:
            msg = _("%s: no key named '%s'")
            raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError, inst:
            raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
885 885
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # options
    patch = False
    if opts.get('patch') or opts.get('stat'):
        patch = scmutil.matchall(repo)

    tmpl = opts.get('template')
    style = None
    if tmpl:
        tmpl = templater.parsestring(tmpl, quoted=False)
    else:
        style = opts.get('style')

    # ui settings
    if not (tmpl or style):
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            try:
                # accept both quoted and unquoted template strings
                tmpl = templater.parsestring(tmpl)
            except SyntaxError:
                tmpl = templater.parsestring(tmpl, quoted=False)
        else:
            style = util.expandpath(ui.config('ui', 'style', ''))

    if not (tmpl or style):
        return changeset_printer(ui, repo, patch, opts, buffered)

    mapfile = None
    if style and not tmpl:
        mapfile = style
        # a bare style name is resolved against the template directories
        if not os.path.split(mapfile)[0]:
            mapname = (templater.templatepath('map-cmdline.' + mapfile)
                       or templater.templatepath(mapfile))
            if mapname:
                mapfile = mapname

    try:
        t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
    except SyntaxError, inst:
        raise util.Abort(inst.args[0])
    if tmpl:
        t.use_template(tmpl)
    return t
939 939
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec

    Returns the revision number as a string; raises util.Abort when no
    changeset matches.
    """
    df = util.matchdate(date)
    m = scmutil.matchall(repo)
    results = {}

    def prep(ctx, fns):
        # record the date of every changeset that matches the spec
        d = ctx.date()
        if df(d[0]):
            results[ctx.rev()] = d

    # walkchangerevs yields newest-first, so the first hit is the tipmost
    for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
        rev = ctx.rev()
        if rev in results:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(results[rev])))
            return str(rev)

    raise util.Abort(_("revision matching date not found"))
960 960
def increasingwindows(start, end, windowsize=8, sizelimit=512):
    """Yield (position, length) windows covering the revision range.

    The window length doubles on each step until it reaches sizelimit.
    Walks upward when start < end and downward when start > end; yields
    nothing when start == end.
    """
    ascending = start < end
    pos = start
    while (pos < end) if ascending else (pos > end):
        if ascending:
            yield pos, min(windowsize, end - pos)
            pos += windowsize
        else:
            yield pos, min(windowsize, pos - end - 1)
            pos -= windowsize
        if windowsize < sizelimit:
            windowsize *= 2
974 974
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.

    match restricts which files are considered; opts carries the log
    command options used here ('rev', 'follow', 'follow_first',
    'removed', 'prune'); prepare(ctx, fns) is called with each context
    and the iterable of matching filenames before it is yielded.'''

    follow = opts.get('follow') or opts.get('follow_first')

    # empty repository: nothing to walk
    if not len(repo):
        return []

    # default range: working-directory parent (with --follow) or tip,
    # back to the root
    if follow:
        defrange = '%s:0' % repo['.'].rev()
    else:
        defrange = '-1:0'
    revs = scmutil.revrange(repo, opts.get('rev') or [defrange])
    if not revs:
        return []
    wanted = set()
    # slow path (reading every changeset) is needed when patterns are
    # used or when removed files must be reported too
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if not slowpath and not match.files():
        # No files, no patterns.  Display all revs.
        wanted = set(revs)
    copies = []

    if not slowpath and match.files():
        # We only have to read through the filelog to find wanted revisions

        minrev, maxrev = min(revs), max(revs)
        def filerevgen(filelog, last):
            """
            Only files, no patterns.  Check the history of each file.

            Examines filelog entries within minrev, maxrev linkrev range
            Returns an iterator yielding (linkrev, parentlinkrevs, copied)
            tuples in backwards order
            """
            cl_count = len(repo)
            revs = []
            for j in xrange(0, last + 1):
                linkrev = filelog.linkrev(j)
                if linkrev < minrev:
                    continue
                # only yield rev for which we have the changelog, it can
                # happen while doing "hg log" during a pull or commit
                if linkrev >= cl_count:
                    break

                parentlinkrevs = []
                for p in filelog.parentrevs(j):
                    if p != nullrev:
                        parentlinkrevs.append(filelog.linkrev(p))
                n = filelog.node(j)
                revs.append((linkrev, parentlinkrevs,
                             follow and filelog.renamed(n)))

            return reversed(revs)
        def iterfiles():
            # yield (filename, filenode) pairs of interest, including
            # copy/rename sources appended to `copies` along the way
            pctx = repo['.']
            for filename in match.files():
                if follow:
                    if filename not in pctx:
                        raise util.Abort(_('cannot follow file not in parent '
                                           'revision: "%s"') % filename)
                    yield filename, pctx[filename].filenode()
                else:
                    yield filename, None
            for filename_node in copies:
                yield filename_node
        for file_, node in iterfiles():
            filelog = repo.file(file_)
            if not len(filelog):
                if node is None:
                    # A zero count may be a directory or deleted file, so
                    # try to find matching entries on the slow path.
                    if follow:
                        raise util.Abort(
                            _('cannot follow nonexistent file: "%s"') % file_)
                    slowpath = True
                    break
                else:
                    continue

            if node is None:
                last = len(filelog) - 1
            else:
                last = filelog.rev(node)

            # keep track of all ancestors of the file
            ancestors = set([filelog.linkrev(last)])

            # iterate from latest to oldest revision
            for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
                if not follow:
                    if rev > maxrev:
                        continue
                else:
                    # Note that last might not be the first interesting
                    # rev to us:
                    # if the file has been changed after maxrev, we'll
                    # have linkrev(last) > maxrev, and we still need
                    # to explore the file graph
                    if rev not in ancestors:
                        continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

                fncache.setdefault(rev, []).append(file_)
                wanted.add(rev)
                if copied:
                    copies.append(copied)
    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))

        # The slow path checks files modified in every changeset.
        for i in sorted(revs):
            ctx = change(i)
            matches = filter(match, ctx.files())
            if matches:
                fncache[i] = matches
                wanted.add(i)

    class followfilter(object):
        """Decide whether a revision belongs to the follow-graph rooted
        at the first revision fed to match()."""
        def __init__(self, onlyfirst=False):
            self.startrev = nullrev
            self.roots = set()
            self.onlyfirst = onlyfirst

        def match(self, rev):
            def realparents(rev):
                # with --follow-first only the first parent counts
                if self.onlyfirst:
                    return repo.changelog.parentrevs(rev)[0:1]
                else:
                    return filter(lambda x: x != nullrev,
                                  repo.changelog.parentrevs(rev))

            if self.startrev == nullrev:
                self.startrev = rev
                return True

            if rev > self.startrev:
                # forward: all descendants
                if not self.roots:
                    self.roots.add(self.startrev)
                for parent in realparents(rev):
                    if parent in self.roots:
                        self.roots.add(rev)
                        return True
            else:
                # backwards: all parents
                if not self.roots:
                    self.roots.update(realparents(self.startrev))
                if rev in self.roots:
                    self.roots.remove(rev)
                    self.roots.update(realparents(rev))
                    return True

            return False

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = followfilter()
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted.discard(x)

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and not match.files():
            ff = followfilter(onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        for i, window in increasingwindows(0, len(revs)):
            nrevs = [rev for rev in revs[i:i + window] if want(rev)]
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            for rev in nrevs:
                yield change(rev)
    return iterate()
1194 1194
def _makegraphfilematcher(repo, pats, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "filenames". That mapping is built lazily by
    # reproducing the graph traversal already done by the --follow
    # revset and relating linkrevs to file names (which is not
    # "correct" but good enough).
    filenames = {}        # linkrev -> set of file paths
    populated = [False]   # mutable flag shared with the closure below
    pctx = repo['.']
    wctx = repo[None]

    def _fill():
        for fn in pats:
            fctx = pctx[fn]
            for group in ((fctx,), fctx.ancestors(followfirst=followfirst)):
                for c in group:
                    filenames.setdefault(c.linkrev(), set()).add(c.path())

    def filematcher(rev):
        # populate the cache on first use only
        if not populated[0]:
            populated[0] = True
            _fill()
        return scmutil.match(wctx, filenames.get(rev, []), default='path')

    return filematcher
1222 1222
def _makegraphlogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # maps an option name to (revset template, separator used to join
    # the templates rendered from a list-valued option)
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
        }

    # work on a copy: synthetic '_'-prefixed pseudo-options are added below
    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    followfirst = opts.get('follow_first') and 1 or 0
    # --follow with FILE behaviour depends on revs...
    startrev = revs[0]
    followdescendants = (len(revs) > 1 and revs[0] < revs[1]) and 1 or 0

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfile() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    pctx = repo[None]
    match, pats = scmutil.matchandpats(pctx, pats, opts)
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in pctx:
                raise util.Abort(_('cannot follow file not in parent '
                                   'revision: "%s"') % f)
            filelog = repo.file(f)
            if not len(filelog):
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True
    if slowpath:
        # See walkchangerevs() slow path.
        #
        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
    else:
        if follow:
            fpats = ('_patsfollow', '_patsfollowfirst')
            fnopats = (('_ancestors', '_fancestors'),
                       ('_descendants', '_fdescendants'))
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                opts[fnopats[followdescendants][followfirst]] = str(startrev)
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        if follow:
            filematcher = _makegraphfilematcher(repo, pats, followfirst)
        else:
            filematcher = lambda rev: match

    # render each option through its template and AND everything together
    expr = []
    for op, val in opts.iteritems():
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
1343 1343
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    def increasingrevs(repo, revs, matcher):
        # The sorted input rev sequence is chopped in sub-sequences
        # which are sorted in ascending order and passed to the
        # matcher. The filtered revs are sorted again as they were in
        # the original sub-sequence. This achieves several things:
        #
        # - getlogrevs() now returns a generator which behaviour is
        #   adapted to log need. First results come fast, last ones
        #   are batched for performances.
        #
        # - revset matchers often operate faster on revisions in
        #   changelog order, because most filters deal with the
        #   changelog.
        #
        # - revset matchers can reorder revisions. "A or B" typically
        #   returns the revision matching A then the revision
        #   matching B. We want to hide this internal implementation
        #   detail from the caller, and sorting the filtered revision
        #   again achieves this.
        for i, window in increasingwindows(0, len(revs), windowsize=1):
            orevs = revs[i:i + window]
            nrevs = set(matcher(repo, sorted(orevs)))
            for rev in orevs:
                if rev in nrevs:
                    yield rev

    if not len(repo):
        return iter([]), None, None
    # Default --rev value depends on --follow but --follow behaviour
    # depends on revisions resolved from --rev...
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        revs = scmutil.revrange(repo, opts['rev'])
    else:
        if follow and len(repo) > 0:
            revs = scmutil.revrange(repo, ['.:0'])
        else:
            revs = range(len(repo) - 1, -1, -1)
    if not revs:
        return iter([]), None, None
    expr, filematcher = _makegraphlogrevset(repo, pats, opts, revs)
    if expr:
        matcher = revset.match(repo.ui, expr)
        revs = increasingrevs(repo, revs, matcher)
    if not opts.get('hidden'):
        # --hidden is still experimental and not worth a dedicated revset
        # yet. Fortunately, filtering revision number is fast.
        revs = (r for r in revs if r not in repo.hiddenrevs)
    else:
        revs = iter(revs)
    return revs, expr, filematcher
1403 1403
def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
                 filematcher=None):
    """Render each changeset yielded by dag next to an ASCII graph column."""
    seen = []
    state = graphmod.asciistate()
    for rev, type, ctx, parents in dag:
        # choose the node glyph: working-dir parents get '@', obsolete
        # changesets 'x', everything else 'o'
        if ctx.node() in showparents:
            char = '@'
        elif ctx.obsolete():
            char = 'x'
        else:
            char = 'o'
        currev = ctx.rev()
        copies = None
        if getrenamed and currev:
            copies = []
            for fn in ctx.files():
                renamed = getrenamed(fn, currev)
                if renamed:
                    copies.append((fn, renamed[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(currev)
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(rev)
        edges = edgefn(type, char, lines, seen, rev, parents)
        for etype, echar, elines, coldata in edges:
            graphmod.ascii(ui, state, etype, echar, elines, coldata)
    displayer.close()
1432 1432
def graphlog(ui, repo, *pats, **opts):
    """Show revision history alongside an ASCII revision graph.

    Parameters are identical to log command ones.
    """
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revs = sorted(revs, reverse=1)
    limit = loglimit(opts)
    if limit is not None:
        revs = revs[:limit]
    revdag = graphmod.dagwalker(repo, revs)

    # --copies needs a rename lookup bounded by the highest requested rev
    getrenamed = None
    if opts.get('copies'):
        if opts.get('rev'):
            endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
        else:
            endrev = None
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)

    displayer = show_changeset(ui, repo, opts, buffered=True)
    showparents = [ctx.node() for ctx in repo[None].parents()]
    displaygraph(ui, revdag, displayer, showparents,
                 graphmod.asciiedges, getrenamed, filematcher)
1452 1452
def checkunsupportedgraphflags(pats, opts):
    """Abort when a log option that --graph cannot honor was given."""
    for op in ["newest_first"]:
        if opts.get(op):
            raise util.Abort(_("-G/--graph option is incompatible with --%s")
                             % op.replace("_", "-"))
1458 1458
def graphrevs(repo, nodes, opts):
    """Return a graph iterator over nodes, newest first, honoring --limit.

    Note: reverses the caller's nodes list in place.
    """
    maxcount = loglimit(opts)
    nodes.reverse()
    if maxcount is not None:
        nodes = nodes[:maxcount]
    return graphmod.nodes(repo, nodes)
1465 1465
def add(ui, repo, match, dryrun, listsubrepos, prefix, explicitonly):
    """Schedule matching files (including subrepo files) for addition.

    Returns the list of file names that could not be added. With
    dryrun nothing is actually added; explicitonly restricts additions
    to files named explicitly; prefix is prepended to names printed
    while recursing into subrepositories.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # intercept the matcher's bad-file callback so the names are collected
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    # only pay for case-collision auditing when the ui asks for it
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
    for f in repo.walk(match):
        exact = match.exact(f)
        if exact or not explicitonly and f not in repo.dirstate:
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(join(f)))

    # recurse into subrepositories; a missing one is skipped with a notice
    for subpath in wctx.substate:
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            if listsubrepos:
                bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
                                   False))
            else:
                bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
                                   True))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not dryrun:
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
1504 1504
def forget(ui, repo, match, prefix, explicitonly):
    """Unschedule matching files (including subrepo files) from tracking.

    Returns (bad, forgot): names that could not be forgotten and names
    that were. explicitonly restricts the operation to files named
    explicitly; prefix is prepended to names printed while recursing
    into subrepositories.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # intercept the matcher's bad-file callback so the names are collected
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    wctx = repo[None]
    forgot = []
    s = repo.status(match=match, clean=True)
    # candidates come from the status tuple: modified, added, deleted
    # and clean entries (indices 0, 1, 3, 6)
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    # recurse into subrepositories; a missing one is skipped with a notice
    for subpath in wctx.substate:
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            subbad, subforgot = sub.forget(ui, submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # warn about explicitly-named files that are not tracked at all
        for f in match.files():
            if f not in repo.dirstate and not os.path.isdir(match.rel(join(f))):
                if f not in forgot:
                    if os.path.exists(match.rel(join(f))):
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(join(f)))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(join(f)))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(forget)
    return bad, forgot
1546 1546
def duplicatecopies(repo, rev, p1):
    "Reproduce copies found in the source revision in the dirstate for grafts"
    pathcopies = copies.pathcopies(repo[p1], repo[rev])
    for dst, src in pathcopies.iteritems():
        repo.dirstate.copy(src, dst)
1551 1551
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    rawdate = opts.get('date')
    if rawdate:
        opts['date'] = util.parsedate(rawdate)
    message = logmessage(ui, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        scmutil.addremove(repo, pats, opts)

    matcher = scmutil.match(repo[None], pats, opts)
    return commitfunc(ui, repo, message, matcher, opts)
1566 1566
def amend(ui, repo, commitfunc, old, extra, pats, opts):
    """Rewrite changeset 'old', folding in current working-dir changes.

    Commits the working directory (with hooks disabled) as an
    intermediate changeset, builds a replacement of 'old' on top of
    old.p1() combining both, reroutes the dirstate parents and any
    bookmarks to the new node, then strips 'old' and the intermediate
    commit. Returns the node id of the amended changeset.
    """
    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()

    wlock = repo.wlock()
    try:
        # First, do a regular commit to record all changes in the working
        # directory (if there are any)
        ui.callhooks = False
        try:
            node = commit(ui, repo, commitfunc, pats, opts)
        finally:
            ui.callhooks = True
        ctx = repo[node]

        # Participating changesets:
        #
        # node/ctx o - new (intermediate) commit that contains changes from
        #          |   working dir to go into amending commit (or a workingctx
        #          |   if there were no changes)
        #          |
        # old      o - changeset to amend
        #          |
        # base     o - parent of amending changeset

        # Update extra dict from amended commit (e.g. to preserve graft source)
        extra.update(old.extra())

        # Also update it from the intermediate commit or from the wctx
        extra.update(ctx.extra())

        files = set(old.files())

        # Second, we use either the commit we just did, or if there were no
        # changes the parent of the working directory as the version of the
        # files in the final amend commit
        if node:
            ui.note(_('copying changeset %s to %s\n') % (ctx, base))

            user = ctx.user()
            date = ctx.date()
            message = ctx.description()
            # Recompute copies (avoid recording a -> b -> a)
            copied = copies.pathcopies(base, ctx)

            # Prune files which were reverted by the updates: if old introduced
            # file X and our intermediate commit, node, renamed that file, then
            # those two files are the same and we can discard X from our list
            # of files. Likewise if X was deleted, it's no longer relevant
            files.update(ctx.files())

            def samefile(f):
                # True if f is identical (content and flags) in ctx and
                # base, or absent from both
                if f in ctx.manifest():
                    a = ctx.filectx(f)
                    if f in base.manifest():
                        b = base.filectx(f)
                        return (not a.cmp(b)
                                and a.flags() == b.flags())
                    else:
                        return False
                else:
                    return f not in base.manifest()
            files = [f for f in files if not samefile(f)]

            def filectxfn(repo, ctx_, path):
                # serve file contents from the intermediate commit;
                # memctx treats IOError as "file removed"
                try:
                    fctx = ctx[path]
                    flags = fctx.flags()
                    mctx = context.memfilectx(fctx.path(), fctx.data(),
                                              islink='l' in flags,
                                              isexec='x' in flags,
                                              copied=copied.get(path))
                    return mctx
                except KeyError:
                    raise IOError
        else:
            ui.note(_('copying changeset %s to %s\n') % (old, base))

            # Use version of files as in the old cset
            def filectxfn(repo, ctx_, path):
                try:
                    return old.filectx(path)
                except KeyError:
                    raise IOError

        # See if we got a message from -m or -l, if not, open the editor
        # with the message of the changeset to amend
        user = opts.get('user') or old.user()
        date = opts.get('date') or old.date()
        message = logmessage(ui, opts)
        if not message:
            cctx = context.workingctx(repo, old.description(), user, date,
                                      extra,
                                      repo.status(base.node(), old.node()))
            message = commitforceeditor(repo, cctx, [])

        new = context.memctx(repo,
                             parents=[base.node(), nullid],
                             text=message,
                             files=files,
                             filectxfn=filectxfn,
                             user=user,
                             date=date,
                             extra=extra)
        newid = repo.commitctx(new)
        if newid != old.node():
            # Reroute the working copy parent to the new changeset
            repo.setparents(newid, nullid)

            # Move bookmarks from old parent to amend commit
            bms = repo.nodebookmarks(old.node())
            if bms:
                for bm in bms:
                    repo._bookmarks[bm] = newid
                bookmarks.write(repo)

            # Strip the intermediate commit (if there was one) and the amended
            # commit
            lock = repo.lock()
            try:
                if node:
                    ui.note(_('stripping intermediate changeset %s\n') % ctx)
                ui.note(_('stripping amended changeset %s\n') % old)
                repair.strip(ui, repo, old.node(), topic='amend-backup')
            finally:
                lock.release()
    finally:
        wlock.release()
    return newid
1696 1696
def commiteditor(repo, ctx, subs):
    """Return ctx's description, invoking the editor when it is empty."""
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(repo, ctx, subs)
1701 1701
def commitforceeditor(repo, ctx, subs):
    """Invoke the user's editor preloaded with a commit message skeleton.

    Builds the 'HG:' commented template from ctx (existing description,
    user, merge state, branch, file lists) plus any dirty subrepos in
    subs, runs the editor in the repository root, strips the HG: lines
    and returns the resulting text.

    Raises util.Abort if the resulting message is empty.
    """
    edittext = []
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        edittext.append(ctx.description())
    edittext.append("")
    edittext.append("") # Empty line between message and comments.
    edittext.append(_("HG: Enter commit message."
                      " Lines beginning with 'HG:' are removed."))
    edittext.append(_("HG: Leave message empty to abort commit."))
    edittext.append("HG: --")
    edittext.append(_("HG: user: %s") % ctx.user())
    if ctx.p2():
        edittext.append(_("HG: branch merge"))
    if ctx.branch():
        edittext.append(_("HG: branch '%s'") % ctx.branch())
    edittext.extend([_("HG: subrepo %s") % s for s in subs])
    edittext.extend([_("HG: added %s") % f for f in added])
    edittext.extend([_("HG: changed %s") % f for f in modified])
    edittext.extend([_("HG: removed %s") % f for f in removed])
    if not added and not modified and not removed:
        edittext.append(_("HG: no files changed"))
    edittext.append("")
    # run editor in the repository root; restore the original cwd even
    # when the editor fails or the user aborts (previously a raising
    # edit() left the process chdir'd into the repository root)
    olddir = os.getcwd()
    os.chdir(repo.root)
    try:
        text = repo.ui.edit("\n".join(edittext), ctx.user())
    finally:
        os.chdir(olddir)
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)

    if not text.strip():
        raise util.Abort(_("empty commit message"))

    return text
1736 1736
def revert(ui, repo, ctx, parents, *pats, **opts):
    """Restore files matching pats to their state in changeset ctx.

    parents is the working directory's (parent, p2) pair; ctx is the
    target revision to revert to.  Dispatches every matched file through
    a status-based table (modified/added/removed/deleted), optionally
    backing up the current version, then rewrites the working copy and
    dirstate unless --dry-run was given.
    """
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == parent:
        pmf = mf
    else:
        # parent manifest is loaded lazily below, only if actually needed
        pmf = None

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other.

    # names: abs path -> (relative path for display, was-it-an-exact-match)
    names = {}

    wlock = repo.wlock()
    try:
        # walk dirstate.

        m = scmutil.match(repo[None], pats, opts)
        # silence "no such file" complaints on this first pass
        m.bad = lambda x, y: False
        for abs in repo.walk(m):
            names[abs] = m.rel(abs), m.exact(abs)

        # walk target manifest.

        def badfn(path, msg):
            # only warn about paths not already collected, not covered
            # by a subrepo, and not a prefix of a collected file
            if path in names:
                return
            if path in ctx.substate:
                return
            path_ = path + '/'
            for f in names:
                if f.startswith(path_):
                    return
            ui.warn("%s: %s\n" % (m.rel(path), msg))

        m = scmutil.match(ctx, pats, opts)
        m.bad = badfn
        for abs in ctx.walk(m):
            if abs not in names:
                names[abs] = m.rel(abs), m.exact(abs)

        # get the list of subrepos that must be reverted
        targetsubs = [s for s in ctx.substate if m(s)]
        m = scmutil.matchfiles(repo, names)
        changes = repo.status(match=m)[:4]
        modified, added, removed, deleted = map(set, changes)

        # if f is a rename, also revert the source
        cwd = repo.getcwd()
        for f in added:
            src = repo.dirstate.copied(f)
            if src and src not in names and repo.dirstate[src] == 'r':
                removed.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        def removeforget(abs):
            # message depends on whether the file was only ever added
            if repo.dirstate[abs] == 'a':
                return _('forgetting %s\n')
            return _('removing %s\n')

        # each action is (list of files to process, status message)
        revert = ([], _('reverting %s\n'))
        add = ([], _('adding %s\n'))
        remove = ([], removeforget)
        undelete = ([], _('undeleting %s\n'))

        disptable = (
            # dispatch table:
            #   file state
            #   action if in target manifest
            #   action if not in target manifest
            #   make backup if in target manifest
            #   make backup if not in target manifest
            (modified, revert, remove, True, True),
            (added, revert, remove, True, False),
            (removed, undelete, None, False, False),
            (deleted, revert, remove, False, False),
            )

        for abs, (rel, exact) in sorted(names.items()):
            mfentry = mf.get(abs)
            target = repo.wjoin(abs)
            def handle(xlist, dobackup):
                # queue abs for the chosen action, saving a .orig backup
                # and emitting the action's status message as appropriate
                xlist[0].append(abs)
                if (dobackup and not opts.get('no_backup') and
                    os.path.lexists(target)):
                    bakname = "%s.orig" % rel
                    ui.note(_('saving current version of %s as %s\n') %
                            (rel, bakname))
                    if not opts.get('dry_run'):
                        util.rename(target, bakname)
                if ui.verbose or not exact:
                    msg = xlist[1]
                    if not isinstance(msg, basestring):
                        msg = msg(abs)
                    ui.status(msg % rel)
            for table, hitlist, misslist, backuphit, backupmiss in disptable:
                if abs not in table:
                    continue
                # file has changed in dirstate
                if mfentry:
                    handle(hitlist, backuphit)
                elif misslist is not None:
                    handle(misslist, backupmiss)
                break
            else:
                # no dispatch entry matched: file is clean in the dirstate
                if abs not in repo.dirstate:
                    if mfentry:
                        handle(add, True)
                    elif exact:
                        ui.warn(_('file not managed: %s\n') % rel)
                    continue
                # file has not changed in dirstate
                if node == parent:
                    if exact:
                        ui.warn(_('no changes needed to %s\n') % rel)
                    continue
                if pmf is None:
                    # only need parent manifest in this unlikely case,
                    # so do not read by default
                    pmf = repo[parent].manifest()
                if abs in pmf and mfentry:
                    # if version of file is same in parent and target
                    # manifests, do nothing
                    if (pmf[abs] != mfentry or
                        pmf.flags(abs) != mf.flags(abs)):
                        handle(revert, False)
                else:
                    handle(remove, False)

        if not opts.get('dry_run'):
            def checkout(f):
                # write the target revision's content/flags into the wdir
                fc = ctx[f]
                repo.wwrite(f, fc.data(), fc.flags())

            audit_path = scmutil.pathauditor(repo.root)
            for f in remove[0]:
                if repo.dirstate[f] == 'a':
                    repo.dirstate.drop(f)
                    continue
                audit_path(f)
                try:
                    util.unlinkpath(repo.wjoin(f))
                except OSError:
                    # file may already be gone; removal intent still recorded
                    pass
                repo.dirstate.remove(f)

            normal = None
            if node == parent:
                # We're reverting to our parent. If possible, we'd like status
                # to report the file as clean. We have to use normallookup for
                # merges to avoid losing information about merged/dirty files.
                if p2 != nullid:
                    normal = repo.dirstate.normallookup
                else:
                    normal = repo.dirstate.normal
            for f in revert[0]:
                checkout(f)
                if normal:
                    normal(f)

            for f in add[0]:
                checkout(f)
                repo.dirstate.add(f)

            normal = repo.dirstate.normallookup
            if node == parent and p2 == nullid:
                normal = repo.dirstate.normal
            for f in undelete[0]:
                checkout(f)
                normal(f)

            if targetsubs:
                # Revert the subrepos on the revert list
                for sub in targetsubs:
                    ctx.sub(sub).revert(ui, ctx.substate[sub], *pats, **opts)
    finally:
        wlock.release()
1917 1917
def command(table):
    """Return a decorator factory bound to ``table``.

    The returned callable is used as ``@cmd(name, options[, synopsis])``
    and registers the decorated function in ``table`` as a command-table
    entry, copying the options list so later mutation cannot leak in.
    """

    def cmd(name, options, synopsis=None):
        def decorator(func):
            if synopsis:
                entry = (func, options[:], synopsis)
            else:
                entry = (func, options[:])
            table[name] = entry
            return func
        return decorator

    return cmd
@@ -1,470 +1,469
1 1 # posix.py - Posix utility function implementations for Mercurial
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import encoding
10 10 import os, sys, errno, stat, getpass, pwd, grp, tempfile, unicodedata
11 11
# POSIX implementations of the platform abstraction layer: on this
# platform most primitives map directly onto os/os.path builtins.
posixfile = open
normpath = os.path.normpath
samestat = os.path.samestat
oslink = os.link
unlink = os.unlink
rename = os.rename
expandglobs = False  # the shell expands globs for us on POSIX

# snapshot the process umask without changing it (set to 0, then restore)
umask = os.umask(0)
os.umask(umask)
23 22
def openhardlinks():
    """On POSIX it is always safe to hold open handles to hardlinks."""
    return True
27 26
def nlinks(name):
    """Return the hardlink count of *name* (lstat: symlinks not followed)."""
    st = os.lstat(name)
    return st.st_nlink
31 30
def parsepatchoutput(output_line):
    """Return the filename named in one line of `patch` output."""
    # drop the fixed "patching file " prefix (14 characters)
    pf = output_line[14:]
    if os.sys.platform == 'OpenVMS':
        if pf[0] == '`':
            pf = pf[1:-1]  # strip the backquotes OpenVMS patch adds
    elif pf.startswith("'") and pf.endswith("'") and " " in pf:
        pf = pf[1:-1]  # strip the quotes patch puts around names with spaces
    return pf
42 41
def sshargs(sshcmd, host, user, port):
    '''Build argument list for ssh'''
    if user:
        args = "%s@%s" % (user, host)
    else:
        args = host
    if port:
        return "%s -p %s" % (args, port)
    return args
47 46
def isexec(f):
    """check whether a file is executable (owner execute bit set)"""
    # stat.S_IXUSR == 0100, the constant the original literal spelled out
    return bool(os.lstat(f).st_mode & stat.S_IXUSR)
51 50
def setflags(f, l, x):
    """Set the link (l) and executable (x) flags on path f.

    Converts between a regular file and a symlink in place when the
    requested link flag disagrees with the on-disk state, then adjusts
    the executable bits relative to the process umask.
    """
    s = os.lstat(f).st_mode
    if l:
        if not stat.S_ISLNK(s):
            # switch file to link
            fp = open(f)
            data = fp.read()
            fp.close()
            os.unlink(f)
            try:
                os.symlink(data, f)
            except OSError:
                # failed to make a link, rewrite file
                fp = open(f, "w")
                fp.write(data)
                fp.close()
        # no chmod needed at this point
        return
    if stat.S_ISLNK(s):
        # switch link to file
        data = os.readlink(f)
        os.unlink(f)
        fp = open(f, "w")
        fp.write(data)
        fp.close()
        s = 0666 & ~umask # avoid restatting for chmod

    sx = s & 0100  # current owner-execute bit
    if x and not sx:
        # Turn on +x for every +r bit when making a file executable
        # and obey umask.
        os.chmod(f, s | (s & 0444) >> 2 & ~umask)
    elif not x and sx:
        # Turn off all +x bits
        os.chmod(f, s & 0666)
87 86
def copymode(src, dst, mode=None):
    '''Copy the file mode from the file at path src to dst.
    If src doesn't exist, we're using mode instead. If mode is None, we're
    using umask.'''
    try:
        st_mode = os.lstat(src).st_mode & 0777
    except OSError, inst:
        # only a missing source falls back to the explicit mode/umask
        if inst.errno != errno.ENOENT:
            raise
        st_mode = mode
    if st_mode is None:
        st_mode = ~umask
    st_mode &= 0666  # never propagate exec/suid bits from the fallback
    os.chmod(dst, st_mode)
102 101
def checkexec(path):
    """
    Check whether the given path is on a filesystem with UNIX-like exec flags

    Requires a directory (like /foo/.hg)
    """

    # VFAT on some Linux versions can flip mode but it doesn't persist
    # a FS remount. Frequently we can detect it if files are created
    # with exec bit on.

    try:
        EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-')
        try:
            os.close(fh)
            m = os.stat(fn).st_mode & 0777
            # check whether a freshly created file already has exec set
            new_file_has_exec = m & EXECFLAGS
            os.chmod(fn, m ^ EXECFLAGS)
            # check whether flipping the bits actually sticks
            exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
        finally:
            os.unlink(fn)
    except (IOError, OSError):
        # we don't care, the user probably won't be able to commit anyway
        return False
    return not (new_file_has_exec or exec_flags_cannot_flip)
129 128
def checklink(path):
    """check whether the given path is on a symlink-capable filesystem"""
    # mktemp is not racy here: if the name springs into existence the
    # symlink call fails and we correctly answer False
    probe = tempfile.mktemp(dir=path, prefix='hg-checklink-')
    try:
        os.symlink(".", probe)
        os.unlink(probe)
    except (OSError, AttributeError):
        return False
    return True
141 140
def checkosfilename(path):
    '''Check that the base-relative path is a valid filename on this platform.
    Returns None if the path is ok, or a UI string describing the problem.'''
    # every path is acceptable on POSIX, so there is never a problem
    return None
146 145
def setbinary(fd):
    """No-op: POSIX file descriptors have no text/binary distinction."""
    return None
149 148
def pconvert(path):
    """Return *path* with '/' separators; POSIX already uses '/'."""
    return path
152 151
def localpath(path):
    """Return *path* in local (OS) form; identical on POSIX."""
    return path
155 154
def samefile(fpath1, fpath2):
    """Return whether the two paths refer to the same file.

    Only guaranteed to work for files, not directories.
    """
    return os.path.samefile(fpath1, fpath2)
160 159
def samedevice(fpath1, fpath2):
    """Return whether the two paths live on the same device.

    Only guaranteed to work for files, not directories.
    """
    # compare raw device ids from lstat (symlinks are not followed)
    return os.lstat(fpath1).st_dev == os.lstat(fpath2).st_dev
167 166
# os.path.normcase is a no-op, which doesn't help us on non-native filesystems
def normcase(path):
    """Fold *path* to its canonical comparison form (lowercase)."""
    return path.lower()
171 170
# Platform-conditional overrides of normcase/realpath.
if sys.platform == 'darwin':
    import fcntl # only needed on darwin, missing on jython

    def normcase(path):
        # Fold to the form HFS+ uses for comparison: NFD-decomposed,
        # lowercased UTF-8.
        try:
            u = path.decode('utf-8')
        except UnicodeDecodeError:
            # percent-encode any characters that don't round-trip
            p2 = path.decode('utf-8', 'ignore').encode('utf-8')
            s = ""
            pos = 0
            for c in path:
                if p2[pos:pos + 1] == c:
                    s += c
                    pos += 1
                else:
                    s += "%%%02X" % ord(c)
            u = s.decode('utf-8')

        # Decompose then lowercase (HFS+ technote specifies lower)
        return unicodedata.normalize('NFD', u).lower().encode('utf-8')

    def realpath(path):
        '''
        Returns the true, canonical file system path equivalent to the given
        path.

        Equivalent means, in this case, resulting in the same, unique
        file system link to the path. Every file system entry, whether a file,
        directory, hard link or symbolic link or special, will have a single
        path preferred by the system, but may allow multiple, differing path
        lookups to point to it.

        Most regular UNIX file systems only allow a file system entry to be
        looked up by its distinct path. Obviously, this does not apply to case
        insensitive file systems, whether case preserving or not. The most
        complex issue to deal with is file systems transparently reencoding the
        path, such as the non-standard Unicode normalisation required for HFS+
        and HFSX.
        '''
        # Constants copied from /usr/include/sys/fcntl.h
        F_GETPATH = 50
        O_SYMLINK = 0x200000

        try:
            fd = os.open(path, O_SYMLINK)
        except OSError, err:
            # a missing path has no canonical form; return it unchanged
            if err.errno == errno.ENOENT:
                return path
            raise

        try:
            # ask the kernel for the canonical path of the open descriptor
            return fcntl.fcntl(fd, F_GETPATH, '\0' * 1024).rstrip('\0')
        finally:
            os.close(fd)
elif sys.version_info < (2, 4, 2, 'final'):
    # Workaround for http://bugs.python.org/issue1213894 (os.path.realpath
    # didn't resolve symlinks that were the first component of the path.)
    def realpath(path):
        if os.path.isabs(path):
            return os.path.realpath(path)
        else:
            return os.path.realpath('./' + path)
else:
    # Fallback to the likely inadequate Python builtin function.
    realpath = os.path.realpath
238 237
if sys.platform == 'cygwin':
    # workaround for cygwin, in which mount point part of path is
    # treated as case sensitive, even though underlying NTFS is case
    # insensitive.

    # default mount points
    cygwinmountpoints = sorted([
            "/usr/bin",
            "/usr/lib",
            "/cygdrive",
            ], reverse=True)

    # use upper-ing as normcase as same as NTFS workaround
    def normcase(path):
        pathlen = len(path)
        if (pathlen == 0) or (path[0] != os.sep):
            # treat as relative
            return encoding.upper(path)

        # to preserve case of mountpoint part
        for mp in cygwinmountpoints:
            if not path.startswith(mp):
                continue

            mplen = len(mp)
            if mplen == pathlen: # mount point itself
                return mp
            if path[mplen] == os.sep:
                # mountpoint kept verbatim, remainder case-folded
                return mp + encoding.upper(path[mplen:])

        return encoding.upper(path)

    # Cygwin translates native ACLs to POSIX permissions,
    # but these translations are not supported by native
    # tools, so the exec bit tends to be set erroneously.
    # Therefore, disable executable bit access on Cygwin.
    def checkexec(path):
        return False

    # Similarly, Cygwin's symlink emulation is likely to create
    # problems when Mercurial is used from both Cygwin and native
    # Windows, with other native tools, or on shared volumes
    def checklink(path):
        return False
283 282
def shellquote(s):
    """Quote *s* so a POSIX (or OpenVMS) shell treats it as one literal word."""
    if os.sys.platform == 'OpenVMS':
        return '"%s"' % s
    # close the single-quoted span, emit an escaped quote, reopen it
    return "'%s'" % s.replace("'", "'\\''")
289 288
def quotecommand(cmd):
    """Return *cmd* unchanged; no extra quoting is needed on POSIX."""
    return cmd
292 291
def popen(command, mode='r'):
    """Thin wrapper around os.popen with Mercurial's default mode."""
    return os.popen(command, mode)
295 294
def testpid(pid):
    '''return False if pid dead, True if running or not sure'''
    if os.sys.platform == 'OpenVMS':
        return True
    try:
        # signal 0 performs error checking only, without delivering a signal
        os.kill(pid, 0)
        return True
    except OSError, inst:
        # ESRCH means no such process; any other error (e.g. EPERM)
        # implies the process exists but is not ours
        return inst.errno != errno.ESRCH
305 304
def explainexit(code):
    """return a 2-tuple (desc, code) describing a subprocess status
    (codes from kill are negative - not os.system/wait encoding)"""
    if code < 0:
        # negative status: the child was terminated by a signal
        return _("killed by signal %d") % -code, -code
    return _("exited with status %d") % code, code
312 311
def isowner(st):
    """Return True if the stat object st is from the current user."""
    return os.getuid() == st.st_uid
316 315
def findexe(command):
    '''Find executable for command searching like which does.
    If command is a basename then PATH is searched for command.
    PATH isn't searched if command is an absolute or relative path.
    If command isn't found None is returned.'''
    if sys.platform == 'OpenVMS':
        return command

    def candidate(executable):
        'Return executable if it is an existing executable file.'
        if os.path.isfile(executable) and os.access(executable, os.X_OK):
            return executable
        return None

    # explicit path: do not consult PATH at all
    if os.sep in command:
        return candidate(command)

    if sys.platform == 'plan9':
        return candidate(os.path.join('/bin', command))

    for directory in os.environ.get('PATH', '').split(os.pathsep):
        found = candidate(os.path.join(directory, command))
        if found is not None:
            return found
    return None
342 341
def setsignalhandler():
    """Nothing to do on POSIX; other platforms hook signals here."""
    return None
345 344
def statfiles(files):
    'Stat each file in files and yield stat or None if file does not exist.'
    # bind the lookup once; this generator is used on large file lists
    lstat = os.lstat
    for nf in files:
        try:
            st = lstat(nf)
        except OSError, err:
            # missing file or missing parent directory both mean "no stat"
            if err.errno not in (errno.ENOENT, errno.ENOTDIR):
                raise
            st = None
        yield st
357 356
def getuser():
    '''return name of current user'''
    # getpass consults LOGNAME/USER/... and falls back to the pw database
    return getpass.getuser()
361 360
def username(uid=None):
    """Return the name of the user with the given uid.

    If uid is None, return the name of the current user.
    Unknown uids are rendered as their decimal string.
    """
    if uid is None:
        uid = os.getuid()
    try:
        return pwd.getpwuid(uid)[0]
    except KeyError:
        return str(uid)
373 372
def groupname(gid=None):
    """Return the name of the group with the given gid.

    If gid is None, return the name of the current group.
    Unknown gids are rendered as their decimal string.
    """
    if gid is None:
        gid = os.getgid()
    try:
        return grp.getgrgid(gid)[0]
    except KeyError:
        return str(gid)
385 384
def groupmembers(name):
    """Return the member list of the group called *name*.

    Raises KeyError if the group does not exist.
    """
    entry = grp.getgrnam(name)
    return list(entry.gr_mem)
391 390
def spawndetached(args):
    """Spawn args[0] with args, not waiting for it; return the child pid."""
    # P_DETACH exists only on some platforms; fall back to plain P_NOWAIT
    mode = getattr(os, 'P_DETACH', 0) | os.P_NOWAIT
    return os.spawnvp(mode, args[0], args)
395 394
def gethgcmd():
    """Return the command (argv prefix) used to invoke this process."""
    return sys.argv[0:1]
398 397
def termwidth():
    """Return the terminal column count, or 80 if it cannot be determined."""
    try:
        import termios, array, fcntl
        # probe stderr first: it is least likely to be redirected
        for dev in (sys.stderr, sys.stdout, sys.stdin):
            try:
                try:
                    fd = dev.fileno()
                except AttributeError:
                    continue
                if not os.isatty(fd):
                    continue
                try:
                    # TIOCGWINSZ fills a struct winsize; field 1 is columns
                    arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
                    width = array.array('h', arri)[1]
                    if width > 0:
                        return width
                except AttributeError:
                    pass
            except ValueError:
                pass
            except IOError, e:
                if e[0] == errno.EINVAL:
                    # ioctl not supported on this descriptor; try the next
                    pass
                else:
                    raise
    except ImportError:
        pass
    return 80
427 426
def makedir(path, notindexed):
    """Create directory *path*; ``notindexed`` only matters on Windows."""
    os.mkdir(path)
430 429
def unlinkpath(f):
    """Remove file *f*, then prune any parent directories left empty."""
    os.unlink(f)
    try:
        os.removedirs(os.path.dirname(f))
    except OSError:
        # a parent is not empty (or not removable): leave it alone
        pass
439 438
def lookupreg(key, name=None, scope=None):
    """Registry lookup stub: POSIX has no Windows registry, so None."""
    return None
442 441
def hidewindow():
    """Hide current shell window.

    Used to hide the window opened when starting asynchronous
    child process under Windows, unneeded on other systems.
    """
    return None
450 449
class cachestat(object):
    """Snapshot of os.stat() for a path, used to detect file changes."""

    def __init__(self, path):
        self.stat = os.stat(path)

    def cacheable(self):
        # an inode of 0 gives no useful identity to compare against
        return bool(self.stat.st_ino)

    __hash__ = object.__hash__

    def __eq__(self, other):
        try:
            return self.stat == other.stat
        except AttributeError:
            # anything without a .stat attribute can never compare equal
            return False

    def __ne__(self, other):
        return not self.__eq__(other)
468 467
def executablepath():
    """Path of the running executable; available on Windows only."""
    return None
@@ -1,1801 +1,1800
1 1 # util.py - Mercurial utility functions and platform specfic implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specfic implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from i18n import _
17 17 import error, osutil, encoding, collections
18 18 import errno, re, shutil, sys, tempfile, traceback
19 19 import os, time, datetime, calendar, textwrap, signal
20 20 import imp, socket, urllib
21 21
# pick the platform-specific implementation module; the aliases below
# re-export its primitives as util.* names
if os.name == 'nt':
    import windows as platform
else:
    import posix as platform
26 26
# Re-export the platform abstraction layer under util.* so callers never
# import posix/windows directly.  (nulldev was removed in favour of
# os.devnull in this revision.)
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
realpath = platform.realpath
rename = platform.rename
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
sshargs = platform.sshargs
statfiles = platform.statfiles
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username
74 73
75 74 # Python compatibility
76 75
# unique sentinel: no real attribute value can ever be this object
_notset = object()

def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr*.

    Uses getattr() with a private sentinel default, so only a genuinely
    missing attribute (AttributeError) reads as False; other exceptions
    raised while computing the attribute propagate to the caller.
    """
    return getattr(thing, attr, _notset) is not _notset
81 80
def sha1(s=''):
    '''
    Low-overhead wrapper around Python's SHA support

    >>> f = _fastsha1
    >>> a = sha1()
    >>> a = f()
    >>> a.hexdigest()
    'da39a3ee5e6b4b0d3255bfef95601890afd80709'
    '''

    # _fastsha1 rebinds the module-level 'sha1' name to the real hash
    # constructor on first use, so this indirection only costs once
    return _fastsha1(s)
94 93
def _fastsha1(s=''):
    # This function will import sha1 from hashlib or sha (whichever is
    # available) and overwrite itself with it on the first call.
    # Subsequent calls will go directly to the imported function.
    if sys.version_info >= (2, 5):
        from hashlib import sha1 as _sha1
    else:
        # Python 2.4 has no hashlib; fall back to the old sha module
        from sha import sha as _sha1
    global _fastsha1, sha1
    _fastsha1 = sha1 = _sha1
    return _sha1(s)
106 105
# 'buffer' compatibility shim: keep the builtin where it exists,
# otherwise emulate it with slicing (py2) or memoryview (py3).
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            return memoryview(sliceable)[offset:]
116 115
import subprocess
# passing close_fds to Popen is only safe/cheap on POSIX platforms
closefds = os.name == 'posix'
119 118
def popen2(cmd, env=None, newlines=False):
    """Run *cmd* through the shell; return its (stdin, stdout) pipes.

    bufsize=-1 lets the system decide the buffer size.  The default of 0
    (unbuffered) performs poorly on Mac OS X:
    http://bugs.python.org/issue4194
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds, env=env,
                            universal_newlines=newlines,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    return proc.stdin, proc.stdout
130 129
def popen3(cmd, env=None, newlines=False):
    """Run *cmd* through the shell; return (stdin, stdout, stderr) pipes.

    Same buffering rationale as popen2 (bufsize=-1, see issue4194).
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds, env=env,
                            universal_newlines=newlines,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    return proc.stdin, proc.stdout, proc.stderr
139 138
def version():
    """Return version information if available."""
    try:
        # generated at build time; absent when running from source
        import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
147 146
# used by parsedate
# formats are tried in order, so more specific patterns come first
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)
175 174
# looser formats accepted only where ambiguity is tolerable (e.g. ranges)
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
182 181
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    cache = {}
    if func.func_code.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        # general case: key the cache on the full positional-args tuple
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
201 200
# deque compatibility shim: python 2.4's deque has no remove() method,
# so provide a subclass that deletes the first matching element.
if hasattr(collections.deque, 'remove'):
    deque = collections.deque
else:
    class deque(collections.deque):
        def remove(self, val):
            for idx, item in enumerate(self):
                if item == val:
                    del self[idx]
                    break
213 212
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    # 'order' tracks recency: least recently used keys sit at the left
    order = deque()
    if func.func_code.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
240 239
class propertycache(object):
    """Descriptor computing an attribute once, then caching it.

    On first access the wrapped function runs and its result is stored
    directly on the instance under the same name, so every later access
    bypasses this descriptor entirely.
    """

    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        setattr(obj, self.name, value)
        return value
249 248
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, err = proc.communicate(s)
    return out
256 255
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # on OpenVMS an odd status value means success
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        # best-effort cleanup of both temp files, even after failures
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
293 292
# maps a filter-spec prefix to the implementation that executes it
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
298 297
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, impl in filtertable.iteritems():
        if cmd.startswith(prefix):
            # strip the prefix and any following whitespace before running
            return impl(s, cmd[len(prefix):].lstrip())
    # no recognized prefix: default to a plain pipe filter
    return pipefilter(s, cmd)
305 304
def binary(s):
    """return true if a string is binary data"""
    if not s:
        return False
    # heuristic: any NUL byte marks the string as binary
    return '\0' in s
309 308
def increasingchunks(source, min=1024, max=65536):
    '''yield no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def _floorlog2(x):
        # position of the highest set bit; 0 when x is 0
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    pendinglen = 0
    for piece in source:
        pending.append(piece)
        pendinglen += len(piece)
        if pendinglen < min:
            continue
        if min < max:
            # double the threshold, but jump straight to the largest
            # power of two not exceeding what was actually buffered,
            # and never past the ceiling
            min = min << 1
            rounded = 1 << _floorlog2(pendinglen)
            if rounded > min:
                min = rounded
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        pendinglen = 0
    if pending:
        yield ''.join(pending)
340 339
Abort = error.Abort  # re-export: the canonical abort exception lives in error.py
342 341
def always(fn):
    """Matcher predicate that accepts every filename."""
    return True
345 344
def never(fn):
    """Matcher predicate that rejects every filename."""
    return False
348 347
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # on a different drive there is no sensible relative path;
        # fall back to an absolute join under root
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # strip the common path prefix from both component lists
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1, then descend into n2
    return os.sep.join((['..'] * len(a)) + b) or '.'
374 373
_hgexecutable = None  # cached location of the 'hg' executable; see hgexecutable()
376 375
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):    # new py2exe
        return True
    if safehasattr(sys, "importers"): # old py2exe
        return True
    return imp.is_frozen("__main__")  # tools/freeze
386 385
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        # resolution order: $HG, frozen interpreter, a __main__ named
        # 'hg', then PATH lookup / argv[0] basename as last resort
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
405 404
def _sethgexecutable(path):
    """set location of the 'hg' executable (the cache read by
    hgexecutable())"""
    global _hgexecutable
    _hgexecutable = path
410 409
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    # flush our own stdout first so child output interleaves correctly
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9':
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        # child hg invocations should reuse this exact binary (see
        # hgexecutable(), which consults $HG first)
        env['HG'] = hgexecutable()
        if out is None or out == sys.__stdout__:
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # redirect both stdout and stderr into the caller's writer
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            for line in proc.stdout:
                out.write(line)
            proc.wait()
            rc = proc.returncode
        if sys.platform == 'OpenVMS' and rc & 1:
            rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        # duck-type: a ui-like object has warn(); otherwise treat onerr
        # as an exception class to raise
        try:
            onerr.warn(errmsg + '\n')
        except AttributeError:
            raise onerr(errmsg)
    return rc
467 466
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # Translate the TypeError into SignatureError only when it was
            # raised by the call itself (traceback depth 1 means the call
            # frame never started executing func's body, i.e. the arguments
            # didn't match); deeper TypeErrors come from inside func and
            # are re-raised untouched.
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
479 478
def copyfile(src, dest):
    """copy a file, preserving its permission bits; a symlink source is
    recreated as a symlink, not followed.

    NOTE(review): shutil.copymode() copies permission bits only -- the
    file's atime/mtime are NOT preserved, despite the docstring this
    function historically carried.
    """
    if os.path.islink(src):
        try:
            os.unlink(dest)
        except OSError:
            pass
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copymode(src, dest)
        except shutil.Error, inst:
            raise Abort(str(inst))
494 493
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible

    Returns (hardlink, num): whether hardlinking was still viable at the
    end, and how many files were copied/linked.
    """

    if hardlink is None:
        # only attempt hardlinks when src and dst live on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    num = 0
    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            # propagate the hardlink flag: once linking fails, stop trying
            hardlink, n = copyfiles(srcname, dstname, hardlink)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed (e.g. cross-device); fall back to copying
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1

    return hardlink, num
522 521
# Path components and characters that Windows rejects in filenames;
# consulted by checkwinfilename() below.
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    '''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    '''
    # examine every path component; both '/' and '\' act as separators
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            # control characters (ordinals 0-31) are invalid too
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # device names are reserved whatever the extension ('con.xml'),
        # so match on the part before the first dot
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # `n not in '..'` is a substring test: it deliberately lets the
        # special components '.' and '..' pass while rejecting any other
        # name that ends in a dot or a space
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
565 564
# checkosfilename validates a filename against the rules of the OS we are
# actually running on: the Windows checker above on NT, otherwise the
# platform module's implementation.
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
570 569
def makelock(info, pathname):
    """Atomically create a lock at pathname whose payload is info.

    Preferred form is a symlink (created atomically, payload in the link
    target); EEXIST means the lock is already held and is re-raised.
    Any other symlink failure falls through to the regular-file variant,
    whose atomicity comes from O_EXCL.
    """
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
583 582
def readlock(pathname):
    """Return the payload of the lock at pathname.

    Mirrors the two formats written by makelock: a symlink target when
    possible, otherwise the contents of a regular file.
    """
    try:
        return os.readlink(pathname)
    except OSError, why:
        # EINVAL: not a symlink; ENOSYS: readlink unsupported here --
        # in both cases fall through to reading a plain file
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
596 595
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    if hasattr(fp, 'fileno'):
        return os.fstat(fp.fileno())
    # no file descriptor available; stat by name instead
    return os.stat(fp.name)
603 602
604 603 # File system features
605 604
def checkcase(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    # build a case-flipped sibling name for the final component
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
        if b == b2:
            return True # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        # on a case-insensitive filesystem the flipped name resolves to
        # the very same file, so the stat results compare equal
        s2 = os.stat(p2)
        if s2 == s1:
            return False
        return True
    except OSError:
        return True
628 627
# Tri-state flag used by compilere():
#   None  - re2 imported, usability not yet probed
#   True  - re2 verified working
#   False - re2 unavailable
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
634 633
def compilere(pat):
    '''Compile a regular expression, using re2 if possible

    For best performance, use only re2-compatible regexp features.'''
    global _re2
    if _re2 is None:
        # first call: probe whether re2 is really usable.  ImportError
        # (rather than AttributeError) is caught -- presumably because the
        # top-level 'import re2' can be satisfied lazily by a demand
        # importer and only fail when first touched; TODO confirm
        try:
            re2.compile
            _re2 = True
        except ImportError:
            _re2 = False
    if _re2:
        try:
            return re2.compile(pat)
        except re2.error:
            # pattern uses a feature re2 lacks; fall back to stdlib re
            pass
    return re.compile(pat)
652 651
# cache of directory listings keyed by directory path, shared across calls
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def find(p, contents):
        # return the directory entry whose normcase-ed form matches p
        for n in contents:
            if normcase(n) == p:
                return n
        return None

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # seps is interpolated into the character classes below, where a bare
    # '\' would act as a regex escape instead of matching itself.  (The
    # previous code called seps.replace() and discarded the result, which
    # was a no-op -- strings are immutable.)
    seps = seps.replace('\\', '\\\\')
    pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs pass through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = os.listdir(dir)
        contents = _fspathcache[dir]

        found = find(part, contents)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            contents = os.listdir(dir)
            _fspathcache[dir] = contents
            found = find(part, contents)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
699 698
def checknlink(testfile):
    '''check whether hardlink count reporting works properly

    Creates two scratch files next to testfile, links one to the other,
    and verifies that nlinks() sees the increased link count.  Returns
    False on any failure along the way.
    '''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        try:
            oslink(f1, f2)
        except OSError:
            return False

        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    finally:
        if fd is not None:
            fd.close()
        # best-effort cleanup of both scratch files
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass

    # NOTE(review): unreachable -- every path above returns inside the
    # try/finally
    return False
735 734
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # altsep may be None, in which case this short-circuits to None (falsy)
    return os.altsep and path.endswith(os.altsep)
739 738
def splitpath(path):
    """Split *path* on os.sep -- and only os.sep; os.altsep is deliberately
    ignored, since this is merely a spelled-out ``path.split(os.sep)``.

    Run os.path.normpath() on the input first if normalization is needed.
    """
    return path.split(os.sep)
747 746
def gui():
    '''Are we running in a GUI?'''
    if sys.platform == 'darwin':
        if 'SSH_CONNECTION' in os.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        # truthy on Windows or when an X display is configured
        return os.name == "nt" or os.environ.get("DISPLAY")
762 761
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    # create the temp file in the same directory so a later rename over
    # the original stays on one filesystem
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            # a missing original is fine: the temp file stays empty
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
801 800
class atomictempfile(object):
    '''writeable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # the atomic publish: only the first close renames the temp file
        # over the permanent name; later calls are no-ops
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # throw away all buffered writes; the original file is untouched
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
839 838
def makedirs(name, mode=None):
    """recursive directory creation with parent mode inheritance

    Applies *mode* (when given) to every directory this call creates.
    An already-existing leaf directory is treated as success.
    """
    try:
        os.mkdir(name)
    except OSError, err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # missing parent: create it, then retry the leaf
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode)
        os.mkdir(name)
    if mode is not None:
        os.chmod(name, mode)
856 855
def readfile(path):
    """Return the entire content of the file at *path* (binary mode)."""
    f = open(path, 'rb')
    try:
        data = f.read()
    finally:
        f.close()
    return data
863 862
def writefile(path, text):
    """Replace the content of the file at *path* with *text* (binary mode)."""
    f = open(path, 'wb')
    try:
        f.write(text)
    finally:
        f.close()
870 869
def appendfile(path, text):
    """Append *text* to the file at *path* (binary mode, created if absent)."""
    f = open(path, 'ab')
    try:
        f.write(text)
    finally:
        f.close()
877 876
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # break chunks larger than 1 MiB into 256 kiB pieces so the
            # queue never holds huge strings
            for chunk in chunks:
                size = len(chunk)
                if size <= 2 ** 20:
                    yield chunk
                else:
                    for start in range(0, size, 2 ** 18):
                        yield chunk[start:start + 2 ** 18]
        self.iter = splitbig(in_iter)
        self._queue = deque()

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        remaining = l
        parts = []
        pending = self._queue
        while remaining > 0:
            if not pending:
                # refill the queue with roughly 256 kiB from the iterator
                want = 2 ** 18
                for piece in self.iter:
                    pending.append(piece)
                    want -= len(piece)
                    if want <= 0:
                        break
                if not pending:
                    break  # iterator exhausted
            piece = pending.popleft()
            remaining -= len(piece)
            if remaining < 0:
                # piece overshoots the request: keep the surplus queued
                pending.appendleft(piece[remaining:])
                parts.append(piece[:remaining])
            else:
                parts.append(piece)
        return ''.join(parts)
925 924
def filechunkiter(f, size=65536, limit=None):
    """Generate successive reads from file object *f*.

    Each chunk holds at most *size* bytes (default 65536), and at most
    *limit* bytes are read in total when limit is not None.  A chunk may
    be shorter than *size* for the final chunk, or when *f* is a socket
    or similar object that returns short reads.
    """
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # a zero request short-circuits without touching f
        data = nbytes and f.read(nbytes)
        if not data:
            break
        if limit:
            limit -= len(data)
        yield data
946 945
def makedate():
    """Return the current time as a (unixtime, offset) tuple, where offset
    is the local timezone's distance from UTC in seconds."""
    ct = time.time()
    if ct < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % ct, hint=hint)
    # offset = UTC wall clock minus local wall clock for this instant
    delta = (datetime.datetime.utcfromtimestamp(ct) -
             datetime.datetime.fromtimestamp(ct))
    tz = delta.days * 86400 + delta.seconds
    return ct, tz
956 955
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.  %1 and %2 in the format expand to
    the timezone sign+hours and minutes respectively."""
    t, tz = date or makedate()
    if t < 0:
        # time.gmtime(lt) fails on Windows for lt < -43200
        t, tz = 0, 0
    if "%1" in format or "%2" in format:
        sign = "-" if tz > 0 else "+"
        minutes = abs(tz) // 60
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    try:
        expanded = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        expanded = time.gmtime(sys.maxint)
    return time.strftime(format, expanded)
978 977
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into an ISO 8601 date (YYYY-MM-DD)."""
    return datestr(date, format='%Y-%m-%d')
982 981
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    NOTE(review): despite the `defaults=[]` default, the code indexes
    defaults with string keys and two-element values -- callers are
    expected to pass a mapping like the one parsedate() builds; confirm
    before calling with the default.
    """
    def timezone(string):
        # recognize a trailing '+HHMM'/'-HHMM', 'GMT' or 'UTC' token;
        # returns the offset in seconds, or None if absent
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            sign = (tz[0] == "+") and 1 or -1
            hours = int(tz[1:3])
            minutes = int(tz[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tz == "GMT" or tz == "UTC":
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # defaults[part] is a (biased, now) pair; the bool usenow
            # indexes it (False -> biased value, True -> today's value)
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1023 1022
def parsedate(date, formats=None, bias={}):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()
    try:
        # fast path: "unixtime offset"
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1077 1076
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # earliest timestamp the (possibly partial) date can denote
        d = dict(mb="1", d="1")
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp: bias unknown fields high, probing month
        # lengths from 31 down to 28 until one parses
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N': within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # inclusive range between two dates
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match the whole span it denotes
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1153 1152
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop everything from the '@' on, then anything before a '<'
    pos = user.find('@')
    if pos >= 0:
        user = user[:pos]
    pos = user.find('<')
    if pos >= 0:
        user = user[pos + 1:]
    # finally truncate at the first space or dot
    for cut in (' ', '.'):
        pos = user.find(cut)
        if pos >= 0:
            user = user[:pos]
    return user
1169 1168
def emailuser(user):
    """Return the user portion of an email address."""
    at = user.find('@')
    if at != -1:
        user = user[:at]
    lt = user.find('<')
    if lt != -1:
        user = user[lt + 1:]
    return user
1179 1178
def email(author):
    '''get email of author.'''
    # slice between '<' and '>' if present; otherwise return the whole
    # string (find() returning -1 makes both bounds degenerate)
    close = author.find('>')
    if close == -1:
        close = None
    return author[author.find('<') + 1:close]
1186 1185
1187 1186 def _ellipsis(text, maxlength):
1188 1187 if len(text) <= maxlength:
1189 1188 return text, False
1190 1189 else:
1191 1190 return "%s..." % (text[:maxlength - 3]), True
1192 1191
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    try:
        # decode first so truncation never lands inside a multi-byte
        # sequence, then re-encode the shortened text
        utext, truncated = _ellipsis(text.decode(encoding.encoding),
                                     maxlength)
        if truncated:
            return utext.encode(encoding.encoding)
        return text
    except (UnicodeDecodeError, UnicodeEncodeError):
        # undecodable input: fall back to byte-wise truncation
        return _ellipsis(text, maxlength)[0]
1204 1203
# (multiplier, divisor, format) rows for bytecount(), ordered from the
# largest unit down; the first row whose threshold (multiplier * divisor)
# is reached supplies the format string.
_byteunits = (
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1217 1216
def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''
    for multiplier, divisor, fmt in _byteunits:
        threshold = divisor * multiplier
        if nbytes >= threshold:
            return fmt % (nbytes / float(divisor))
    # below every threshold (e.g. negative counts): plain-bytes format
    return _byteunits[-1][2] % nbytes
1225 1224
def uirepr(s):
    """repr() variant that collapses doubled backslashes, keeping Windows
    paths readable in user-facing output."""
    r = repr(s)
    return r.replace('\\\\', '\\')
1229 1228
# delay import of textwrap
def MBTextWrapper(**kwargs):
    """Factory for a width-aware TextWrapper.

    On first call it defines the wrapper class and then *replaces this
    function* in the module namespace with that class (see the 'global'
    statement at the bottom), so subsequent uses construct instances
    directly.
    """
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in east asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def __init__(self, **kwargs):
            textwrap.TextWrapper.__init__(self, **kwargs)

            # for compatibility between 2.4 and 2.6
            if getattr(self, 'drop_whitespace', None) is None:
                self.drop_whitespace = kwargs.get('drop_whitespace', True)

        def _cutdown(self, ucstr, space_left):
            # split ucstr so the head occupies at most space_left columns
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (ie. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # memoize: from now on the name MBTextWrapper is the class itself
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1340 1339
def wrap(line, width, initindent='', hangindent=''):
    """Wrap *line* to *width* terminal columns, prefixing the first line
    with initindent and continuation lines with hangindent.

    All arguments are byte strings in the local encoding; the result is
    re-encoded the same way.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
1353 1352
def iterlines(iterator):
    """Yield every text line of every chunk produced by iterator."""
    for chunk in iterator:
        lines = chunk.splitlines()
        for singleline in lines:
            yield singleline
1358 1357
def expandpath(path):
    """Expand environment variables, then ~user references, in path."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
1361 1360
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    # frozen binaries (py2exe etc.) are invoked directly
    return [sys.executable] if mainfrozen() else gethgcmd()
1372 1371
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child; os.wait() returns a (pid, status) tuple
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD does not exist on Windows, hence the getattr
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # NOTE(review): terminated holds (pid, status) tuples, so
            # 'pid in terminated' never matches the bare int; liveness
            # detection effectively relies on testpid() — confirm upstream.
            # condfn() is re-checked in case the child satisfied the
            # condition just before exiting.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD handler
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
1407 1406
# Compatibility shim: any()/all() are builtins only since Python 2.5;
# provide pure-Python fallbacks on older interpreters.
try:
    any, all = any, all
except NameError:
    def any(iterable):
        for element in iterable:
            if element:
                return True
        return False

    def all(iterable):
        for element in iterable:
            if not element:
                return False
        return True
1422 1421
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    if fn is None:
        fn = lambda s: s
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        # map the bare prefix character to itself so a doubled prefix
        # collapses to a single literal one
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        mapping[prefix_char] = prefix_char
    matcher = re.compile(r'%s(%s)' % (prefix, patterns))
    # m.group() includes the (single-character) prefix; strip it before
    # looking up the replacement
    return matcher.sub(lambda m: fn(mapping[m.group()[1:]]), s)
1447 1446
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: fall through to a service-name lookup
        pass
    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)
1464 1463
# recognized spellings of true/false configuration values
_booleans = dict([(t, True) for t in '1 yes true on always'.split()] +
                 [(f, False) for f in '0 no false off never'.split()])

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower(), None)
1475 1474
1476 1475 _hexdig = '0123456789ABCDEFabcdef'
1477 1476 _hextochr = dict((a + b, chr(int(a + b, 16)))
1478 1477 for a in _hexdig for b in _hexdig)
1479 1478
1480 1479 def _urlunquote(s):
1481 1480 """unquote('abc%20def') -> 'abc def'."""
1482 1481 res = s.split('%')
1483 1482 # fastpath
1484 1483 if len(res) == 1:
1485 1484 return s
1486 1485 s = res[0]
1487 1486 for item in res[1:]:
1488 1487 try:
1489 1488 s += _hextochr[item[:2]] + item[2:]
1490 1489 except KeyError:
1491 1490 s += '%' + item
1492 1491 except UnicodeDecodeError:
1493 1492 s += unichr(int(item[:2], 16)) + item[2:]
1494 1493 return s
1495 1494
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    # characters left unescaped by __str__ in user/passwd and path
    # components respectively
    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:"
    _matchscheme = re.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        # _localpath: True when path is a plain filesystem path, not a URL
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
                path = path
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            # rsplit so passwords may contain '@'
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        # only components that were actually present are shown
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                # bracketed IPv6 literal: quote nothing
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        # Return (url-without-credentials, auth tuple or None) suitable
        # for urllib2 password managers.
        user, passwd = self.user, self.passwd
        try:
            # temporarily strip credentials so str() omits them
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        # "absolute" here means: cannot be joined onto a base path
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        # filesystem path for file:/bundle: URLs; original string otherwise
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath
1774 1773
def hasscheme(path):
    """Return True if path parses as a URL with a scheme component."""
    parsed = url(path)
    return bool(parsed.scheme)
1777 1776
def hasdriveletter(path):
    """Return a true value if path begins with a Windows drive letter.

    Falsy inputs ('' or None) are returned unchanged, preserving the
    original short-circuit behaviour.
    """
    if not path:
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
1780 1779
def urllocalpath(path):
    """Interpret path as a local filesystem path via the url parser."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
1783 1782
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # keep the structure but mask the secret
        parsed.passwd = '***'
    return str(parsed)
1790 1789
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = parsed.passwd = None
    return str(parsed)
1796 1795
def isatty(fd):
    """Return True if fd reports being attached to a terminal.

    Objects without an isatty() method are treated as non-terminals.
    """
    try:
        return fd.isatty()
    except AttributeError:
        return False
@@ -1,329 +1,328
1 1 # windows.py - Windows utility function implementations for Mercurial
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import osutil, encoding
10 10 import errno, msvcrt, os, re, sys, _winreg
11 11
12 12 import win32
13 13 executablepath = win32.executablepath
14 14 getuser = win32.getuser
15 15 hidewindow = win32.hidewindow
16 16 makedir = win32.makedir
17 17 nlinks = win32.nlinks
18 18 oslink = win32.oslink
19 19 samedevice = win32.samedevice
20 20 samefile = win32.samefile
21 21 setsignalhandler = win32.setsignalhandler
22 22 spawndetached = win32.spawndetached
23 23 termwidth = win32.termwidth
24 24 testpid = win32.testpid
25 25 unlink = win32.unlink
26 26
27 nulldev = 'NUL:'
28 27 umask = 0022
29 28
# wrap osutil.posixfile to provide friendlier exceptions
def posixfile(name, mode='r', buffering=-1):
    try:
        return osutil.posixfile(name, mode, buffering)
    except WindowsError, err:
        # translate WindowsError into the IOError callers expect,
        # including the offending file name in the message
        raise IOError(err.errno, '%s: %s' % (name, err.strerror))
posixfile.__doc__ = osutil.posixfile.__doc__
37 36
class winstdout(object):
    '''stdout on windows misbehaves if sent through a pipe'''

    def __init__(self, fp):
        # fp: the real file object being wrapped
        self.fp = fp

    def __getattr__(self, key):
        # delegate every attribute we don't override to the wrapped file
        return getattr(self.fp, key)

    def close(self):
        try:
            self.fp.close()
        except IOError:
            # closing is best-effort: the pipe may already be gone
            pass

    def write(self, s):
        try:
            # This is workaround for "Not enough space" error on
            # writing large size of data to console.
            limit = 16000
            l = len(s)
            start = 0
            self.softspace = 0
            while start < l:
                end = start + limit
                self.fp.write(s[start:end])
                start = end
        except IOError, inst:
            # errno 0 is treated as the benign closed-pipe case; any
            # other error is genuine and re-raised
            if inst.errno != 0:
                raise
            self.close()
            raise IOError(errno.EPIPE, 'Broken pipe')

    def flush(self):
        try:
            return self.fp.flush()
        except IOError, inst:
            # EINVAL on flush is mapped to the EPIPE callers already
            # handle; everything else re-raises
            if inst.errno != errno.EINVAL:
                raise
            self.close()
            raise IOError(errno.EPIPE, 'Broken pipe')

sys.__stdout__ = sys.stdout = winstdout(sys.stdout)
81 80
82 81 def _is_win_9x():
83 82 '''return true if run on windows 95, 98 or me.'''
84 83 try:
85 84 return sys.getwindowsversion()[3] == 1
86 85 except AttributeError:
87 86 return 'command' in os.environ.get('comspec', '')
88 87
def openhardlinks():
    """Whether hardlinked files may be kept open (not on Win9x)."""
    if _is_win_9x():
        return False
    return True
91 90
def parsepatchoutput(output_line):
    """parses the output produced by patch and returns the filename"""
    # skip the fixed "patching file " prefix (14 characters)
    filename = output_line[14:]
    if filename[0] == '`':
        # GNU patch quotes names containing blanks as `name'
        filename = filename[1:-1]
    return filename
98 97
def sshargs(sshcmd, host, user, port):
    '''Build argument list for ssh or Plink'''
    # Plink (PuTTY) spells the port option -P rather than ssh's -p
    pflag = '-P' if 'plink' in sshcmd.lower() else '-p'
    args = '%s@%s' % (user, host) if user else host
    if port:
        return '%s %s %s' % (args, pflag, port)
    return args
104 103
def setflags(f, l, x):
    # no-op: link and exec flags are not representable on this platform
    pass

def copymode(src, dst, mode=None):
    # no-op: there are no POSIX permission bits to copy here
    pass

def checkexec(path):
    # the executable bit cannot be stored, so it is never supported
    return False

def checklink(path):
    # symlinks are not supported by this platform layer
    return False
116 115
def setbinary(fd):
    """Put fd into binary mode so newlines are not translated."""
    # When run without console, pipes may expose invalid
    # fileno(), usually set to -1.
    fileno = getattr(fd, 'fileno', None)
    if fileno is None:
        return
    fd_num = fileno()
    if fd_num >= 0:
        msvcrt.setmode(fd_num, os.O_BINARY)
123 122
def pconvert(path):
    """Convert a local path to use '/' separators."""
    return '/'.join(path.split(os.sep))
126 125
def localpath(path):
    """Convert a '/'-separated path to Windows '\\' separators."""
    return '\\'.join(path.split('/'))
129 128
def normpath(path):
    """Normalize path, then convert separators to '/'."""
    normed = os.path.normpath(path)
    return pconvert(normed)
132 131
def normcase(path):
    # case-insensitive filesystem: fold to a canonical (upper) case
    # using the encoding-aware helper rather than str.upper
    return encoding.upper(path)
135 134
def realpath(path):
    '''
    Returns the true, canonical file system path equivalent to the given
    path.
    '''
    # TODO: There may be a more clever way to do this that also handles other,
    # less common file systems.
    resolved = os.path.realpath(path)
    return os.path.normpath(normcase(resolved))
144 143
def samestat(s1, s2):
    # stat results cannot be compared reliably on this platform, so
    # always report "different"
    return False
147 146
148 147 # A sequence of backslashes is special iff it precedes a double quote:
149 148 # - if there's an even number of backslashes, the double quote is not
150 149 # quoted (i.e. it ends the quoted region)
151 150 # - if there's an odd number of backslashes, the double quote is quoted
152 151 # - in both cases, every pair of backslashes is unquoted into a single
153 152 # backslash
154 153 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
155 154 # So, to quote a string, we must surround it in double quotes, double
# the number of backslashes that precede double quotes and add another
157 156 # backslash before every double quote (being careful with the double
158 157 # quote we've appended to the end)
# lazily-compiled pattern used by shellquote(); cached at module level
_quotere = None
def shellquote(s):
    """Quote s for cmd.exe, escaping embedded quotes and trailing
    backslashes per the MSVCRT argument-parsing rules above."""
    global _quotere
    if _quotere is None:
        _quotere = re.compile(r'(\\*)("|\\$)')
    escaped = _quotere.sub(r'\1\1\\\2', s)
    return '"%s"' % escaped
165 164
def quotecommand(cmd):
    """Build a command string suitable for os.popen* calls."""
    # Python versions since 2.7.1 do this extra quoting themselves
    needquote = sys.version_info < (2, 7, 1)
    return '"%s"' % cmd if needquote else cmd
172 171
def popen(command, mode='r'):
    """Open a pipe to command with its stderr discarded.

    Works around "popen spawned process may not write to stdout
    under windows" (http://bugs.python.org/issue1366) by redirecting
    stderr to os.devnull.
    """
    redirected = '%s 2> %s' % (command, os.devnull)
    return os.popen(quotecommand(redirected), mode)
179 178
def explainexit(code):
    """Return a (message, exitcode) pair describing a process exit."""
    message = _("exited with status %d") % code
    return message, code
182 181
# if you change this stub into a real check, please try to implement the
# username and groupname functions above, too.
def isowner(st):
    # stub: ownership cannot be determined here, so always claim it
    return True
187 186
188 187 def findexe(command):
189 188 '''Find executable for command searching like cmd.exe does.
190 189 If command is a basename then PATH is searched for command.
191 190 PATH isn't searched if command is an absolute or relative path.
192 191 An extension from PATHEXT is found and added if not present.
193 192 If command isn't found None is returned.'''
194 193 pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
195 194 pathexts = [ext for ext in pathext.lower().split(os.pathsep)]
196 195 if os.path.splitext(command)[1].lower() in pathexts:
197 196 pathexts = ['']
198 197
199 198 def findexisting(pathcommand):
200 199 'Will append extension (if needed) and return existing file'
201 200 for ext in pathexts:
202 201 executable = pathcommand + ext
203 202 if os.path.exists(executable):
204 203 return executable
205 204 return None
206 205
207 206 if os.sep in command:
208 207 return findexisting(command)
209 208
210 209 for path in os.environ.get('PATH', '').split(os.pathsep):
211 210 executable = findexisting(os.path.join(path, command))
212 211 if executable is not None:
213 212 return executable
214 213 return findexisting(os.path.expanduser(os.path.expandvars(command)))
215 214
def statfiles(files):
    '''Stat each file in files and yield stat or None if file does not exist.
    Cluster and cache stat per directory to minimize number of OS stat calls.'''
    dircache = {} # dirname -> filename -> status | None if file does not exist
    for nf in files:
        nf = normcase(nf)
        dir, base = os.path.split(nf)
        if not dir:
            dir = '.'
        cache = dircache.get(dir, None)
        if cache is None:
            try:
                # one listdir per directory replaces one stat per file
                dmap = dict([(normcase(n), s)
                             for n, k, s in osutil.listdir(dir, True)])
            except OSError, err:
                # handle directory not found in Python version prior to 2.5
                # Python <= 2.4 returns native Windows code 3 in errno
                # Python >= 2.5 returns ENOENT and adds winerror field
                # EINVAL is raised if dir is not a directory.
                if err.errno not in (3, errno.ENOENT, errno.EINVAL,
                                     errno.ENOTDIR):
                    raise
                dmap = {}
            cache = dircache.setdefault(dir, dmap)
        yield cache.get(base, None)
241 240
def username(uid=None):
    """Return the name of the user with the given uid.

    If uid is None, return the name of the current user."""
    # not implemented on this platform; None tells callers "unknown"
    return None

def groupname(gid=None):
    """Return the name of the group with the given gid.

    If gid is None, return the name of the current group."""
    # not implemented on this platform; None tells callers "unknown"
    return None
253 252
def _removedirs(name):
    """special version of os.removedirs that does not remove symlinked
    directories or junction points if they actually contain files"""
    # only prune when listdir shows the directory truly empty, so
    # populated junction points / symlinked dirs are left alone
    if osutil.listdir(name):
        return
    os.rmdir(name)
    head, tail = os.path.split(name)
    if not tail:
        head, tail = os.path.split(head)
    # walk up the tree, removing empty ancestors until one is non-empty
    while head and tail:
        try:
            if osutil.listdir(head):
                return
            os.rmdir(head)
        except (ValueError, OSError):
            break
        head, tail = os.path.split(head)
271 270
def unlinkpath(f):
    """unlink and remove the directory if it is empty"""
    unlink(f)
    # try removing directories that the unlink may have emptied
    parent = os.path.dirname(f)
    try:
        _removedirs(parent)
    except OSError:
        pass
280 279
def rename(src, dst):
    '''atomically rename file src to dst, replacing dst if it exists'''
    try:
        os.rename(src, dst)
    except OSError, e:
        # renaming onto an existing file fails with EEXIST here;
        # remove the destination and retry once
        if e.errno != errno.EEXIST:
            raise
        unlink(dst)
        os.rename(src, dst)
290 289
def gethgcmd():
    """Return the interpreter plus script used to launch this hg."""
    cmd = [sys.executable]
    cmd.extend(sys.argv[:1])
    return cmd
293 292
def groupmembers(name):
    """Always raise KeyError: group lookup is unsupported here."""
    # Don't support groups on Windows for now
    raise KeyError
297 296
def isexec(f):
    # there is no executable bit on this platform; nothing is "exec"
    return False
300 299
class cachestat(object):
    """Stub stat-based cache validator; never usable on this platform."""

    def __init__(self, path):
        # nothing worth recording, since cacheable() is always False
        pass

    def cacheable(self):
        """Whether this stat result can validate a cache entry."""
        return False
307 306
def lookupreg(key, valname=None, scope=None):
    ''' Look up a key/value name in the Windows registry.

    valname: value name. If unspecified, the default value for the key
    is used.
    scope: optionally specify scope for registry lookup, this can be
    a sequence of scopes to look up in order. Default (CURRENT_USER,
    LOCAL_MACHINE).
    '''
    if scope is None:
        scopes = (_winreg.HKEY_CURRENT_USER, _winreg.HKEY_LOCAL_MACHINE)
    elif isinstance(scope, (list, tuple)):
        scopes = scope
    else:
        scopes = (scope,)
    for hive in scopes:
        try:
            hkey = _winreg.OpenKey(hive, key)
            val = _winreg.QueryValueEx(hkey, valname)[0]
            # never let a Unicode string escape into the wild
            return encoding.tolocal(val.encode('UTF-8'))
        except EnvironmentError:
            # missing key/value in this hive: try the next scope
            pass

expandglobs = True
General Comments 0
You need to be logged in to leave comments. Login now