Find right hg command for detached process...
Patrick Mezard
r10239:8e4be44a default
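This changeset replaces sys.argv[0] / sys.argv[:] with util.hgcmd() wherever an argument list is built for a detached child process. sys.argv[0] is not always something that can be spawned on its own (for example when hg runs as "python hg" or as a frozen Windows executable), so the inotify server's start() and cmdutil.service() now ask util.hgcmd() for the right way to re-invoke hg. A minimal sketch of what such a helper has to do is shown below; the name current_command and its details are illustrative assumptions, not Mercurial's actual hgcmd() implementation.

# Illustrative sketch only -- not mercurial.util.hgcmd() itself.
# Goal: build a command list that can re-invoke the running program from a
# detached child, whether it was started as a script, via the interpreter,
# or as a frozen executable.
import os
import sys

def current_command():
    if getattr(sys, 'frozen', False):
        # py2exe-style frozen binaries can simply be re-run.
        return [sys.executable]
    # A plain script may not be executable by itself, so prefix the
    # interpreter that is running it.
    return [sys.executable, os.path.abspath(sys.argv[0])]

# Used the same way util.hgcmd() is used in the diff below, e.g.:
#     runargs = current_command() + ['inserve', '-R', root]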
@@ -1,478 +1,478 b''
1 # server.py - common entry point for inotify status server
1 # server.py - common entry point for inotify status server
2 #
2 #
3 # Copyright 2009 Nicolas Dumazet <nicdumz@gmail.com>
3 # Copyright 2009 Nicolas Dumazet <nicdumz@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from mercurial.i18n import _
8 from mercurial.i18n import _
9 from mercurial import cmdutil, osutil, util
9 from mercurial import cmdutil, osutil, util
10 import common
10 import common
11
11
12 import errno
12 import errno
13 import os
13 import os
14 import socket
14 import socket
15 import stat
15 import stat
16 import struct
16 import struct
17 import sys
17 import sys
18 import tempfile
18 import tempfile
19
19
20 class AlreadyStartedException(Exception): pass
20 class AlreadyStartedException(Exception): pass
21
21
22 def join(a, b):
22 def join(a, b):
23 if a:
23 if a:
24 if a[-1] == '/':
24 if a[-1] == '/':
25 return a + b
25 return a + b
26 return a + '/' + b
26 return a + '/' + b
27 return b
27 return b
28
28
29 def split(path):
29 def split(path):
30 c = path.rfind('/')
30 c = path.rfind('/')
31 if c == -1:
31 if c == -1:
32 return '', path
32 return '', path
33 return path[:c], path[c+1:]
33 return path[:c], path[c+1:]
34
34
35 walk_ignored_errors = (errno.ENOENT, errno.ENAMETOOLONG)
35 walk_ignored_errors = (errno.ENOENT, errno.ENAMETOOLONG)
36
36
37 def walk(dirstate, absroot, root):
37 def walk(dirstate, absroot, root):
38 '''Like os.walk, but only yields regular files.'''
38 '''Like os.walk, but only yields regular files.'''
39
39
40 # This function is critical to performance during startup.
40 # This function is critical to performance during startup.
41
41
42 def walkit(root, reporoot):
42 def walkit(root, reporoot):
43 files, dirs = [], []
43 files, dirs = [], []
44
44
45 try:
45 try:
46 fullpath = join(absroot, root)
46 fullpath = join(absroot, root)
47 for name, kind in osutil.listdir(fullpath):
47 for name, kind in osutil.listdir(fullpath):
48 if kind == stat.S_IFDIR:
48 if kind == stat.S_IFDIR:
49 if name == '.hg':
49 if name == '.hg':
50 if not reporoot:
50 if not reporoot:
51 return
51 return
52 else:
52 else:
53 dirs.append(name)
53 dirs.append(name)
54 path = join(root, name)
54 path = join(root, name)
55 if dirstate._ignore(path):
55 if dirstate._ignore(path):
56 continue
56 continue
57 for result in walkit(path, False):
57 for result in walkit(path, False):
58 yield result
58 yield result
59 elif kind in (stat.S_IFREG, stat.S_IFLNK):
59 elif kind in (stat.S_IFREG, stat.S_IFLNK):
60 files.append(name)
60 files.append(name)
61 yield fullpath, dirs, files
61 yield fullpath, dirs, files
62
62
63 except OSError, err:
63 except OSError, err:
64 if err.errno == errno.ENOTDIR:
64 if err.errno == errno.ENOTDIR:
65 # fullpath was a directory, but has since been replaced
65 # fullpath was a directory, but has since been replaced
66 # by a file.
66 # by a file.
67 yield fullpath, dirs, files
67 yield fullpath, dirs, files
68 elif err.errno not in walk_ignored_errors:
68 elif err.errno not in walk_ignored_errors:
69 raise
69 raise
70
70
71 return walkit(root, root == '')
71 return walkit(root, root == '')
72
72
73 class directory(object):
73 class directory(object):
74 """
74 """
75 Representing a directory
75 Representing a directory
76
76
77 * path is the relative path from repo root to this directory
77 * path is the relative path from repo root to this directory
78 * files is a dict listing the files in this directory
78 * files is a dict listing the files in this directory
79 - keys are file names
79 - keys are file names
80 - values are file status
80 - values are file status
81 * dirs is a dict listing the subdirectories
81 * dirs is a dict listing the subdirectories
82 - key are subdirectories names
82 - key are subdirectories names
83 - values are directory objects
83 - values are directory objects
84 """
84 """
85 def __init__(self, relpath=''):
85 def __init__(self, relpath=''):
86 self.path = relpath
86 self.path = relpath
87 self.files = {}
87 self.files = {}
88 self.dirs = {}
88 self.dirs = {}
89
89
90 def dir(self, relpath):
90 def dir(self, relpath):
91 """
91 """
92 Returns the directory contained at the relative path relpath.
92 Returns the directory contained at the relative path relpath.
93 Creates the intermediate directories if necessary.
93 Creates the intermediate directories if necessary.
94 """
94 """
95 if not relpath:
95 if not relpath:
96 return self
96 return self
97 l = relpath.split('/')
97 l = relpath.split('/')
98 ret = self
98 ret = self
99 while l:
99 while l:
100 next = l.pop(0)
100 next = l.pop(0)
101 try:
101 try:
102 ret = ret.dirs[next]
102 ret = ret.dirs[next]
103 except KeyError:
103 except KeyError:
104 d = directory(join(ret.path, next))
104 d = directory(join(ret.path, next))
105 ret.dirs[next] = d
105 ret.dirs[next] = d
106 ret = d
106 ret = d
107 return ret
107 return ret
108
108
109 def walk(self, states, visited=None):
109 def walk(self, states, visited=None):
110 """
110 """
111 yield (filename, status) pairs for items in the trees
111 yield (filename, status) pairs for items in the trees
112 that have status in states.
112 that have status in states.
113 filenames are relative to the repo root
113 filenames are relative to the repo root
114 """
114 """
115 for file, st in self.files.iteritems():
115 for file, st in self.files.iteritems():
116 if st in states:
116 if st in states:
117 yield join(self.path, file), st
117 yield join(self.path, file), st
118 for dir in self.dirs.itervalues():
118 for dir in self.dirs.itervalues():
119 if visited is not None:
119 if visited is not None:
120 visited.add(dir.path)
120 visited.add(dir.path)
121 for e in dir.walk(states):
121 for e in dir.walk(states):
122 yield e
122 yield e
123
123
124 def lookup(self, states, path, visited):
124 def lookup(self, states, path, visited):
125 """
125 """
126 yield root-relative filenames that match path, and whose
126 yield root-relative filenames that match path, and whose
127 status are in states:
127 status are in states:
128 * if path is a file, yield path
128 * if path is a file, yield path
129 * if path is a directory, yield directory files
129 * if path is a directory, yield directory files
130 * if path is not tracked, yield nothing
130 * if path is not tracked, yield nothing
131 """
131 """
132 if path[-1] == '/':
132 if path[-1] == '/':
133 path = path[:-1]
133 path = path[:-1]
134
134
135 paths = path.split('/')
135 paths = path.split('/')
136
136
137 # we need to check separately for last node
137 # we need to check separately for last node
138 last = paths.pop()
138 last = paths.pop()
139
139
140 tree = self
140 tree = self
141 try:
141 try:
142 for dir in paths:
142 for dir in paths:
143 tree = tree.dirs[dir]
143 tree = tree.dirs[dir]
144 except KeyError:
144 except KeyError:
145 # path is not tracked
145 # path is not tracked
146 visited.add(tree.path)
146 visited.add(tree.path)
147 return
147 return
148
148
149 try:
149 try:
150 # if path is a directory, walk it
150 # if path is a directory, walk it
151 target = tree.dirs[last]
151 target = tree.dirs[last]
152 visited.add(target.path)
152 visited.add(target.path)
153 for file, st in target.walk(states, visited):
153 for file, st in target.walk(states, visited):
154 yield file
154 yield file
155 except KeyError:
155 except KeyError:
156 try:
156 try:
157 if tree.files[last] in states:
157 if tree.files[last] in states:
158 # path is a file
158 # path is a file
159 visited.add(tree.path)
159 visited.add(tree.path)
160 yield path
160 yield path
161 except KeyError:
161 except KeyError:
162 # path is not tracked
162 # path is not tracked
163 pass
163 pass
164
164
165 class repowatcher(object):
165 class repowatcher(object):
166 """
166 """
167 Watches inotify events
167 Watches inotify events
168 """
168 """
169 statuskeys = 'almr!?'
169 statuskeys = 'almr!?'
170
170
171 def __init__(self, ui, dirstate, root):
171 def __init__(self, ui, dirstate, root):
172 self.ui = ui
172 self.ui = ui
173 self.dirstate = dirstate
173 self.dirstate = dirstate
174
174
175 self.wprefix = join(root, '')
175 self.wprefix = join(root, '')
176 self.prefixlen = len(self.wprefix)
176 self.prefixlen = len(self.wprefix)
177
177
178 self.tree = directory()
178 self.tree = directory()
179 self.statcache = {}
179 self.statcache = {}
180 self.statustrees = dict([(s, directory()) for s in self.statuskeys])
180 self.statustrees = dict([(s, directory()) for s in self.statuskeys])
181
181
182 self.ds_info = self.dirstate_info()
182 self.ds_info = self.dirstate_info()
183
183
184 self.last_event = None
184 self.last_event = None
185
185
186
186
187 def handle_timeout(self):
187 def handle_timeout(self):
188 pass
188 pass
189
189
190 def dirstate_info(self):
190 def dirstate_info(self):
191 try:
191 try:
192 st = os.lstat(self.wprefix + '.hg/dirstate')
192 st = os.lstat(self.wprefix + '.hg/dirstate')
193 return st.st_mtime, st.st_ino
193 return st.st_mtime, st.st_ino
194 except OSError, err:
194 except OSError, err:
195 if err.errno != errno.ENOENT:
195 if err.errno != errno.ENOENT:
196 raise
196 raise
197 return 0, 0
197 return 0, 0
198
198
199 def filestatus(self, fn, st):
199 def filestatus(self, fn, st):
200 try:
200 try:
201 type_, mode, size, time = self.dirstate._map[fn][:4]
201 type_, mode, size, time = self.dirstate._map[fn][:4]
202 except KeyError:
202 except KeyError:
203 type_ = '?'
203 type_ = '?'
204 if type_ == 'n':
204 if type_ == 'n':
205 st_mode, st_size, st_mtime = st
205 st_mode, st_size, st_mtime = st
206 if size == -1:
206 if size == -1:
207 return 'l'
207 return 'l'
208 if size and (size != st_size or (mode ^ st_mode) & 0100):
208 if size and (size != st_size or (mode ^ st_mode) & 0100):
209 return 'm'
209 return 'm'
210 if time != int(st_mtime):
210 if time != int(st_mtime):
211 return 'l'
211 return 'l'
212 return 'n'
212 return 'n'
213 if type_ == '?' and self.dirstate._ignore(fn):
213 if type_ == '?' and self.dirstate._ignore(fn):
214 return 'i'
214 return 'i'
215 return type_
215 return type_
216
216
217 def updatefile(self, wfn, osstat):
217 def updatefile(self, wfn, osstat):
218 '''
218 '''
219 update the file entry of an existing file.
219 update the file entry of an existing file.
220
220
221 osstat: (mode, size, time) tuple, as returned by os.lstat(wfn)
221 osstat: (mode, size, time) tuple, as returned by os.lstat(wfn)
222 '''
222 '''
223
223
224 self._updatestatus(wfn, self.filestatus(wfn, osstat))
224 self._updatestatus(wfn, self.filestatus(wfn, osstat))
225
225
226 def deletefile(self, wfn, oldstatus):
226 def deletefile(self, wfn, oldstatus):
227 '''
227 '''
228 update the entry of a file which has been deleted.
228 update the entry of a file which has been deleted.
229
229
230 oldstatus: char in statuskeys, status of the file before deletion
230 oldstatus: char in statuskeys, status of the file before deletion
231 '''
231 '''
232 if oldstatus == 'r':
232 if oldstatus == 'r':
233 newstatus = 'r'
233 newstatus = 'r'
234 elif oldstatus in 'almn':
234 elif oldstatus in 'almn':
235 newstatus = '!'
235 newstatus = '!'
236 else:
236 else:
237 newstatus = None
237 newstatus = None
238
238
239 self.statcache.pop(wfn, None)
239 self.statcache.pop(wfn, None)
240 self._updatestatus(wfn, newstatus)
240 self._updatestatus(wfn, newstatus)
241
241
242 def _updatestatus(self, wfn, newstatus):
242 def _updatestatus(self, wfn, newstatus):
243 '''
243 '''
244 Update the stored status of a file.
244 Update the stored status of a file.
245
245
246 newstatus: - char in (statuskeys + 'ni'), new status to apply.
246 newstatus: - char in (statuskeys + 'ni'), new status to apply.
247 - or None, to stop tracking wfn
247 - or None, to stop tracking wfn
248 '''
248 '''
249 root, fn = split(wfn)
249 root, fn = split(wfn)
250 d = self.tree.dir(root)
250 d = self.tree.dir(root)
251
251
252 oldstatus = d.files.get(fn)
252 oldstatus = d.files.get(fn)
253 # oldstatus can be either:
253 # oldstatus can be either:
254 # - None : fn is new
254 # - None : fn is new
255 # - a char in statuskeys: fn is a (tracked) file
255 # - a char in statuskeys: fn is a (tracked) file
256
256
257 if self.ui.debugflag and oldstatus != newstatus:
257 if self.ui.debugflag and oldstatus != newstatus:
258 self.ui.note(_('status: %r %s -> %s\n') %
258 self.ui.note(_('status: %r %s -> %s\n') %
259 (wfn, oldstatus, newstatus))
259 (wfn, oldstatus, newstatus))
260
260
261 if oldstatus and oldstatus in self.statuskeys \
261 if oldstatus and oldstatus in self.statuskeys \
262 and oldstatus != newstatus:
262 and oldstatus != newstatus:
263 del self.statustrees[oldstatus].dir(root).files[fn]
263 del self.statustrees[oldstatus].dir(root).files[fn]
264
264
265 if newstatus in (None, 'i'):
265 if newstatus in (None, 'i'):
266 d.files.pop(fn, None)
266 d.files.pop(fn, None)
267 elif oldstatus != newstatus:
267 elif oldstatus != newstatus:
268 d.files[fn] = newstatus
268 d.files[fn] = newstatus
269 if newstatus != 'n':
269 if newstatus != 'n':
270 self.statustrees[newstatus].dir(root).files[fn] = newstatus
270 self.statustrees[newstatus].dir(root).files[fn] = newstatus
271
271
272 def check_deleted(self, key):
272 def check_deleted(self, key):
273 # Files that had been deleted but were present in the dirstate
273 # Files that had been deleted but were present in the dirstate
274 # may have vanished from the dirstate; we must clean them up.
274 # may have vanished from the dirstate; we must clean them up.
275 nuke = []
275 nuke = []
276 for wfn, ignore in self.statustrees[key].walk(key):
276 for wfn, ignore in self.statustrees[key].walk(key):
277 if wfn not in self.dirstate:
277 if wfn not in self.dirstate:
278 nuke.append(wfn)
278 nuke.append(wfn)
279 for wfn in nuke:
279 for wfn in nuke:
280 root, fn = split(wfn)
280 root, fn = split(wfn)
281 del self.statustrees[key].dir(root).files[fn]
281 del self.statustrees[key].dir(root).files[fn]
282 del self.tree.dir(root).files[fn]
282 del self.tree.dir(root).files[fn]
283
283
284 def update_hgignore(self):
284 def update_hgignore(self):
285 # An update of the ignore file can potentially change the
285 # An update of the ignore file can potentially change the
286 # states of all unknown and ignored files.
286 # states of all unknown and ignored files.
287
287
288 # XXX If the user has other ignore files outside the repo, or
288 # XXX If the user has other ignore files outside the repo, or
289 # changes their list of ignore files at run time, we'll
289 # changes their list of ignore files at run time, we'll
290 # potentially never see changes to them. We could get the
290 # potentially never see changes to them. We could get the
291 # client to report to us what ignore data they're using.
291 # client to report to us what ignore data they're using.
292 # But it's easier to do nothing than to open that can of
292 # But it's easier to do nothing than to open that can of
293 # worms.
293 # worms.
294
294
295 if '_ignore' in self.dirstate.__dict__:
295 if '_ignore' in self.dirstate.__dict__:
296 delattr(self.dirstate, '_ignore')
296 delattr(self.dirstate, '_ignore')
297 self.ui.note(_('rescanning due to .hgignore change\n'))
297 self.ui.note(_('rescanning due to .hgignore change\n'))
298 self.handle_timeout()
298 self.handle_timeout()
299 self.scan()
299 self.scan()
300
300
301 def getstat(self, wpath):
301 def getstat(self, wpath):
302 try:
302 try:
303 return self.statcache[wpath]
303 return self.statcache[wpath]
304 except KeyError:
304 except KeyError:
305 try:
305 try:
306 return self.stat(wpath)
306 return self.stat(wpath)
307 except OSError, err:
307 except OSError, err:
308 if err.errno != errno.ENOENT:
308 if err.errno != errno.ENOENT:
309 raise
309 raise
310
310
311 def stat(self, wpath):
311 def stat(self, wpath):
312 try:
312 try:
313 st = os.lstat(join(self.wprefix, wpath))
313 st = os.lstat(join(self.wprefix, wpath))
314 ret = st.st_mode, st.st_size, st.st_mtime
314 ret = st.st_mode, st.st_size, st.st_mtime
315 self.statcache[wpath] = ret
315 self.statcache[wpath] = ret
316 return ret
316 return ret
317 except OSError:
317 except OSError:
318 self.statcache.pop(wpath, None)
318 self.statcache.pop(wpath, None)
319 raise
319 raise
320
320
321 class socketlistener(object):
321 class socketlistener(object):
322 """
322 """
323 Listens for client queries on unix socket inotify.sock
323 Listens for client queries on unix socket inotify.sock
324 """
324 """
325 def __init__(self, ui, root, repowatcher, timeout):
325 def __init__(self, ui, root, repowatcher, timeout):
326 self.ui = ui
326 self.ui = ui
327 self.repowatcher = repowatcher
327 self.repowatcher = repowatcher
328 self.sock = socket.socket(socket.AF_UNIX)
328 self.sock = socket.socket(socket.AF_UNIX)
329 self.sockpath = join(root, '.hg/inotify.sock')
329 self.sockpath = join(root, '.hg/inotify.sock')
330 self.realsockpath = None
330 self.realsockpath = None
331 try:
331 try:
332 self.sock.bind(self.sockpath)
332 self.sock.bind(self.sockpath)
333 except socket.error, err:
333 except socket.error, err:
334 if err[0] == errno.EADDRINUSE:
334 if err[0] == errno.EADDRINUSE:
335 raise AlreadyStartedException( _('cannot start: socket is '
335 raise AlreadyStartedException( _('cannot start: socket is '
336 'already bound'))
336 'already bound'))
337 if err[0] == "AF_UNIX path too long":
337 if err[0] == "AF_UNIX path too long":
338 if os.path.islink(self.sockpath) and \
338 if os.path.islink(self.sockpath) and \
339 not os.path.exists(self.sockpath):
339 not os.path.exists(self.sockpath):
340 raise util.Abort('inotify-server: cannot start: '
340 raise util.Abort('inotify-server: cannot start: '
341 '.hg/inotify.sock is a broken symlink')
341 '.hg/inotify.sock is a broken symlink')
342 tempdir = tempfile.mkdtemp(prefix="hg-inotify-")
342 tempdir = tempfile.mkdtemp(prefix="hg-inotify-")
343 self.realsockpath = os.path.join(tempdir, "inotify.sock")
343 self.realsockpath = os.path.join(tempdir, "inotify.sock")
344 try:
344 try:
345 self.sock.bind(self.realsockpath)
345 self.sock.bind(self.realsockpath)
346 os.symlink(self.realsockpath, self.sockpath)
346 os.symlink(self.realsockpath, self.sockpath)
347 except (OSError, socket.error), inst:
347 except (OSError, socket.error), inst:
348 try:
348 try:
349 os.unlink(self.realsockpath)
349 os.unlink(self.realsockpath)
350 except:
350 except:
351 pass
351 pass
352 os.rmdir(tempdir)
352 os.rmdir(tempdir)
353 if inst.errno == errno.EEXIST:
353 if inst.errno == errno.EEXIST:
354 raise AlreadyStartedException(_('cannot start: tried '
354 raise AlreadyStartedException(_('cannot start: tried '
355 'linking .hg/inotify.sock to a temporary socket but'
355 'linking .hg/inotify.sock to a temporary socket but'
356 ' .hg/inotify.sock already exists'))
356 ' .hg/inotify.sock already exists'))
357 raise
357 raise
358 else:
358 else:
359 raise
359 raise
360 self.sock.listen(5)
360 self.sock.listen(5)
361 self.fileno = self.sock.fileno
361 self.fileno = self.sock.fileno
362
362
363 def answer_stat_query(self, cs):
363 def answer_stat_query(self, cs):
364 names = cs.read().split('\0')
364 names = cs.read().split('\0')
365
365
366 states = names.pop()
366 states = names.pop()
367
367
368 self.ui.note(_('answering query for %r\n') % states)
368 self.ui.note(_('answering query for %r\n') % states)
369
369
370 visited = set()
370 visited = set()
371 if not names:
371 if not names:
372 def genresult(states, tree):
372 def genresult(states, tree):
373 for fn, state in tree.walk(states):
373 for fn, state in tree.walk(states):
374 yield fn
374 yield fn
375 else:
375 else:
376 def genresult(states, tree):
376 def genresult(states, tree):
377 for fn in names:
377 for fn in names:
378 for f in tree.lookup(states, fn, visited):
378 for f in tree.lookup(states, fn, visited):
379 yield f
379 yield f
380
380
381 return ['\0'.join(r) for r in [
381 return ['\0'.join(r) for r in [
382 genresult('l', self.repowatcher.statustrees['l']),
382 genresult('l', self.repowatcher.statustrees['l']),
383 genresult('m', self.repowatcher.statustrees['m']),
383 genresult('m', self.repowatcher.statustrees['m']),
384 genresult('a', self.repowatcher.statustrees['a']),
384 genresult('a', self.repowatcher.statustrees['a']),
385 genresult('r', self.repowatcher.statustrees['r']),
385 genresult('r', self.repowatcher.statustrees['r']),
386 genresult('!', self.repowatcher.statustrees['!']),
386 genresult('!', self.repowatcher.statustrees['!']),
387 '?' in states
387 '?' in states
388 and genresult('?', self.repowatcher.statustrees['?'])
388 and genresult('?', self.repowatcher.statustrees['?'])
389 or [],
389 or [],
390 [],
390 [],
391 'c' in states and genresult('n', self.repowatcher.tree) or [],
391 'c' in states and genresult('n', self.repowatcher.tree) or [],
392 visited
392 visited
393 ]]
393 ]]
394
394
395 def answer_dbug_query(self):
395 def answer_dbug_query(self):
396 return ['\0'.join(self.repowatcher.debug())]
396 return ['\0'.join(self.repowatcher.debug())]
397
397
398 def accept_connection(self):
398 def accept_connection(self):
399 sock, addr = self.sock.accept()
399 sock, addr = self.sock.accept()
400
400
401 cs = common.recvcs(sock)
401 cs = common.recvcs(sock)
402 version = ord(cs.read(1))
402 version = ord(cs.read(1))
403
403
404 if version != common.version:
404 if version != common.version:
405 self.ui.warn(_('received query from incompatible client '
405 self.ui.warn(_('received query from incompatible client '
406 'version %d\n') % version)
406 'version %d\n') % version)
407 try:
407 try:
408 # try to send back our version to the client
408 # try to send back our version to the client
409 # this way, the client too is informed of the mismatch
409 # this way, the client too is informed of the mismatch
410 sock.sendall(chr(common.version))
410 sock.sendall(chr(common.version))
411 except:
411 except:
412 pass
412 pass
413 return
413 return
414
414
415 type = cs.read(4)
415 type = cs.read(4)
416
416
417 if type == 'STAT':
417 if type == 'STAT':
418 results = self.answer_stat_query(cs)
418 results = self.answer_stat_query(cs)
419 elif type == 'DBUG':
419 elif type == 'DBUG':
420 results = self.answer_dbug_query()
420 results = self.answer_dbug_query()
421 else:
421 else:
422 self.ui.warn(_('unrecognized query type: %s\n') % type)
422 self.ui.warn(_('unrecognized query type: %s\n') % type)
423 return
423 return
424
424
425 try:
425 try:
426 try:
426 try:
427 v = chr(common.version)
427 v = chr(common.version)
428
428
429 sock.sendall(v + type + struct.pack(common.resphdrfmts[type],
429 sock.sendall(v + type + struct.pack(common.resphdrfmts[type],
430 *map(len, results)))
430 *map(len, results)))
431 sock.sendall(''.join(results))
431 sock.sendall(''.join(results))
432 finally:
432 finally:
433 sock.shutdown(socket.SHUT_WR)
433 sock.shutdown(socket.SHUT_WR)
434 except socket.error, err:
434 except socket.error, err:
435 if err[0] != errno.EPIPE:
435 if err[0] != errno.EPIPE:
436 raise
436 raise
437
437
438 if sys.platform == 'linux2':
438 if sys.platform == 'linux2':
439 import linuxserver as _server
439 import linuxserver as _server
440 else:
440 else:
441 raise ImportError
441 raise ImportError
442
442
443 master = _server.master
443 master = _server.master
444
444
445 def start(ui, dirstate, root, opts):
445 def start(ui, dirstate, root, opts):
446 timeout = opts.get('timeout')
446 timeout = opts.get('timeout')
447 if timeout:
447 if timeout:
448 timeout = float(timeout) * 1e3
448 timeout = float(timeout) * 1e3
449
449
450 class service(object):
450 class service(object):
451 def init(self):
451 def init(self):
452 try:
452 try:
453 self.master = master(ui, dirstate, root, timeout)
453 self.master = master(ui, dirstate, root, timeout)
454 except AlreadyStartedException, inst:
454 except AlreadyStartedException, inst:
455 raise util.Abort("inotify-server: %s" % inst)
455 raise util.Abort("inotify-server: %s" % inst)
456
456
457 def run(self):
457 def run(self):
458 try:
458 try:
459 self.master.run()
459 self.master.run()
460 finally:
460 finally:
461 self.master.shutdown()
461 self.master.shutdown()
462
462
463 if 'inserve' not in sys.argv:
463 if 'inserve' not in sys.argv:
464 runargs = [sys.argv[0], 'inserve', '-R', root]
464 runargs = util.hgcmd() + ['inserve', '-R', root]
465 else:
465 else:
466 runargs = sys.argv[:]
466 runargs = util.hgcmd() + sys.argv[1:]
467
467
468 pidfile = ui.config('inotify', 'pidfile')
468 pidfile = ui.config('inotify', 'pidfile')
469 if opts['daemon'] and pidfile is not None and 'pid-file' not in runargs:
469 if opts['daemon'] and pidfile is not None and 'pid-file' not in runargs:
470 runargs.append("--pid-file=%s" % pidfile)
470 runargs.append("--pid-file=%s" % pidfile)
471
471
472 service = service()
472 service = service()
473 logfile = ui.config('inotify', 'log')
473 logfile = ui.config('inotify', 'log')
474
474
475 appendpid = ui.configbool('inotify', 'appendpid', False)
475 appendpid = ui.configbool('inotify', 'appendpid', False)
476
476
477 cmdutil.service(opts, initfn=service.init, runfn=service.run,
477 cmdutil.service(opts, initfn=service.init, runfn=service.run,
478 logfile=logfile, runargs=runargs, appendpid=appendpid)
478 logfile=logfile, runargs=runargs, appendpid=appendpid)
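The second file in this changeset, mercurial/cmdutil.py, applies the same substitution inside service(): when daemonizing, the parent now builds runargs from util.hgcmd() + sys.argv[1:], spawns the detached child with util.spawndetached(), and waits for the child to signal startup by removing a temporary lock file (see the service() hunk near the end of this diff). Below is a rough, self-contained sketch of that spawn-and-wait handshake; the names spawn_ready_daemon and --ready-file are invented for illustration, and plain subprocess.Popen stands in for util.spawndetached.

# Hypothetical sketch of the parent side of the daemon handshake in
# cmdutil.service(); not Mercurial's actual code.
import os
import subprocess
import sys
import tempfile
import time

def spawn_ready_daemon(extra_args):
    # The child signals readiness by removing this lock file.
    lockfd, lockpath = tempfile.mkstemp(prefix='svc-lock-')
    os.close(lockfd)
    # Re-invoke the current program with the interpreter prefixed, since
    # sys.argv[0] alone may not be runnable -- the problem this changeset
    # addresses with util.hgcmd().
    cmd = [sys.executable, os.path.abspath(sys.argv[0])] + list(extra_args)
    cmd.append('--ready-file=%s' % lockpath)
    child = subprocess.Popen(cmd, close_fds=True)
    while os.path.exists(lockpath):
        if child.poll() is not None:
            # Child exited before signalling; clean up and report failure.
            os.unlink(lockpath)
            raise RuntimeError('daemon failed to start')
        time.sleep(0.1)
    return child.pid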
@@ -1,1172 +1,1172 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from node import hex, nullid, nullrev, short
8 from node import hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, errno, re, glob, tempfile, time
10 import os, sys, errno, re, glob, tempfile, time
11 import mdiff, bdiff, util, templater, patch, error, encoding, templatekw
11 import mdiff, bdiff, util, templater, patch, error, encoding, templatekw
12 import match as _match
12 import match as _match
13
13
14 revrangesep = ':'
14 revrangesep = ':'
15
15
16 def findpossible(cmd, table, strict=False):
16 def findpossible(cmd, table, strict=False):
17 """
17 """
18 Return cmd -> (aliases, command table entry)
18 Return cmd -> (aliases, command table entry)
19 for each matching command.
19 for each matching command.
20 Return debug commands (or their aliases) only if no normal command matches.
20 Return debug commands (or their aliases) only if no normal command matches.
21 """
21 """
22 choice = {}
22 choice = {}
23 debugchoice = {}
23 debugchoice = {}
24 for e in table.keys():
24 for e in table.keys():
25 aliases = e.lstrip("^").split("|")
25 aliases = e.lstrip("^").split("|")
26 found = None
26 found = None
27 if cmd in aliases:
27 if cmd in aliases:
28 found = cmd
28 found = cmd
29 elif not strict:
29 elif not strict:
30 for a in aliases:
30 for a in aliases:
31 if a.startswith(cmd):
31 if a.startswith(cmd):
32 found = a
32 found = a
33 break
33 break
34 if found is not None:
34 if found is not None:
35 if aliases[0].startswith("debug") or found.startswith("debug"):
35 if aliases[0].startswith("debug") or found.startswith("debug"):
36 debugchoice[found] = (aliases, table[e])
36 debugchoice[found] = (aliases, table[e])
37 else:
37 else:
38 choice[found] = (aliases, table[e])
38 choice[found] = (aliases, table[e])
39
39
40 if not choice and debugchoice:
40 if not choice and debugchoice:
41 choice = debugchoice
41 choice = debugchoice
42
42
43 return choice
43 return choice
44
44
45 def findcmd(cmd, table, strict=True):
45 def findcmd(cmd, table, strict=True):
46 """Return (aliases, command table entry) for command string."""
46 """Return (aliases, command table entry) for command string."""
47 choice = findpossible(cmd, table, strict)
47 choice = findpossible(cmd, table, strict)
48
48
49 if cmd in choice:
49 if cmd in choice:
50 return choice[cmd]
50 return choice[cmd]
51
51
52 if len(choice) > 1:
52 if len(choice) > 1:
53 clist = choice.keys()
53 clist = choice.keys()
54 clist.sort()
54 clist.sort()
55 raise error.AmbiguousCommand(cmd, clist)
55 raise error.AmbiguousCommand(cmd, clist)
56
56
57 if choice:
57 if choice:
58 return choice.values()[0]
58 return choice.values()[0]
59
59
60 raise error.UnknownCommand(cmd)
60 raise error.UnknownCommand(cmd)
61
61
62 def bail_if_changed(repo):
62 def bail_if_changed(repo):
63 if repo.dirstate.parents()[1] != nullid:
63 if repo.dirstate.parents()[1] != nullid:
64 raise util.Abort(_('outstanding uncommitted merge'))
64 raise util.Abort(_('outstanding uncommitted merge'))
65 modified, added, removed, deleted = repo.status()[:4]
65 modified, added, removed, deleted = repo.status()[:4]
66 if modified or added or removed or deleted:
66 if modified or added or removed or deleted:
67 raise util.Abort(_("outstanding uncommitted changes"))
67 raise util.Abort(_("outstanding uncommitted changes"))
68
68
69 def logmessage(opts):
69 def logmessage(opts):
70 """ get the log message according to -m and -l option """
70 """ get the log message according to -m and -l option """
71 message = opts.get('message')
71 message = opts.get('message')
72 logfile = opts.get('logfile')
72 logfile = opts.get('logfile')
73
73
74 if message and logfile:
74 if message and logfile:
75 raise util.Abort(_('options --message and --logfile are mutually '
75 raise util.Abort(_('options --message and --logfile are mutually '
76 'exclusive'))
76 'exclusive'))
77 if not message and logfile:
77 if not message and logfile:
78 try:
78 try:
79 if logfile == '-':
79 if logfile == '-':
80 message = sys.stdin.read()
80 message = sys.stdin.read()
81 else:
81 else:
82 message = open(logfile).read()
82 message = open(logfile).read()
83 except IOError, inst:
83 except IOError, inst:
84 raise util.Abort(_("can't read commit message '%s': %s") %
84 raise util.Abort(_("can't read commit message '%s': %s") %
85 (logfile, inst.strerror))
85 (logfile, inst.strerror))
86 return message
86 return message
87
87
88 def loglimit(opts):
88 def loglimit(opts):
89 """get the log limit according to option -l/--limit"""
89 """get the log limit according to option -l/--limit"""
90 limit = opts.get('limit')
90 limit = opts.get('limit')
91 if limit:
91 if limit:
92 try:
92 try:
93 limit = int(limit)
93 limit = int(limit)
94 except ValueError:
94 except ValueError:
95 raise util.Abort(_('limit must be a positive integer'))
95 raise util.Abort(_('limit must be a positive integer'))
96 if limit <= 0: raise util.Abort(_('limit must be positive'))
96 if limit <= 0: raise util.Abort(_('limit must be positive'))
97 else:
97 else:
98 limit = None
98 limit = None
99 return limit
99 return limit
100
100
101 def remoteui(src, opts):
101 def remoteui(src, opts):
102 'build a remote ui from ui or repo and opts'
102 'build a remote ui from ui or repo and opts'
103 if hasattr(src, 'baseui'): # looks like a repository
103 if hasattr(src, 'baseui'): # looks like a repository
104 dst = src.baseui.copy() # drop repo-specific config
104 dst = src.baseui.copy() # drop repo-specific config
105 src = src.ui # copy target options from repo
105 src = src.ui # copy target options from repo
106 else: # assume it's a global ui object
106 else: # assume it's a global ui object
107 dst = src.copy() # keep all global options
107 dst = src.copy() # keep all global options
108
108
109 # copy ssh-specific options
109 # copy ssh-specific options
110 for o in 'ssh', 'remotecmd':
110 for o in 'ssh', 'remotecmd':
111 v = opts.get(o) or src.config('ui', o)
111 v = opts.get(o) or src.config('ui', o)
112 if v:
112 if v:
113 dst.setconfig("ui", o, v)
113 dst.setconfig("ui", o, v)
114
114
115 # copy bundle-specific options
115 # copy bundle-specific options
116 r = src.config('bundle', 'mainreporoot')
116 r = src.config('bundle', 'mainreporoot')
117 if r:
117 if r:
118 dst.setconfig('bundle', 'mainreporoot', r)
118 dst.setconfig('bundle', 'mainreporoot', r)
119
119
120 # copy auth section settings
120 # copy auth section settings
121 for key, val in src.configitems('auth'):
121 for key, val in src.configitems('auth'):
122 dst.setconfig('auth', key, val)
122 dst.setconfig('auth', key, val)
123
123
124 return dst
124 return dst
125
125
126 def revpair(repo, revs):
126 def revpair(repo, revs):
127 '''return pair of nodes, given list of revisions. second item can
127 '''return pair of nodes, given list of revisions. second item can
128 be None, meaning use working dir.'''
128 be None, meaning use working dir.'''
129
129
130 def revfix(repo, val, defval):
130 def revfix(repo, val, defval):
131 if not val and val != 0 and defval is not None:
131 if not val and val != 0 and defval is not None:
132 val = defval
132 val = defval
133 return repo.lookup(val)
133 return repo.lookup(val)
134
134
135 if not revs:
135 if not revs:
136 return repo.dirstate.parents()[0], None
136 return repo.dirstate.parents()[0], None
137 end = None
137 end = None
138 if len(revs) == 1:
138 if len(revs) == 1:
139 if revrangesep in revs[0]:
139 if revrangesep in revs[0]:
140 start, end = revs[0].split(revrangesep, 1)
140 start, end = revs[0].split(revrangesep, 1)
141 start = revfix(repo, start, 0)
141 start = revfix(repo, start, 0)
142 end = revfix(repo, end, len(repo) - 1)
142 end = revfix(repo, end, len(repo) - 1)
143 else:
143 else:
144 start = revfix(repo, revs[0], None)
144 start = revfix(repo, revs[0], None)
145 elif len(revs) == 2:
145 elif len(revs) == 2:
146 if revrangesep in revs[0] or revrangesep in revs[1]:
146 if revrangesep in revs[0] or revrangesep in revs[1]:
147 raise util.Abort(_('too many revisions specified'))
147 raise util.Abort(_('too many revisions specified'))
148 start = revfix(repo, revs[0], None)
148 start = revfix(repo, revs[0], None)
149 end = revfix(repo, revs[1], None)
149 end = revfix(repo, revs[1], None)
150 else:
150 else:
151 raise util.Abort(_('too many revisions specified'))
151 raise util.Abort(_('too many revisions specified'))
152 return start, end
152 return start, end
153
153
154 def revrange(repo, revs):
154 def revrange(repo, revs):
155 """Yield revision as strings from a list of revision specifications."""
155 """Yield revision as strings from a list of revision specifications."""
156
156
157 def revfix(repo, val, defval):
157 def revfix(repo, val, defval):
158 if not val and val != 0 and defval is not None:
158 if not val and val != 0 and defval is not None:
159 return defval
159 return defval
160 return repo.changelog.rev(repo.lookup(val))
160 return repo.changelog.rev(repo.lookup(val))
161
161
162 seen, l = set(), []
162 seen, l = set(), []
163 for spec in revs:
163 for spec in revs:
164 if revrangesep in spec:
164 if revrangesep in spec:
165 start, end = spec.split(revrangesep, 1)
165 start, end = spec.split(revrangesep, 1)
166 start = revfix(repo, start, 0)
166 start = revfix(repo, start, 0)
167 end = revfix(repo, end, len(repo) - 1)
167 end = revfix(repo, end, len(repo) - 1)
168 step = start > end and -1 or 1
168 step = start > end and -1 or 1
169 for rev in xrange(start, end+step, step):
169 for rev in xrange(start, end+step, step):
170 if rev in seen:
170 if rev in seen:
171 continue
171 continue
172 seen.add(rev)
172 seen.add(rev)
173 l.append(rev)
173 l.append(rev)
174 else:
174 else:
175 rev = revfix(repo, spec, None)
175 rev = revfix(repo, spec, None)
176 if rev in seen:
176 if rev in seen:
177 continue
177 continue
178 seen.add(rev)
178 seen.add(rev)
179 l.append(rev)
179 l.append(rev)
180
180
181 return l
181 return l
182
182
183 def make_filename(repo, pat, node,
183 def make_filename(repo, pat, node,
184 total=None, seqno=None, revwidth=None, pathname=None):
184 total=None, seqno=None, revwidth=None, pathname=None):
185 node_expander = {
185 node_expander = {
186 'H': lambda: hex(node),
186 'H': lambda: hex(node),
187 'R': lambda: str(repo.changelog.rev(node)),
187 'R': lambda: str(repo.changelog.rev(node)),
188 'h': lambda: short(node),
188 'h': lambda: short(node),
189 }
189 }
190 expander = {
190 expander = {
191 '%': lambda: '%',
191 '%': lambda: '%',
192 'b': lambda: os.path.basename(repo.root),
192 'b': lambda: os.path.basename(repo.root),
193 }
193 }
194
194
195 try:
195 try:
196 if node:
196 if node:
197 expander.update(node_expander)
197 expander.update(node_expander)
198 if node:
198 if node:
199 expander['r'] = (lambda:
199 expander['r'] = (lambda:
200 str(repo.changelog.rev(node)).zfill(revwidth or 0))
200 str(repo.changelog.rev(node)).zfill(revwidth or 0))
201 if total is not None:
201 if total is not None:
202 expander['N'] = lambda: str(total)
202 expander['N'] = lambda: str(total)
203 if seqno is not None:
203 if seqno is not None:
204 expander['n'] = lambda: str(seqno)
204 expander['n'] = lambda: str(seqno)
205 if total is not None and seqno is not None:
205 if total is not None and seqno is not None:
206 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
206 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
207 if pathname is not None:
207 if pathname is not None:
208 expander['s'] = lambda: os.path.basename(pathname)
208 expander['s'] = lambda: os.path.basename(pathname)
209 expander['d'] = lambda: os.path.dirname(pathname) or '.'
209 expander['d'] = lambda: os.path.dirname(pathname) or '.'
210 expander['p'] = lambda: pathname
210 expander['p'] = lambda: pathname
211
211
212 newname = []
212 newname = []
213 patlen = len(pat)
213 patlen = len(pat)
214 i = 0
214 i = 0
215 while i < patlen:
215 while i < patlen:
216 c = pat[i]
216 c = pat[i]
217 if c == '%':
217 if c == '%':
218 i += 1
218 i += 1
219 c = pat[i]
219 c = pat[i]
220 c = expander[c]()
220 c = expander[c]()
221 newname.append(c)
221 newname.append(c)
222 i += 1
222 i += 1
223 return ''.join(newname)
223 return ''.join(newname)
224 except KeyError, inst:
224 except KeyError, inst:
225 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
225 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
226 inst.args[0])
226 inst.args[0])
227
227
228 def make_file(repo, pat, node=None,
228 def make_file(repo, pat, node=None,
229 total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
229 total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
230
230
231 writable = 'w' in mode or 'a' in mode
231 writable = 'w' in mode or 'a' in mode
232
232
233 if not pat or pat == '-':
233 if not pat or pat == '-':
234 return writable and sys.stdout or sys.stdin
234 return writable and sys.stdout or sys.stdin
235 if hasattr(pat, 'write') and writable:
235 if hasattr(pat, 'write') and writable:
236 return pat
236 return pat
237 if hasattr(pat, 'read') and 'r' in mode:
237 if hasattr(pat, 'read') and 'r' in mode:
238 return pat
238 return pat
239 return open(make_filename(repo, pat, node, total, seqno, revwidth,
239 return open(make_filename(repo, pat, node, total, seqno, revwidth,
240 pathname),
240 pathname),
241 mode)
241 mode)
242
242
243 def expandpats(pats):
243 def expandpats(pats):
244 if not util.expandglobs:
244 if not util.expandglobs:
245 return list(pats)
245 return list(pats)
246 ret = []
246 ret = []
247 for p in pats:
247 for p in pats:
248 kind, name = _match._patsplit(p, None)
248 kind, name = _match._patsplit(p, None)
249 if kind is None:
249 if kind is None:
250 try:
250 try:
251 globbed = glob.glob(name)
251 globbed = glob.glob(name)
252 except re.error:
252 except re.error:
253 globbed = [name]
253 globbed = [name]
254 if globbed:
254 if globbed:
255 ret.extend(globbed)
255 ret.extend(globbed)
256 continue
256 continue
257 ret.append(p)
257 ret.append(p)
258 return ret
258 return ret
259
259
260 def match(repo, pats=[], opts={}, globbed=False, default='relpath'):
260 def match(repo, pats=[], opts={}, globbed=False, default='relpath'):
261 if not globbed and default == 'relpath':
261 if not globbed and default == 'relpath':
262 pats = expandpats(pats or [])
262 pats = expandpats(pats or [])
263 m = _match.match(repo.root, repo.getcwd(), pats,
263 m = _match.match(repo.root, repo.getcwd(), pats,
264 opts.get('include'), opts.get('exclude'), default)
264 opts.get('include'), opts.get('exclude'), default)
265 def badfn(f, msg):
265 def badfn(f, msg):
266 repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
266 repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
267 m.bad = badfn
267 m.bad = badfn
268 return m
268 return m
269
269
270 def matchall(repo):
270 def matchall(repo):
271 return _match.always(repo.root, repo.getcwd())
271 return _match.always(repo.root, repo.getcwd())
272
272
273 def matchfiles(repo, files):
273 def matchfiles(repo, files):
274 return _match.exact(repo.root, repo.getcwd(), files)
274 return _match.exact(repo.root, repo.getcwd(), files)
275
275
276 def findrenames(repo, added, removed, threshold):
276 def findrenames(repo, added, removed, threshold):
277 '''find renamed files -- yields (before, after, score) tuples'''
277 '''find renamed files -- yields (before, after, score) tuples'''
278 copies = {}
278 copies = {}
279 ctx = repo['.']
279 ctx = repo['.']
280 for r in removed:
280 for r in removed:
281 if r not in ctx:
281 if r not in ctx:
282 continue
282 continue
283 fctx = ctx.filectx(r)
283 fctx = ctx.filectx(r)
284
284
285 def score(text):
285 def score(text):
286 if not len(text):
286 if not len(text):
287 return 0.0
287 return 0.0
288 if not fctx.cmp(text):
288 if not fctx.cmp(text):
289 return 1.0
289 return 1.0
290 if threshold == 1.0:
290 if threshold == 1.0:
291 return 0.0
291 return 0.0
292 orig = fctx.data()
292 orig = fctx.data()
293 # bdiff.blocks() returns blocks of matching lines
293 # bdiff.blocks() returns blocks of matching lines
294 # count the number of bytes in each
294 # count the number of bytes in each
295 equal = 0
295 equal = 0
296 alines = mdiff.splitnewlines(text)
296 alines = mdiff.splitnewlines(text)
297 matches = bdiff.blocks(text, orig)
297 matches = bdiff.blocks(text, orig)
298 for x1, x2, y1, y2 in matches:
298 for x1, x2, y1, y2 in matches:
299 for line in alines[x1:x2]:
299 for line in alines[x1:x2]:
300 equal += len(line)
300 equal += len(line)
301
301
302 lengths = len(text) + len(orig)
302 lengths = len(text) + len(orig)
303 return equal * 2.0 / lengths
303 return equal * 2.0 / lengths
304
304
305 for a in added:
305 for a in added:
306 bestscore = copies.get(a, (None, threshold))[1]
306 bestscore = copies.get(a, (None, threshold))[1]
307 myscore = score(repo.wread(a))
307 myscore = score(repo.wread(a))
308 if myscore >= bestscore:
308 if myscore >= bestscore:
309 copies[a] = (r, myscore)
309 copies[a] = (r, myscore)
310
310
311 for dest, v in copies.iteritems():
311 for dest, v in copies.iteritems():
312 source, score = v
312 source, score = v
313 yield source, dest, score
313 yield source, dest, score
314
314
315 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
315 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
316 if dry_run is None:
316 if dry_run is None:
317 dry_run = opts.get('dry_run')
317 dry_run = opts.get('dry_run')
318 if similarity is None:
318 if similarity is None:
319 similarity = float(opts.get('similarity') or 0)
319 similarity = float(opts.get('similarity') or 0)
320 # we'd use status here, except handling of symlinks and ignore is tricky
320 # we'd use status here, except handling of symlinks and ignore is tricky
321 added, unknown, deleted, removed = [], [], [], []
321 added, unknown, deleted, removed = [], [], [], []
322 audit_path = util.path_auditor(repo.root)
322 audit_path = util.path_auditor(repo.root)
323 m = match(repo, pats, opts)
323 m = match(repo, pats, opts)
324 for abs in repo.walk(m):
324 for abs in repo.walk(m):
325 target = repo.wjoin(abs)
325 target = repo.wjoin(abs)
326 good = True
326 good = True
327 try:
327 try:
328 audit_path(abs)
328 audit_path(abs)
329 except:
329 except:
330 good = False
330 good = False
331 rel = m.rel(abs)
331 rel = m.rel(abs)
332 exact = m.exact(abs)
332 exact = m.exact(abs)
333 if good and abs not in repo.dirstate:
333 if good and abs not in repo.dirstate:
334 unknown.append(abs)
334 unknown.append(abs)
335 if repo.ui.verbose or not exact:
335 if repo.ui.verbose or not exact:
336 repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
336 repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
337 elif repo.dirstate[abs] != 'r' and (not good or not util.lexists(target)
337 elif repo.dirstate[abs] != 'r' and (not good or not util.lexists(target)
338 or (os.path.isdir(target) and not os.path.islink(target))):
338 or (os.path.isdir(target) and not os.path.islink(target))):
339 deleted.append(abs)
339 deleted.append(abs)
340 if repo.ui.verbose or not exact:
340 if repo.ui.verbose or not exact:
341 repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
341 repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
342 # for finding renames
342 # for finding renames
343 elif repo.dirstate[abs] == 'r':
343 elif repo.dirstate[abs] == 'r':
344 removed.append(abs)
344 removed.append(abs)
345 elif repo.dirstate[abs] == 'a':
345 elif repo.dirstate[abs] == 'a':
346 added.append(abs)
346 added.append(abs)
347 if not dry_run:
347 if not dry_run:
348 repo.remove(deleted)
348 repo.remove(deleted)
349 repo.add(unknown)
349 repo.add(unknown)
350 if similarity > 0:
350 if similarity > 0:
351 for old, new, score in findrenames(repo, added + unknown,
351 for old, new, score in findrenames(repo, added + unknown,
352 removed + deleted, similarity):
352 removed + deleted, similarity):
353 if repo.ui.verbose or not m.exact(old) or not m.exact(new):
353 if repo.ui.verbose or not m.exact(old) or not m.exact(new):
354 repo.ui.status(_('recording removal of %s as rename to %s '
354 repo.ui.status(_('recording removal of %s as rename to %s '
355 '(%d%% similar)\n') %
355 '(%d%% similar)\n') %
356 (m.rel(old), m.rel(new), score * 100))
356 (m.rel(old), m.rel(new), score * 100))
357 if not dry_run:
357 if not dry_run:
358 repo.copy(old, new)
358 repo.copy(old, new)
359
359
360 def copy(ui, repo, pats, opts, rename=False):
360 def copy(ui, repo, pats, opts, rename=False):
361 # called with the repo lock held
361 # called with the repo lock held
362 #
362 #
363 # hgsep => pathname that uses "/" to separate directories
363 # hgsep => pathname that uses "/" to separate directories
364 # ossep => pathname that uses os.sep to separate directories
364 # ossep => pathname that uses os.sep to separate directories
365 cwd = repo.getcwd()
365 cwd = repo.getcwd()
366 targets = {}
366 targets = {}
367 after = opts.get("after")
367 after = opts.get("after")
368 dryrun = opts.get("dry_run")
368 dryrun = opts.get("dry_run")
369
369
370 def walkpat(pat):
370 def walkpat(pat):
371 srcs = []
371 srcs = []
372 m = match(repo, [pat], opts, globbed=True)
372 m = match(repo, [pat], opts, globbed=True)
373 for abs in repo.walk(m):
373 for abs in repo.walk(m):
374 state = repo.dirstate[abs]
374 state = repo.dirstate[abs]
375 rel = m.rel(abs)
375 rel = m.rel(abs)
376 exact = m.exact(abs)
376 exact = m.exact(abs)
377 if state in '?r':
377 if state in '?r':
378 if exact and state == '?':
378 if exact and state == '?':
379 ui.warn(_('%s: not copying - file is not managed\n') % rel)
379 ui.warn(_('%s: not copying - file is not managed\n') % rel)
380 if exact and state == 'r':
380 if exact and state == 'r':
381 ui.warn(_('%s: not copying - file has been marked for'
381 ui.warn(_('%s: not copying - file has been marked for'
382 ' remove\n') % rel)
382 ' remove\n') % rel)
383 continue
383 continue
384 # abs: hgsep
384 # abs: hgsep
385 # rel: ossep
385 # rel: ossep
386 srcs.append((abs, rel, exact))
386 srcs.append((abs, rel, exact))
387 return srcs
387 return srcs
388
388
389 # abssrc: hgsep
389 # abssrc: hgsep
390 # relsrc: ossep
390 # relsrc: ossep
391 # otarget: ossep
391 # otarget: ossep
392 def copyfile(abssrc, relsrc, otarget, exact):
392 def copyfile(abssrc, relsrc, otarget, exact):
393 abstarget = util.canonpath(repo.root, cwd, otarget)
393 abstarget = util.canonpath(repo.root, cwd, otarget)
394 reltarget = repo.pathto(abstarget, cwd)
394 reltarget = repo.pathto(abstarget, cwd)
395 target = repo.wjoin(abstarget)
395 target = repo.wjoin(abstarget)
396 src = repo.wjoin(abssrc)
396 src = repo.wjoin(abssrc)
397 state = repo.dirstate[abstarget]
397 state = repo.dirstate[abstarget]
398
398
399 # check for collisions
399 # check for collisions
400 prevsrc = targets.get(abstarget)
400 prevsrc = targets.get(abstarget)
401 if prevsrc is not None:
401 if prevsrc is not None:
402 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
402 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
403 (reltarget, repo.pathto(abssrc, cwd),
403 (reltarget, repo.pathto(abssrc, cwd),
404 repo.pathto(prevsrc, cwd)))
404 repo.pathto(prevsrc, cwd)))
405 return
405 return
406
406
407 # check for overwrites
407 # check for overwrites
408 exists = os.path.exists(target)
408 exists = os.path.exists(target)
409 if not after and exists or after and state in 'mn':
409 if not after and exists or after and state in 'mn':
410 if not opts['force']:
410 if not opts['force']:
411 ui.warn(_('%s: not overwriting - file exists\n') %
411 ui.warn(_('%s: not overwriting - file exists\n') %
412 reltarget)
412 reltarget)
413 return
413 return
414
414
415 if after:
415 if after:
416 if not exists:
416 if not exists:
417 return
417 return
418 elif not dryrun:
418 elif not dryrun:
419 try:
419 try:
420 if exists:
420 if exists:
421 os.unlink(target)
421 os.unlink(target)
422 targetdir = os.path.dirname(target) or '.'
422 targetdir = os.path.dirname(target) or '.'
423 if not os.path.isdir(targetdir):
423 if not os.path.isdir(targetdir):
424 os.makedirs(targetdir)
424 os.makedirs(targetdir)
425 util.copyfile(src, target)
425 util.copyfile(src, target)
426 except IOError, inst:
426 except IOError, inst:
427 if inst.errno == errno.ENOENT:
427 if inst.errno == errno.ENOENT:
428 ui.warn(_('%s: deleted in working copy\n') % relsrc)
428 ui.warn(_('%s: deleted in working copy\n') % relsrc)
429 else:
429 else:
430 ui.warn(_('%s: cannot copy - %s\n') %
430 ui.warn(_('%s: cannot copy - %s\n') %
431 (relsrc, inst.strerror))
431 (relsrc, inst.strerror))
432 return True # report a failure
432 return True # report a failure
433
433
434 if ui.verbose or not exact:
434 if ui.verbose or not exact:
435 if rename:
435 if rename:
436 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
436 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
437 else:
437 else:
438 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
438 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
439
439
440 targets[abstarget] = abssrc
440 targets[abstarget] = abssrc
441
441
442 # fix up dirstate
442 # fix up dirstate
443 origsrc = repo.dirstate.copied(abssrc) or abssrc
443 origsrc = repo.dirstate.copied(abssrc) or abssrc
444 if abstarget == origsrc: # copying back a copy?
444 if abstarget == origsrc: # copying back a copy?
445 if state not in 'mn' and not dryrun:
445 if state not in 'mn' and not dryrun:
446 repo.dirstate.normallookup(abstarget)
446 repo.dirstate.normallookup(abstarget)
447 else:
447 else:
448 if repo.dirstate[origsrc] == 'a' and origsrc == abssrc:
448 if repo.dirstate[origsrc] == 'a' and origsrc == abssrc:
449 if not ui.quiet:
449 if not ui.quiet:
450 ui.warn(_("%s has not been committed yet, so no copy "
450 ui.warn(_("%s has not been committed yet, so no copy "
451 "data will be stored for %s.\n")
451 "data will be stored for %s.\n")
452 % (repo.pathto(origsrc, cwd), reltarget))
452 % (repo.pathto(origsrc, cwd), reltarget))
453 if repo.dirstate[abstarget] in '?r' and not dryrun:
453 if repo.dirstate[abstarget] in '?r' and not dryrun:
454 repo.add([abstarget])
454 repo.add([abstarget])
455 elif not dryrun:
455 elif not dryrun:
456 repo.copy(origsrc, abstarget)
456 repo.copy(origsrc, abstarget)
457
457
458 if rename and not dryrun:
458 if rename and not dryrun:
459 repo.remove([abssrc], not after)
459 repo.remove([abssrc], not after)
460
460
461 # pat: ossep
461 # pat: ossep
462 # dest ossep
462 # dest ossep
463 # srcs: list of (hgsep, hgsep, ossep, bool)
463 # srcs: list of (hgsep, hgsep, ossep, bool)
464 # return: function that takes hgsep and returns ossep
464 # return: function that takes hgsep and returns ossep
465 def targetpathfn(pat, dest, srcs):
465 def targetpathfn(pat, dest, srcs):
466 if os.path.isdir(pat):
466 if os.path.isdir(pat):
467 abspfx = util.canonpath(repo.root, cwd, pat)
467 abspfx = util.canonpath(repo.root, cwd, pat)
468 abspfx = util.localpath(abspfx)
468 abspfx = util.localpath(abspfx)
469 if destdirexists:
469 if destdirexists:
470 striplen = len(os.path.split(abspfx)[0])
470 striplen = len(os.path.split(abspfx)[0])
471 else:
471 else:
472 striplen = len(abspfx)
472 striplen = len(abspfx)
473 if striplen:
473 if striplen:
474 striplen += len(os.sep)
474 striplen += len(os.sep)
475 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
475 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
476 elif destdirexists:
476 elif destdirexists:
477 res = lambda p: os.path.join(dest,
477 res = lambda p: os.path.join(dest,
478 os.path.basename(util.localpath(p)))
478 os.path.basename(util.localpath(p)))
479 else:
479 else:
480 res = lambda p: dest
480 res = lambda p: dest
481 return res
481 return res
482
482
483 # pat: ossep
483 # pat: ossep
484 # dest ossep
484 # dest ossep
485 # srcs: list of (hgsep, hgsep, ossep, bool)
485 # srcs: list of (hgsep, hgsep, ossep, bool)
486 # return: function that takes hgsep and returns ossep
486 # return: function that takes hgsep and returns ossep
487 def targetpathafterfn(pat, dest, srcs):
487 def targetpathafterfn(pat, dest, srcs):
488 if _match.patkind(pat):
488 if _match.patkind(pat):
489 # a mercurial pattern
489 # a mercurial pattern
490 res = lambda p: os.path.join(dest,
490 res = lambda p: os.path.join(dest,
491 os.path.basename(util.localpath(p)))
491 os.path.basename(util.localpath(p)))
492 else:
492 else:
493 abspfx = util.canonpath(repo.root, cwd, pat)
493 abspfx = util.canonpath(repo.root, cwd, pat)
494 if len(abspfx) < len(srcs[0][0]):
494 if len(abspfx) < len(srcs[0][0]):
495 # A directory. Either the target path contains the last
495 # A directory. Either the target path contains the last
496 # component of the source path or it does not.
496 # component of the source path or it does not.
497 def evalpath(striplen):
497 def evalpath(striplen):
498 score = 0
498 score = 0
499 for s in srcs:
499 for s in srcs:
500 t = os.path.join(dest, util.localpath(s[0])[striplen:])
500 t = os.path.join(dest, util.localpath(s[0])[striplen:])
501 if os.path.exists(t):
501 if os.path.exists(t):
502 score += 1
502 score += 1
503 return score
503 return score
504
504
505 abspfx = util.localpath(abspfx)
505 abspfx = util.localpath(abspfx)
506 striplen = len(abspfx)
506 striplen = len(abspfx)
507 if striplen:
507 if striplen:
508 striplen += len(os.sep)
508 striplen += len(os.sep)
509 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
509 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
510 score = evalpath(striplen)
510 score = evalpath(striplen)
511 striplen1 = len(os.path.split(abspfx)[0])
511 striplen1 = len(os.path.split(abspfx)[0])
512 if striplen1:
512 if striplen1:
513 striplen1 += len(os.sep)
513 striplen1 += len(os.sep)
514 if evalpath(striplen1) > score:
514 if evalpath(striplen1) > score:
515 striplen = striplen1
515 striplen = striplen1
516 res = lambda p: os.path.join(dest,
516 res = lambda p: os.path.join(dest,
517 util.localpath(p)[striplen:])
517 util.localpath(p)[striplen:])
518 else:
518 else:
519 # a file
519 # a file
520 if destdirexists:
520 if destdirexists:
521 res = lambda p: os.path.join(dest,
521 res = lambda p: os.path.join(dest,
522 os.path.basename(util.localpath(p)))
522 os.path.basename(util.localpath(p)))
523 else:
523 else:
524 res = lambda p: dest
524 res = lambda p: dest
525 return res
525 return res
526
526
527
527
528 pats = expandpats(pats)
528 pats = expandpats(pats)
529 if not pats:
529 if not pats:
530 raise util.Abort(_('no source or destination specified'))
530 raise util.Abort(_('no source or destination specified'))
531 if len(pats) == 1:
531 if len(pats) == 1:
532 raise util.Abort(_('no destination specified'))
532 raise util.Abort(_('no destination specified'))
533 dest = pats.pop()
533 dest = pats.pop()
534 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
534 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
535 if not destdirexists:
535 if not destdirexists:
536 if len(pats) > 1 or _match.patkind(pats[0]):
536 if len(pats) > 1 or _match.patkind(pats[0]):
537 raise util.Abort(_('with multiple sources, destination must be an '
537 raise util.Abort(_('with multiple sources, destination must be an '
538 'existing directory'))
538 'existing directory'))
539 if util.endswithsep(dest):
539 if util.endswithsep(dest):
540 raise util.Abort(_('destination %s is not a directory') % dest)
540 raise util.Abort(_('destination %s is not a directory') % dest)
541
541
542 tfn = targetpathfn
542 tfn = targetpathfn
543 if after:
543 if after:
544 tfn = targetpathafterfn
544 tfn = targetpathafterfn
545 copylist = []
545 copylist = []
546 for pat in pats:
546 for pat in pats:
547 srcs = walkpat(pat)
547 srcs = walkpat(pat)
548 if not srcs:
548 if not srcs:
549 continue
549 continue
550 copylist.append((tfn(pat, dest, srcs), srcs))
550 copylist.append((tfn(pat, dest, srcs), srcs))
551 if not copylist:
551 if not copylist:
552 raise util.Abort(_('no files to copy'))
552 raise util.Abort(_('no files to copy'))
553
553
554 errors = 0
554 errors = 0
555 for targetpath, srcs in copylist:
555 for targetpath, srcs in copylist:
556 for abssrc, relsrc, exact in srcs:
556 for abssrc, relsrc, exact in srcs:
557 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
557 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
558 errors += 1
558 errors += 1
559
559
560 if errors:
560 if errors:
561 ui.warn(_('(consider using --after)\n'))
561 ui.warn(_('(consider using --after)\n'))
562
562
563 return errors
563 return errors
564
564
565 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
565 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
566 runargs=None, appendpid=False):
566 runargs=None, appendpid=False):
567 '''Run a command as a service.'''
567 '''Run a command as a service.'''
568
568
569 if opts['daemon'] and not opts['daemon_pipefds']:
569 if opts['daemon'] and not opts['daemon_pipefds']:
570 # Signal child process startup with file removal
570 # Signal child process startup with file removal
571 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
571 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
572 os.close(lockfd)
572 os.close(lockfd)
573 try:
573 try:
574 if not runargs:
574 if not runargs:
575 -            runargs = sys.argv[:]
575 +            runargs = util.hgcmd() + sys.argv[1:]
576 runargs.append('--daemon-pipefds=%s' % lockpath)
576 runargs.append('--daemon-pipefds=%s' % lockpath)
577 # Don't pass --cwd to the child process, because we've already
577 # Don't pass --cwd to the child process, because we've already
578 # changed directory.
578 # changed directory.
579 for i in xrange(1,len(runargs)):
579 for i in xrange(1,len(runargs)):
580 if runargs[i].startswith('--cwd='):
580 if runargs[i].startswith('--cwd='):
581 del runargs[i]
581 del runargs[i]
582 break
582 break
583 elif runargs[i].startswith('--cwd'):
583 elif runargs[i].startswith('--cwd'):
584 del runargs[i:i+2]
584 del runargs[i:i+2]
585 break
585 break
586 pid = util.spawndetached(runargs)
586 pid = util.spawndetached(runargs)
587 while os.path.exists(lockpath):
587 while os.path.exists(lockpath):
588 time.sleep(0.1)
588 time.sleep(0.1)
589 finally:
589 finally:
590 try:
590 try:
591 os.unlink(lockpath)
591 os.unlink(lockpath)
592 except OSError, e:
592 except OSError, e:
593 if e.errno != errno.ENOENT:
593 if e.errno != errno.ENOENT:
594 raise
594 raise
595 if parentfn:
595 if parentfn:
596 return parentfn(pid)
596 return parentfn(pid)
597 else:
597 else:
598 return
598 return
599
599
600 if initfn:
600 if initfn:
601 initfn()
601 initfn()
602
602
603 if opts['pid_file']:
603 if opts['pid_file']:
604 mode = appendpid and 'a' or 'w'
604 mode = appendpid and 'a' or 'w'
605 fp = open(opts['pid_file'], mode)
605 fp = open(opts['pid_file'], mode)
606 fp.write(str(os.getpid()) + '\n')
606 fp.write(str(os.getpid()) + '\n')
607 fp.close()
607 fp.close()
608
608
609 if opts['daemon_pipefds']:
609 if opts['daemon_pipefds']:
610 lockpath = opts['daemon_pipefds']
610 lockpath = opts['daemon_pipefds']
611 try:
611 try:
612 os.setsid()
612 os.setsid()
613 except AttributeError:
613 except AttributeError:
614 pass
614 pass
615 os.unlink(lockpath)
615 os.unlink(lockpath)
616 sys.stdout.flush()
616 sys.stdout.flush()
617 sys.stderr.flush()
617 sys.stderr.flush()
618
618
619 nullfd = os.open(util.nulldev, os.O_RDWR)
619 nullfd = os.open(util.nulldev, os.O_RDWR)
620 logfilefd = nullfd
620 logfilefd = nullfd
621 if logfile:
621 if logfile:
622 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
622 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
623 os.dup2(nullfd, 0)
623 os.dup2(nullfd, 0)
624 os.dup2(logfilefd, 1)
624 os.dup2(logfilefd, 1)
625 os.dup2(logfilefd, 2)
625 os.dup2(logfilefd, 2)
626 if nullfd not in (0, 1, 2):
626 if nullfd not in (0, 1, 2):
627 os.close(nullfd)
627 os.close(nullfd)
628 if logfile and logfilefd not in (0, 1, 2):
628 if logfile and logfilefd not in (0, 1, 2):
629 os.close(logfilefd)
629 os.close(logfilefd)
630
630
631 if runfn:
631 if runfn:
632 return runfn()
632 return runfn()
633
633
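A brief aside on the change visible in service() above: re-spawning the detached child with plain sys.argv[:] assumes argv[0] is itself a runnable command, which is not true when hg is started as "python hg ..." or from a frozen build. The fixed line delegates to util.hgcmd(), whose POSIX helper gethgcmd() appears in posix.py further down; the util.py side of the change is not shown in full here. A minimal sketch of the idea follows; the logic is hypothetical, not the actual implementation:

import sys

def hgcmd_sketch():
    # Assumption for illustration: a frozen binary can simply be re-run,
    # otherwise re-run the current interpreter with the original script.
    if getattr(sys, 'frozen', False):
        return [sys.executable]
    return [sys.executable, sys.argv[0]]

# runargs = hgcmd_sketch() + sys.argv[1:] then mirrors the corrected line above.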
634 class changeset_printer(object):
634 class changeset_printer(object):
635 '''show changeset information when templating not requested.'''
635 '''show changeset information when templating not requested.'''
636
636
637 def __init__(self, ui, repo, patch, diffopts, buffered):
637 def __init__(self, ui, repo, patch, diffopts, buffered):
638 self.ui = ui
638 self.ui = ui
639 self.repo = repo
639 self.repo = repo
640 self.buffered = buffered
640 self.buffered = buffered
641 self.patch = patch
641 self.patch = patch
642 self.diffopts = diffopts
642 self.diffopts = diffopts
643 self.header = {}
643 self.header = {}
644 self.hunk = {}
644 self.hunk = {}
645 self.lastheader = None
645 self.lastheader = None
646 self.footer = None
646 self.footer = None
647
647
648 def flush(self, rev):
648 def flush(self, rev):
649 if rev in self.header:
649 if rev in self.header:
650 h = self.header[rev]
650 h = self.header[rev]
651 if h != self.lastheader:
651 if h != self.lastheader:
652 self.lastheader = h
652 self.lastheader = h
653 self.ui.write(h)
653 self.ui.write(h)
654 del self.header[rev]
654 del self.header[rev]
655 if rev in self.hunk:
655 if rev in self.hunk:
656 self.ui.write(self.hunk[rev])
656 self.ui.write(self.hunk[rev])
657 del self.hunk[rev]
657 del self.hunk[rev]
658 return 1
658 return 1
659 return 0
659 return 0
660
660
661 def close(self):
661 def close(self):
662 if self.footer:
662 if self.footer:
663 self.ui.write(self.footer)
663 self.ui.write(self.footer)
664
664
665 def show(self, ctx, copies=None, **props):
665 def show(self, ctx, copies=None, **props):
666 if self.buffered:
666 if self.buffered:
667 self.ui.pushbuffer()
667 self.ui.pushbuffer()
668 self._show(ctx, copies, props)
668 self._show(ctx, copies, props)
669 self.hunk[ctx.rev()] = self.ui.popbuffer()
669 self.hunk[ctx.rev()] = self.ui.popbuffer()
670 else:
670 else:
671 self._show(ctx, copies, props)
671 self._show(ctx, copies, props)
672
672
673 def _show(self, ctx, copies, props):
673 def _show(self, ctx, copies, props):
674 '''show a single changeset or file revision'''
674 '''show a single changeset or file revision'''
675 changenode = ctx.node()
675 changenode = ctx.node()
676 rev = ctx.rev()
676 rev = ctx.rev()
677
677
678 if self.ui.quiet:
678 if self.ui.quiet:
679 self.ui.write("%d:%s\n" % (rev, short(changenode)))
679 self.ui.write("%d:%s\n" % (rev, short(changenode)))
680 return
680 return
681
681
682 log = self.repo.changelog
682 log = self.repo.changelog
683 date = util.datestr(ctx.date())
683 date = util.datestr(ctx.date())
684
684
685 hexfunc = self.ui.debugflag and hex or short
685 hexfunc = self.ui.debugflag and hex or short
686
686
687 parents = [(p, hexfunc(log.node(p)))
687 parents = [(p, hexfunc(log.node(p)))
688 for p in self._meaningful_parentrevs(log, rev)]
688 for p in self._meaningful_parentrevs(log, rev)]
689
689
690 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)))
690 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)))
691
691
692 branch = ctx.branch()
692 branch = ctx.branch()
693 # don't show the default branch name
693 # don't show the default branch name
694 if branch != 'default':
694 if branch != 'default':
695 branch = encoding.tolocal(branch)
695 branch = encoding.tolocal(branch)
696 self.ui.write(_("branch: %s\n") % branch)
696 self.ui.write(_("branch: %s\n") % branch)
697 for tag in self.repo.nodetags(changenode):
697 for tag in self.repo.nodetags(changenode):
698 self.ui.write(_("tag: %s\n") % tag)
698 self.ui.write(_("tag: %s\n") % tag)
699 for parent in parents:
699 for parent in parents:
700 self.ui.write(_("parent: %d:%s\n") % parent)
700 self.ui.write(_("parent: %d:%s\n") % parent)
701
701
702 if self.ui.debugflag:
702 if self.ui.debugflag:
703 mnode = ctx.manifestnode()
703 mnode = ctx.manifestnode()
704 self.ui.write(_("manifest: %d:%s\n") %
704 self.ui.write(_("manifest: %d:%s\n") %
705 (self.repo.manifest.rev(mnode), hex(mnode)))
705 (self.repo.manifest.rev(mnode), hex(mnode)))
706 self.ui.write(_("user: %s\n") % ctx.user())
706 self.ui.write(_("user: %s\n") % ctx.user())
707 self.ui.write(_("date: %s\n") % date)
707 self.ui.write(_("date: %s\n") % date)
708
708
709 if self.ui.debugflag:
709 if self.ui.debugflag:
710 files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
710 files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
711 for key, value in zip([_("files:"), _("files+:"), _("files-:")],
711 for key, value in zip([_("files:"), _("files+:"), _("files-:")],
712 files):
712 files):
713 if value:
713 if value:
714 self.ui.write("%-12s %s\n" % (key, " ".join(value)))
714 self.ui.write("%-12s %s\n" % (key, " ".join(value)))
715 elif ctx.files() and self.ui.verbose:
715 elif ctx.files() and self.ui.verbose:
716 self.ui.write(_("files: %s\n") % " ".join(ctx.files()))
716 self.ui.write(_("files: %s\n") % " ".join(ctx.files()))
717 if copies and self.ui.verbose:
717 if copies and self.ui.verbose:
718 copies = ['%s (%s)' % c for c in copies]
718 copies = ['%s (%s)' % c for c in copies]
719 self.ui.write(_("copies: %s\n") % ' '.join(copies))
719 self.ui.write(_("copies: %s\n") % ' '.join(copies))
720
720
721 extra = ctx.extra()
721 extra = ctx.extra()
722 if extra and self.ui.debugflag:
722 if extra and self.ui.debugflag:
723 for key, value in sorted(extra.items()):
723 for key, value in sorted(extra.items()):
724 self.ui.write(_("extra: %s=%s\n")
724 self.ui.write(_("extra: %s=%s\n")
725 % (key, value.encode('string_escape')))
725 % (key, value.encode('string_escape')))
726
726
727 description = ctx.description().strip()
727 description = ctx.description().strip()
728 if description:
728 if description:
729 if self.ui.verbose:
729 if self.ui.verbose:
730 self.ui.write(_("description:\n"))
730 self.ui.write(_("description:\n"))
731 self.ui.write(description)
731 self.ui.write(description)
732 self.ui.write("\n\n")
732 self.ui.write("\n\n")
733 else:
733 else:
734 self.ui.write(_("summary: %s\n") %
734 self.ui.write(_("summary: %s\n") %
735 description.splitlines()[0])
735 description.splitlines()[0])
736 self.ui.write("\n")
736 self.ui.write("\n")
737
737
738 self.showpatch(changenode)
738 self.showpatch(changenode)
739
739
740 def showpatch(self, node):
740 def showpatch(self, node):
741 if self.patch:
741 if self.patch:
742 prev = self.repo.changelog.parents(node)[0]
742 prev = self.repo.changelog.parents(node)[0]
743 chunks = patch.diff(self.repo, prev, node, match=self.patch,
743 chunks = patch.diff(self.repo, prev, node, match=self.patch,
744 opts=patch.diffopts(self.ui, self.diffopts))
744 opts=patch.diffopts(self.ui, self.diffopts))
745 for chunk in chunks:
745 for chunk in chunks:
746 self.ui.write(chunk)
746 self.ui.write(chunk)
747 self.ui.write("\n")
747 self.ui.write("\n")
748
748
749 def _meaningful_parentrevs(self, log, rev):
749 def _meaningful_parentrevs(self, log, rev):
750 """Return list of meaningful (or all if debug) parentrevs for rev.
750 """Return list of meaningful (or all if debug) parentrevs for rev.
751
751
752 For merges (two non-nullrev revisions) both parents are meaningful.
752 For merges (two non-nullrev revisions) both parents are meaningful.
753 Otherwise the first parent revision is considered meaningful if it
753 Otherwise the first parent revision is considered meaningful if it
754 is not the preceding revision.
754 is not the preceding revision.
755 """
755 """
756 parents = log.parentrevs(rev)
756 parents = log.parentrevs(rev)
757 if not self.ui.debugflag and parents[1] == nullrev:
757 if not self.ui.debugflag and parents[1] == nullrev:
758 if parents[0] >= rev - 1:
758 if parents[0] >= rev - 1:
759 parents = []
759 parents = []
760 else:
760 else:
761 parents = [parents[0]]
761 parents = [parents[0]]
762 return parents
762 return parents
763
763
764
764
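A few concrete cases of the rule documented above (revision numbers are illustrative):

#   rev 12, parents (11, -1)  -> []        linear history, parent left implicit
#   rev 12, parents (9, -1)   -> [9]       revisions were skipped, so show it
#   rev 12, parents (9, 11)   -> [9, 11]   merge: both parents are meaningful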
765 class changeset_templater(changeset_printer):
765 class changeset_templater(changeset_printer):
766 '''format changeset information.'''
766 '''format changeset information.'''
767
767
768 def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
768 def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
769 changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
769 changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
770 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
770 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
771 defaulttempl = {
771 defaulttempl = {
772 'parent': '{rev}:{node|formatnode} ',
772 'parent': '{rev}:{node|formatnode} ',
773 'manifest': '{rev}:{node|formatnode}',
773 'manifest': '{rev}:{node|formatnode}',
774 'file_copy': '{name} ({source})',
774 'file_copy': '{name} ({source})',
775 'extra': '{key}={value|stringescape}'
775 'extra': '{key}={value|stringescape}'
776 }
776 }
777 # filecopy is preserved for compatibility reasons
777 # filecopy is preserved for compatibility reasons
778 defaulttempl['filecopy'] = defaulttempl['file_copy']
778 defaulttempl['filecopy'] = defaulttempl['file_copy']
779 self.t = templater.templater(mapfile, {'formatnode': formatnode},
779 self.t = templater.templater(mapfile, {'formatnode': formatnode},
780 cache=defaulttempl)
780 cache=defaulttempl)
781 self.cache = {}
781 self.cache = {}
782
782
783 def use_template(self, t):
783 def use_template(self, t):
784 '''set template string to use'''
784 '''set template string to use'''
785 self.t.cache['changeset'] = t
785 self.t.cache['changeset'] = t
786
786
787 def _meaningful_parentrevs(self, ctx):
787 def _meaningful_parentrevs(self, ctx):
788 """Return list of meaningful (or all if debug) parentrevs for rev.
788 """Return list of meaningful (or all if debug) parentrevs for rev.
789 """
789 """
790 parents = ctx.parents()
790 parents = ctx.parents()
791 if len(parents) > 1:
791 if len(parents) > 1:
792 return parents
792 return parents
793 if self.ui.debugflag:
793 if self.ui.debugflag:
794 return [parents[0], self.repo['null']]
794 return [parents[0], self.repo['null']]
795 if parents[0].rev() >= ctx.rev() - 1:
795 if parents[0].rev() >= ctx.rev() - 1:
796 return []
796 return []
797 return parents
797 return parents
798
798
799 def _show(self, ctx, copies, props):
799 def _show(self, ctx, copies, props):
800 '''show a single changeset or file revision'''
800 '''show a single changeset or file revision'''
801
801
802 showlist = templatekw.showlist
802 showlist = templatekw.showlist
803
803
804 # showparents() behaviour depends on ui trace level which
804 # showparents() behaviour depends on ui trace level which
805 # causes unexpected behaviours at templating level and makes
805 # causes unexpected behaviours at templating level and makes
806 # it harder to extract it into a standalone function. Its
806 # it harder to extract it into a standalone function. Its
807 # behaviour cannot be changed so leave it here for now.
807 # behaviour cannot be changed so leave it here for now.
808 def showparents(repo, ctx, templ, **args):
808 def showparents(repo, ctx, templ, **args):
809 parents = [[('rev', p.rev()), ('node', p.hex())]
809 parents = [[('rev', p.rev()), ('node', p.hex())]
810 for p in self._meaningful_parentrevs(ctx)]
810 for p in self._meaningful_parentrevs(ctx)]
811 return showlist(templ, 'parent', parents, **args)
811 return showlist(templ, 'parent', parents, **args)
812
812
813 props = props.copy()
813 props = props.copy()
814 props.update(templatekw.keywords)
814 props.update(templatekw.keywords)
815 props['parents'] = showparents
815 props['parents'] = showparents
816 props['templ'] = self.t
816 props['templ'] = self.t
817 props['ctx'] = ctx
817 props['ctx'] = ctx
818 props['repo'] = self.repo
818 props['repo'] = self.repo
819 props['revcache'] = {'copies': copies}
819 props['revcache'] = {'copies': copies}
820 props['cache'] = self.cache
820 props['cache'] = self.cache
821
821
822 # find correct templates for current mode
822 # find correct templates for current mode
823
823
824 tmplmodes = [
824 tmplmodes = [
825 (True, None),
825 (True, None),
826 (self.ui.verbose, 'verbose'),
826 (self.ui.verbose, 'verbose'),
827 (self.ui.quiet, 'quiet'),
827 (self.ui.quiet, 'quiet'),
828 (self.ui.debugflag, 'debug'),
828 (self.ui.debugflag, 'debug'),
829 ]
829 ]
830
830
831 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
831 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
832 for mode, postfix in tmplmodes:
832 for mode, postfix in tmplmodes:
833 for type in types:
833 for type in types:
834 cur = postfix and ('%s_%s' % (type, postfix)) or type
834 cur = postfix and ('%s_%s' % (type, postfix)) or type
835 if mode and cur in self.t:
835 if mode and cur in self.t:
836 types[type] = cur
836 types[type] = cur
837
837
838 try:
838 try:
839
839
840 # write header
840 # write header
841 if types['header']:
841 if types['header']:
842 h = templater.stringify(self.t(types['header'], **props))
842 h = templater.stringify(self.t(types['header'], **props))
843 if self.buffered:
843 if self.buffered:
844 self.header[ctx.rev()] = h
844 self.header[ctx.rev()] = h
845 else:
845 else:
846 self.ui.write(h)
846 self.ui.write(h)
847
847
848 # write changeset metadata, then patch if requested
848 # write changeset metadata, then patch if requested
849 key = types['changeset']
849 key = types['changeset']
850 self.ui.write(templater.stringify(self.t(key, **props)))
850 self.ui.write(templater.stringify(self.t(key, **props)))
851 self.showpatch(ctx.node())
851 self.showpatch(ctx.node())
852
852
853 if types['footer']:
853 if types['footer']:
854 if not self.footer:
854 if not self.footer:
855 self.footer = templater.stringify(self.t(types['footer'],
855 self.footer = templater.stringify(self.t(types['footer'],
856 **props))
856 **props))
857
857
858 except KeyError, inst:
858 except KeyError, inst:
859 msg = _("%s: no key named '%s'")
859 msg = _("%s: no key named '%s'")
860 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
860 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
861 except SyntaxError, inst:
861 except SyntaxError, inst:
862 raise util.Abort(_('%s: %s') % (self.t.mapfile, inst.args[0]))
862 raise util.Abort(_('%s: %s') % (self.t.mapfile, inst.args[0]))
863
863
864 def show_changeset(ui, repo, opts, buffered=False, matchfn=False):
864 def show_changeset(ui, repo, opts, buffered=False, matchfn=False):
865 """show one changeset using template or regular display.
865 """show one changeset using template or regular display.
866
866
867 Display format will be the first non-empty hit of:
867 Display format will be the first non-empty hit of:
868 1. option 'template'
868 1. option 'template'
869 2. option 'style'
869 2. option 'style'
870 3. [ui] setting 'logtemplate'
870 3. [ui] setting 'logtemplate'
871 4. [ui] setting 'style'
871 4. [ui] setting 'style'
872 If all of these values are either unset or the empty string,
872 If all of these values are either unset or the empty string,
873 regular display via changeset_printer() is done.
873 regular display via changeset_printer() is done.
874 """
874 """
875 # options
875 # options
876 patch = False
876 patch = False
877 if opts.get('patch'):
877 if opts.get('patch'):
878 patch = matchfn or matchall(repo)
878 patch = matchfn or matchall(repo)
879
879
880 tmpl = opts.get('template')
880 tmpl = opts.get('template')
881 style = None
881 style = None
882 if tmpl:
882 if tmpl:
883 tmpl = templater.parsestring(tmpl, quoted=False)
883 tmpl = templater.parsestring(tmpl, quoted=False)
884 else:
884 else:
885 style = opts.get('style')
885 style = opts.get('style')
886
886
887 # ui settings
887 # ui settings
888 if not (tmpl or style):
888 if not (tmpl or style):
889 tmpl = ui.config('ui', 'logtemplate')
889 tmpl = ui.config('ui', 'logtemplate')
890 if tmpl:
890 if tmpl:
891 tmpl = templater.parsestring(tmpl)
891 tmpl = templater.parsestring(tmpl)
892 else:
892 else:
893 style = ui.config('ui', 'style')
893 style = ui.config('ui', 'style')
894
894
895 if not (tmpl or style):
895 if not (tmpl or style):
896 return changeset_printer(ui, repo, patch, opts, buffered)
896 return changeset_printer(ui, repo, patch, opts, buffered)
897
897
898 mapfile = None
898 mapfile = None
899 if style and not tmpl:
899 if style and not tmpl:
900 mapfile = style
900 mapfile = style
901 if not os.path.split(mapfile)[0]:
901 if not os.path.split(mapfile)[0]:
902 mapname = (templater.templatepath('map-cmdline.' + mapfile)
902 mapname = (templater.templatepath('map-cmdline.' + mapfile)
903 or templater.templatepath(mapfile))
903 or templater.templatepath(mapfile))
904 if mapname: mapfile = mapname
904 if mapname: mapfile = mapname
905
905
906 try:
906 try:
907 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
907 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
908 except SyntaxError, inst:
908 except SyntaxError, inst:
909 raise util.Abort(inst.args[0])
909 raise util.Abort(inst.args[0])
910 if tmpl: t.use_template(tmpl)
910 if tmpl: t.use_template(tmpl)
911 return t
911 return t
912
912
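As a concrete reading of the precedence list in the docstring above (these options and settings exist in stock Mercurial; the exact output is not shown):

  hg log --template '{rev}\n'               # 1. highest precedence
  hg log --style compact                    # 2. used only if no --template
  [ui] logtemplate = {rev}:{node|short}\n   # 3. hgrc fallback
  [ui] style = compact                      # 4. last resort before plain output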
913 def finddate(ui, repo, date):
913 def finddate(ui, repo, date):
914 """Find the tipmost changeset that matches the given date spec"""
914 """Find the tipmost changeset that matches the given date spec"""
915
915
916 df = util.matchdate(date)
916 df = util.matchdate(date)
917 m = matchall(repo)
917 m = matchall(repo)
918 results = {}
918 results = {}
919
919
920 def prep(ctx, fns):
920 def prep(ctx, fns):
921 d = ctx.date()
921 d = ctx.date()
922 if df(d[0]):
922 if df(d[0]):
923 results[ctx.rev()] = d
923 results[ctx.rev()] = d
924
924
925 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
925 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
926 rev = ctx.rev()
926 rev = ctx.rev()
927 if rev in results:
927 if rev in results:
928 ui.status(_("Found revision %s from %s\n") %
928 ui.status(_("Found revision %s from %s\n") %
929 (rev, util.datestr(results[rev])))
929 (rev, util.datestr(results[rev])))
930 return str(rev)
930 return str(rev)
931
931
932 raise util.Abort(_("revision matching date not found"))
932 raise util.Abort(_("revision matching date not found"))
933
933
934 def walkchangerevs(repo, match, opts, prepare):
934 def walkchangerevs(repo, match, opts, prepare):
935 '''Iterate over files and the revs in which they changed.
935 '''Iterate over files and the revs in which they changed.
936
936
937 Callers most commonly need to iterate backwards over the history
937 Callers most commonly need to iterate backwards over the history
938 in which they are interested. Doing so has awful (quadratic-looking)
938 in which they are interested. Doing so has awful (quadratic-looking)
939 performance, so we use iterators in a "windowed" way.
939 performance, so we use iterators in a "windowed" way.
940
940
941 We walk a window of revisions in the desired order. Within the
941 We walk a window of revisions in the desired order. Within the
942 window, we first walk forwards to gather data, then in the desired
942 window, we first walk forwards to gather data, then in the desired
943 order (usually backwards) to display it.
943 order (usually backwards) to display it.
944
944
945 This function returns an iterator yielding contexts. Before
945 This function returns an iterator yielding contexts. Before
946 yielding each context, the iterator will first call the prepare
946 yielding each context, the iterator will first call the prepare
947 function on each context in the window in forward order.'''
947 function on each context in the window in forward order.'''
948
948
949 def increasing_windows(start, end, windowsize=8, sizelimit=512):
949 def increasing_windows(start, end, windowsize=8, sizelimit=512):
950 if start < end:
950 if start < end:
951 while start < end:
951 while start < end:
952 yield start, min(windowsize, end-start)
952 yield start, min(windowsize, end-start)
953 start += windowsize
953 start += windowsize
954 if windowsize < sizelimit:
954 if windowsize < sizelimit:
955 windowsize *= 2
955 windowsize *= 2
956 else:
956 else:
957 while start > end:
957 while start > end:
958 yield start, min(windowsize, start-end-1)
958 yield start, min(windowsize, start-end-1)
959 start -= windowsize
959 start -= windowsize
960 if windowsize < sizelimit:
960 if windowsize < sizelimit:
961 windowsize *= 2
961 windowsize *= 2
962
962
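The windowed iteration described in the docstring above can be seen in isolation with a small standalone copy of the forward branch, reimplemented here only for illustration:

def increasing_windows_demo(start, end, windowsize=8, sizelimit=512):
    # forward direction only; the real helper also walks backwards
    while start < end:
        yield start, min(windowsize, end - start)
        start += windowsize
        if windowsize < sizelimit:
            windowsize *= 2

print(list(increasing_windows_demo(0, 100)))
# -> [(0, 8), (8, 16), (24, 32), (56, 44)]: windows double in size until capped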
963 follow = opts.get('follow') or opts.get('follow_first')
963 follow = opts.get('follow') or opts.get('follow_first')
964
964
965 if not len(repo):
965 if not len(repo):
966 return []
966 return []
967
967
968 if follow:
968 if follow:
969 defrange = '%s:0' % repo['.'].rev()
969 defrange = '%s:0' % repo['.'].rev()
970 else:
970 else:
971 defrange = '-1:0'
971 defrange = '-1:0'
972 revs = revrange(repo, opts['rev'] or [defrange])
972 revs = revrange(repo, opts['rev'] or [defrange])
973 wanted = set()
973 wanted = set()
974 slowpath = match.anypats() or (match.files() and opts.get('removed'))
974 slowpath = match.anypats() or (match.files() and opts.get('removed'))
975 fncache = {}
975 fncache = {}
976 change = util.cachefunc(repo.changectx)
976 change = util.cachefunc(repo.changectx)
977
977
978 if not slowpath and not match.files():
978 if not slowpath and not match.files():
979 # No files, no patterns. Display all revs.
979 # No files, no patterns. Display all revs.
980 wanted = set(revs)
980 wanted = set(revs)
981 copies = []
981 copies = []
982
982
983 if not slowpath:
983 if not slowpath:
984 # Only files, no patterns. Check the history of each file.
984 # Only files, no patterns. Check the history of each file.
985 def filerevgen(filelog, node):
985 def filerevgen(filelog, node):
986 cl_count = len(repo)
986 cl_count = len(repo)
987 if node is None:
987 if node is None:
988 last = len(filelog) - 1
988 last = len(filelog) - 1
989 else:
989 else:
990 last = filelog.rev(node)
990 last = filelog.rev(node)
991 for i, window in increasing_windows(last, nullrev):
991 for i, window in increasing_windows(last, nullrev):
992 revs = []
992 revs = []
993 for j in xrange(i - window, i + 1):
993 for j in xrange(i - window, i + 1):
994 n = filelog.node(j)
994 n = filelog.node(j)
995 revs.append((filelog.linkrev(j),
995 revs.append((filelog.linkrev(j),
996 follow and filelog.renamed(n)))
996 follow and filelog.renamed(n)))
997 for rev in reversed(revs):
997 for rev in reversed(revs):
998 # only yield rev for which we have the changelog, it can
998 # only yield rev for which we have the changelog, it can
999 # happen while doing "hg log" during a pull or commit
999 # happen while doing "hg log" during a pull or commit
1000 if rev[0] < cl_count:
1000 if rev[0] < cl_count:
1001 yield rev
1001 yield rev
1002 def iterfiles():
1002 def iterfiles():
1003 for filename in match.files():
1003 for filename in match.files():
1004 yield filename, None
1004 yield filename, None
1005 for filename_node in copies:
1005 for filename_node in copies:
1006 yield filename_node
1006 yield filename_node
1007 minrev, maxrev = min(revs), max(revs)
1007 minrev, maxrev = min(revs), max(revs)
1008 for file_, node in iterfiles():
1008 for file_, node in iterfiles():
1009 filelog = repo.file(file_)
1009 filelog = repo.file(file_)
1010 if not len(filelog):
1010 if not len(filelog):
1011 if node is None:
1011 if node is None:
1012 # A zero count may be a directory or deleted file, so
1012 # A zero count may be a directory or deleted file, so
1013 # try to find matching entries on the slow path.
1013 # try to find matching entries on the slow path.
1014 if follow:
1014 if follow:
1015 raise util.Abort(_('cannot follow nonexistent file: "%s"') % file_)
1015 raise util.Abort(_('cannot follow nonexistent file: "%s"') % file_)
1016 slowpath = True
1016 slowpath = True
1017 break
1017 break
1018 else:
1018 else:
1019 continue
1019 continue
1020 for rev, copied in filerevgen(filelog, node):
1020 for rev, copied in filerevgen(filelog, node):
1021 if rev <= maxrev:
1021 if rev <= maxrev:
1022 if rev < minrev:
1022 if rev < minrev:
1023 break
1023 break
1024 fncache.setdefault(rev, [])
1024 fncache.setdefault(rev, [])
1025 fncache[rev].append(file_)
1025 fncache[rev].append(file_)
1026 wanted.add(rev)
1026 wanted.add(rev)
1027 if follow and copied:
1027 if follow and copied:
1028 copies.append(copied)
1028 copies.append(copied)
1029 if slowpath:
1029 if slowpath:
1030 if follow:
1030 if follow:
1031 raise util.Abort(_('can only follow copies/renames for explicit '
1031 raise util.Abort(_('can only follow copies/renames for explicit '
1032 'filenames'))
1032 'filenames'))
1033
1033
1034 # The slow path checks files modified in every changeset.
1034 # The slow path checks files modified in every changeset.
1035 def changerevgen():
1035 def changerevgen():
1036 for i, window in increasing_windows(len(repo) - 1, nullrev):
1036 for i, window in increasing_windows(len(repo) - 1, nullrev):
1037 for j in xrange(i - window, i + 1):
1037 for j in xrange(i - window, i + 1):
1038 yield change(j)
1038 yield change(j)
1039
1039
1040 for ctx in changerevgen():
1040 for ctx in changerevgen():
1041 matches = filter(match, ctx.files())
1041 matches = filter(match, ctx.files())
1042 if matches:
1042 if matches:
1043 fncache[ctx.rev()] = matches
1043 fncache[ctx.rev()] = matches
1044 wanted.add(ctx.rev())
1044 wanted.add(ctx.rev())
1045
1045
1046 class followfilter(object):
1046 class followfilter(object):
1047 def __init__(self, onlyfirst=False):
1047 def __init__(self, onlyfirst=False):
1048 self.startrev = nullrev
1048 self.startrev = nullrev
1049 self.roots = set()
1049 self.roots = set()
1050 self.onlyfirst = onlyfirst
1050 self.onlyfirst = onlyfirst
1051
1051
1052 def match(self, rev):
1052 def match(self, rev):
1053 def realparents(rev):
1053 def realparents(rev):
1054 if self.onlyfirst:
1054 if self.onlyfirst:
1055 return repo.changelog.parentrevs(rev)[0:1]
1055 return repo.changelog.parentrevs(rev)[0:1]
1056 else:
1056 else:
1057 return filter(lambda x: x != nullrev,
1057 return filter(lambda x: x != nullrev,
1058 repo.changelog.parentrevs(rev))
1058 repo.changelog.parentrevs(rev))
1059
1059
1060 if self.startrev == nullrev:
1060 if self.startrev == nullrev:
1061 self.startrev = rev
1061 self.startrev = rev
1062 return True
1062 return True
1063
1063
1064 if rev > self.startrev:
1064 if rev > self.startrev:
1065 # forward: all descendants
1065 # forward: all descendants
1066 if not self.roots:
1066 if not self.roots:
1067 self.roots.add(self.startrev)
1067 self.roots.add(self.startrev)
1068 for parent in realparents(rev):
1068 for parent in realparents(rev):
1069 if parent in self.roots:
1069 if parent in self.roots:
1070 self.roots.add(rev)
1070 self.roots.add(rev)
1071 return True
1071 return True
1072 else:
1072 else:
1073 # backwards: all parents
1073 # backwards: all parents
1074 if not self.roots:
1074 if not self.roots:
1075 self.roots.update(realparents(self.startrev))
1075 self.roots.update(realparents(self.startrev))
1076 if rev in self.roots:
1076 if rev in self.roots:
1077 self.roots.remove(rev)
1077 self.roots.remove(rev)
1078 self.roots.update(realparents(rev))
1078 self.roots.update(realparents(rev))
1079 return True
1079 return True
1080
1080
1081 return False
1081 return False
1082
1082
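The backwards branch of followfilter.match() above can be exercised standalone with a made-up parent table standing in for repo.changelog (illustration only; the real class also handles the forward direction and the startrev bootstrap):

parenttab = {5: [3], 4: [2], 3: [1], 2: [1], 1: [0], 0: []}
roots = set()
startrev = 5

def wanted(rev):
    if not roots:
        roots.update(parenttab[startrev])
    if rev in roots:
        roots.remove(rev)
        roots.update(parenttab[rev])
        return True
    return False

print([r for r in range(4, -1, -1) if wanted(r)])   # -> [3, 1, 0]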
1083 # it might be worthwhile to do this in the iterator if the rev range
1083 # it might be worthwhile to do this in the iterator if the rev range
1084 # is descending and the prune args are all within that range
1084 # is descending and the prune args are all within that range
1085 for rev in opts.get('prune', ()):
1085 for rev in opts.get('prune', ()):
1086 rev = repo.changelog.rev(repo.lookup(rev))
1086 rev = repo.changelog.rev(repo.lookup(rev))
1087 ff = followfilter()
1087 ff = followfilter()
1088 stop = min(revs[0], revs[-1])
1088 stop = min(revs[0], revs[-1])
1089 for x in xrange(rev, stop-1, -1):
1089 for x in xrange(rev, stop-1, -1):
1090 if ff.match(x):
1090 if ff.match(x):
1091 wanted.discard(x)
1091 wanted.discard(x)
1092
1092
1093 def iterate():
1093 def iterate():
1094 if follow and not match.files():
1094 if follow and not match.files():
1095 ff = followfilter(onlyfirst=opts.get('follow_first'))
1095 ff = followfilter(onlyfirst=opts.get('follow_first'))
1096 def want(rev):
1096 def want(rev):
1097 return ff.match(rev) and rev in wanted
1097 return ff.match(rev) and rev in wanted
1098 else:
1098 else:
1099 def want(rev):
1099 def want(rev):
1100 return rev in wanted
1100 return rev in wanted
1101
1101
1102 for i, window in increasing_windows(0, len(revs)):
1102 for i, window in increasing_windows(0, len(revs)):
1103 change = util.cachefunc(repo.changectx)
1103 change = util.cachefunc(repo.changectx)
1104 nrevs = [rev for rev in revs[i:i+window] if want(rev)]
1104 nrevs = [rev for rev in revs[i:i+window] if want(rev)]
1105 for rev in sorted(nrevs):
1105 for rev in sorted(nrevs):
1106 fns = fncache.get(rev)
1106 fns = fncache.get(rev)
1107 ctx = change(rev)
1107 ctx = change(rev)
1108 if not fns:
1108 if not fns:
1109 def fns_generator():
1109 def fns_generator():
1110 for f in ctx.files():
1110 for f in ctx.files():
1111 if match(f):
1111 if match(f):
1112 yield f
1112 yield f
1113 fns = fns_generator()
1113 fns = fns_generator()
1114 prepare(ctx, fns)
1114 prepare(ctx, fns)
1115 for rev in nrevs:
1115 for rev in nrevs:
1116 yield change(rev)
1116 yield change(rev)
1117 return iterate()
1117 return iterate()
1118
1118
1119 def commit(ui, repo, commitfunc, pats, opts):
1119 def commit(ui, repo, commitfunc, pats, opts):
1120 '''commit the specified files or all outstanding changes'''
1120 '''commit the specified files or all outstanding changes'''
1121 date = opts.get('date')
1121 date = opts.get('date')
1122 if date:
1122 if date:
1123 opts['date'] = util.parsedate(date)
1123 opts['date'] = util.parsedate(date)
1124 message = logmessage(opts)
1124 message = logmessage(opts)
1125
1125
1126 # extract addremove carefully -- this function can be called from a command
1126 # extract addremove carefully -- this function can be called from a command
1127 # that doesn't support addremove
1127 # that doesn't support addremove
1128 if opts.get('addremove'):
1128 if opts.get('addremove'):
1129 addremove(repo, pats, opts)
1129 addremove(repo, pats, opts)
1130
1130
1131 return commitfunc(ui, repo, message, match(repo, pats, opts), opts)
1131 return commitfunc(ui, repo, message, match(repo, pats, opts), opts)
1132
1132
1133 def commiteditor(repo, ctx, subs):
1133 def commiteditor(repo, ctx, subs):
1134 if ctx.description():
1134 if ctx.description():
1135 return ctx.description()
1135 return ctx.description()
1136 return commitforceeditor(repo, ctx, subs)
1136 return commitforceeditor(repo, ctx, subs)
1137
1137
1138 def commitforceeditor(repo, ctx, subs):
1138 def commitforceeditor(repo, ctx, subs):
1139 edittext = []
1139 edittext = []
1140 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1140 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1141 if ctx.description():
1141 if ctx.description():
1142 edittext.append(ctx.description())
1142 edittext.append(ctx.description())
1143 edittext.append("")
1143 edittext.append("")
1144 edittext.append("") # Empty line between message and comments.
1144 edittext.append("") # Empty line between message and comments.
1145 edittext.append(_("HG: Enter commit message."
1145 edittext.append(_("HG: Enter commit message."
1146 " Lines beginning with 'HG:' are removed."))
1146 " Lines beginning with 'HG:' are removed."))
1147 edittext.append(_("HG: Leave message empty to abort commit."))
1147 edittext.append(_("HG: Leave message empty to abort commit."))
1148 edittext.append("HG: --")
1148 edittext.append("HG: --")
1149 edittext.append(_("HG: user: %s") % ctx.user())
1149 edittext.append(_("HG: user: %s") % ctx.user())
1150 if ctx.p2():
1150 if ctx.p2():
1151 edittext.append(_("HG: branch merge"))
1151 edittext.append(_("HG: branch merge"))
1152 if ctx.branch():
1152 if ctx.branch():
1153 edittext.append(_("HG: branch '%s'")
1153 edittext.append(_("HG: branch '%s'")
1154 % encoding.tolocal(ctx.branch()))
1154 % encoding.tolocal(ctx.branch()))
1155 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1155 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1156 edittext.extend([_("HG: added %s") % f for f in added])
1156 edittext.extend([_("HG: added %s") % f for f in added])
1157 edittext.extend([_("HG: changed %s") % f for f in modified])
1157 edittext.extend([_("HG: changed %s") % f for f in modified])
1158 edittext.extend([_("HG: removed %s") % f for f in removed])
1158 edittext.extend([_("HG: removed %s") % f for f in removed])
1159 if not added and not modified and not removed:
1159 if not added and not modified and not removed:
1160 edittext.append(_("HG: no files changed"))
1160 edittext.append(_("HG: no files changed"))
1161 edittext.append("")
1161 edittext.append("")
1162 # run editor in the repository root
1162 # run editor in the repository root
1163 olddir = os.getcwd()
1163 olddir = os.getcwd()
1164 os.chdir(repo.root)
1164 os.chdir(repo.root)
1165 text = repo.ui.edit("\n".join(edittext), ctx.user())
1165 text = repo.ui.edit("\n".join(edittext), ctx.user())
1166 text = re.sub("(?m)^HG:.*\n", "", text)
1166 text = re.sub("(?m)^HG:.*\n", "", text)
1167 os.chdir(olddir)
1167 os.chdir(olddir)
1168
1168
1169 if not text.strip():
1169 if not text.strip():
1170 raise util.Abort(_("empty commit message"))
1170 raise util.Abort(_("empty commit message"))
1171
1171
1172 return text
1172 return text
@@ -1,264 +1,266 @@
1 # posix.py - Posix utility function implementations for Mercurial
1 # posix.py - Posix utility function implementations for Mercurial
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from i18n import _
8 from i18n import _
9 import osutil
9 import osutil
10 import os, sys, errno, stat, getpass, pwd, grp, fcntl
10 import os, sys, errno, stat, getpass, pwd, grp, fcntl
11
11
12 posixfile = open
12 posixfile = open
13 nulldev = '/dev/null'
13 nulldev = '/dev/null'
14 normpath = os.path.normpath
14 normpath = os.path.normpath
15 samestat = os.path.samestat
15 samestat = os.path.samestat
16 rename = os.rename
16 rename = os.rename
17 expandglobs = False
17 expandglobs = False
18
18
19 umask = os.umask(0)
19 umask = os.umask(0)
20 os.umask(umask)
20 os.umask(umask)
21
21
22 def openhardlinks():
22 def openhardlinks():
23 '''return true if it is safe to hold open file handles to hardlinks'''
23 '''return true if it is safe to hold open file handles to hardlinks'''
24 return True
24 return True
25
25
26 def rcfiles(path):
26 def rcfiles(path):
27 rcs = [os.path.join(path, 'hgrc')]
27 rcs = [os.path.join(path, 'hgrc')]
28 rcdir = os.path.join(path, 'hgrc.d')
28 rcdir = os.path.join(path, 'hgrc.d')
29 try:
29 try:
30 rcs.extend([os.path.join(rcdir, f)
30 rcs.extend([os.path.join(rcdir, f)
31 for f, kind in osutil.listdir(rcdir)
31 for f, kind in osutil.listdir(rcdir)
32 if f.endswith(".rc")])
32 if f.endswith(".rc")])
33 except OSError:
33 except OSError:
34 pass
34 pass
35 return rcs
35 return rcs
36
36
37 def system_rcpath():
37 def system_rcpath():
38 path = []
38 path = []
39 # old mod_python does not set sys.argv
39 # old mod_python does not set sys.argv
40 if len(getattr(sys, 'argv', [])) > 0:
40 if len(getattr(sys, 'argv', [])) > 0:
41 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
41 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
42 '/../etc/mercurial'))
42 '/../etc/mercurial'))
43 path.extend(rcfiles('/etc/mercurial'))
43 path.extend(rcfiles('/etc/mercurial'))
44 return path
44 return path
45
45
46 def user_rcpath():
46 def user_rcpath():
47 return [os.path.expanduser('~/.hgrc')]
47 return [os.path.expanduser('~/.hgrc')]
48
48
49 def parse_patch_output(output_line):
49 def parse_patch_output(output_line):
50 """parses the output produced by patch and returns the filename"""
50 """parses the output produced by patch and returns the filename"""
51 pf = output_line[14:]
51 pf = output_line[14:]
52 if os.sys.platform == 'OpenVMS':
52 if os.sys.platform == 'OpenVMS':
53 if pf[0] == '`':
53 if pf[0] == '`':
54 pf = pf[1:-1] # Remove the quotes
54 pf = pf[1:-1] # Remove the quotes
55 else:
55 else:
56 if pf.startswith("'") and pf.endswith("'") and " " in pf:
56 if pf.startswith("'") and pf.endswith("'") and " " in pf:
57 pf = pf[1:-1] # Remove the quotes
57 pf = pf[1:-1] # Remove the quotes
58 return pf
58 return pf
59
59
60 def sshargs(sshcmd, host, user, port):
60 def sshargs(sshcmd, host, user, port):
61 '''Build argument list for ssh'''
61 '''Build argument list for ssh'''
62 args = user and ("%s@%s" % (user, host)) or host
62 args = user and ("%s@%s" % (user, host)) or host
63 return port and ("%s -p %s" % (args, port)) or args
63 return port and ("%s -p %s" % (args, port)) or args
64
64
65 def is_exec(f):
65 def is_exec(f):
66 """check whether a file is executable"""
66 """check whether a file is executable"""
67 return (os.lstat(f).st_mode & 0100 != 0)
67 return (os.lstat(f).st_mode & 0100 != 0)
68
68
69 def set_flags(f, l, x):
69 def set_flags(f, l, x):
70 s = os.lstat(f).st_mode
70 s = os.lstat(f).st_mode
71 if l:
71 if l:
72 if not stat.S_ISLNK(s):
72 if not stat.S_ISLNK(s):
73 # switch file to link
73 # switch file to link
74 data = open(f).read()
74 data = open(f).read()
75 os.unlink(f)
75 os.unlink(f)
76 try:
76 try:
77 os.symlink(data, f)
77 os.symlink(data, f)
78 except:
78 except:
79 # failed to make a link, rewrite file
79 # failed to make a link, rewrite file
80 open(f, "w").write(data)
80 open(f, "w").write(data)
81 # no chmod needed at this point
81 # no chmod needed at this point
82 return
82 return
83 if stat.S_ISLNK(s):
83 if stat.S_ISLNK(s):
84 # switch link to file
84 # switch link to file
85 data = os.readlink(f)
85 data = os.readlink(f)
86 os.unlink(f)
86 os.unlink(f)
87 open(f, "w").write(data)
87 open(f, "w").write(data)
88 s = 0666 & ~umask # avoid restatting for chmod
88 s = 0666 & ~umask # avoid restatting for chmod
89
89
90 sx = s & 0100
90 sx = s & 0100
91 if x and not sx:
91 if x and not sx:
92 # Turn on +x for every +r bit when making a file executable
92 # Turn on +x for every +r bit when making a file executable
93 # and obey umask.
93 # and obey umask.
94 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
94 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
95 elif not x and sx:
95 elif not x and sx:
96 # Turn off all +x bits
96 # Turn off all +x bits
97 os.chmod(f, s & 0666)
97 os.chmod(f, s & 0666)
98
98
99 def set_binary(fd):
99 def set_binary(fd):
100 pass
100 pass
101
101
102 def pconvert(path):
102 def pconvert(path):
103 return path
103 return path
104
104
105 def localpath(path):
105 def localpath(path):
106 return path
106 return path
107
107
108 def samefile(fpath1, fpath2):
108 def samefile(fpath1, fpath2):
109 """Returns whether path1 and path2 refer to the same file. This is only
109 """Returns whether path1 and path2 refer to the same file. This is only
110 guaranteed to work for files, not directories."""
110 guaranteed to work for files, not directories."""
111 return os.path.samefile(fpath1, fpath2)
111 return os.path.samefile(fpath1, fpath2)
112
112
113 def samedevice(fpath1, fpath2):
113 def samedevice(fpath1, fpath2):
114 """Returns whether fpath1 and fpath2 are on the same device. This is only
114 """Returns whether fpath1 and fpath2 are on the same device. This is only
115 guaranteed to work for files, not directories."""
115 guaranteed to work for files, not directories."""
116 st1 = os.lstat(fpath1)
116 st1 = os.lstat(fpath1)
117 st2 = os.lstat(fpath2)
117 st2 = os.lstat(fpath2)
118 return st1.st_dev == st2.st_dev
118 return st1.st_dev == st2.st_dev
119
119
120 if sys.platform == 'darwin':
120 if sys.platform == 'darwin':
121 def realpath(path):
121 def realpath(path):
122 '''
122 '''
123 Returns the true, canonical file system path equivalent to the given
123 Returns the true, canonical file system path equivalent to the given
124 path.
124 path.
125
125
126 Equivalent means, in this case, resulting in the same, unique
126 Equivalent means, in this case, resulting in the same, unique
127 file system link to the path. Every file system entry, whether a file,
127 file system link to the path. Every file system entry, whether a file,
128 directory, hard link or symbolic link or special, will have a single
128 directory, hard link or symbolic link or special, will have a single
129 path preferred by the system, but may allow multiple, differing path
129 path preferred by the system, but may allow multiple, differing path
130 lookups to point to it.
130 lookups to point to it.
131
131
132 Most regular UNIX file systems only allow a file system entry to be
132 Most regular UNIX file systems only allow a file system entry to be
133 looked up by its distinct path. Obviously, this does not apply to case
133 looked up by its distinct path. Obviously, this does not apply to case
134 insensitive file systems, whether case preserving or not. The most
134 insensitive file systems, whether case preserving or not. The most
135 complex issue to deal with is file systems transparently reencoding the
135 complex issue to deal with is file systems transparently reencoding the
136 path, such as the non-standard Unicode normalisation required for HFS+
136 path, such as the non-standard Unicode normalisation required for HFS+
137 and HFSX.
137 and HFSX.
138 '''
138 '''
139 # Constants copied from /usr/include/sys/fcntl.h
139 # Constants copied from /usr/include/sys/fcntl.h
140 F_GETPATH = 50
140 F_GETPATH = 50
141 O_SYMLINK = 0x200000
141 O_SYMLINK = 0x200000
142
142
143 try:
143 try:
144 fd = os.open(path, O_SYMLINK)
144 fd = os.open(path, O_SYMLINK)
145 except OSError, err:
145 except OSError, err:
146 if err.errno == errno.ENOENT:
146 if err.errno == errno.ENOENT:
147 return path
147 return path
148 raise
148 raise
149
149
150 try:
150 try:
151 return fcntl.fcntl(fd, F_GETPATH, '\0' * 1024).rstrip('\0')
151 return fcntl.fcntl(fd, F_GETPATH, '\0' * 1024).rstrip('\0')
152 finally:
152 finally:
153 os.close(fd)
153 os.close(fd)
154 else:
154 else:
155 # Fallback to the likely inadequate Python builtin function.
155 # Fallback to the likely inadequate Python builtin function.
156 realpath = os.path.realpath
156 realpath = os.path.realpath
157
157
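An illustrative reading of the darwin realpath() above; the paths are hypothetical:

# realpath('/Users/ME/Repo')  -> '/Users/me/Repo'  (the single spelling preferred
#                                 by a case-insensitive HFS+ volume, via F_GETPATH)
# realpath('/tmp/missing')    -> '/tmp/missing'    (ENOENT: path returned as-is)
# On other platforms the fallback os.path.realpath only resolves symlinks
# and relative components.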
158 def shellquote(s):
158 def shellquote(s):
159 if os.sys.platform == 'OpenVMS':
159 if os.sys.platform == 'OpenVMS':
160 return '"%s"' % s
160 return '"%s"' % s
161 else:
161 else:
162 return "'%s'" % s.replace("'", "'\\''")
162 return "'%s'" % s.replace("'", "'\\''")
163
163
164 def quotecommand(cmd):
164 def quotecommand(cmd):
165 return cmd
165 return cmd
166
166
167 def popen(command, mode='r'):
167 def popen(command, mode='r'):
168 return os.popen(command, mode)
168 return os.popen(command, mode)
169
169
170 def testpid(pid):
170 def testpid(pid):
171 '''return False if pid dead, True if running or not sure'''
171 '''return False if pid dead, True if running or not sure'''
172 if os.sys.platform == 'OpenVMS':
172 if os.sys.platform == 'OpenVMS':
173 return True
173 return True
174 try:
174 try:
175 os.kill(pid, 0)
175 os.kill(pid, 0)
176 return True
176 return True
177 except OSError, inst:
177 except OSError, inst:
178 return inst.errno != errno.ESRCH
178 return inst.errno != errno.ESRCH
179
179
180 def explain_exit(code):
180 def explain_exit(code):
181 """return a 2-tuple (desc, code) describing a subprocess status
181 """return a 2-tuple (desc, code) describing a subprocess status
182 (codes from kill are negative - not os.system/wait encoding)"""
182 (codes from kill are negative - not os.system/wait encoding)"""
183 if code >= 0:
183 if code >= 0:
184 return _("exited with status %d") % code, code
184 return _("exited with status %d") % code, code
185 return _("killed by signal %d") % -code, -code
185 return _("killed by signal %d") % -code, -code
186
186
187 def isowner(st):
187 def isowner(st):
188 """Return True if the stat object st is from the current user."""
188 """Return True if the stat object st is from the current user."""
189 return st.st_uid == os.getuid()
189 return st.st_uid == os.getuid()
190
190
191 def find_exe(command):
191 def find_exe(command):
192 '''Find executable for command searching like which does.
192 '''Find executable for command searching like which does.
193 If command is a basename then PATH is searched for command.
193 If command is a basename then PATH is searched for command.
194 PATH isn't searched if command is an absolute or relative path.
194 PATH isn't searched if command is an absolute or relative path.
195 If command isn't found None is returned.'''
195 If command isn't found None is returned.'''
196 if sys.platform == 'OpenVMS':
196 if sys.platform == 'OpenVMS':
197 return command
197 return command
198
198
199 def findexisting(executable):
199 def findexisting(executable):
200 'Will return executable if existing file'
200 'Will return executable if existing file'
201 if os.path.exists(executable):
201 if os.path.exists(executable):
202 return executable
202 return executable
203 return None
203 return None
204
204
205 if os.sep in command:
205 if os.sep in command:
206 return findexisting(command)
206 return findexisting(command)
207
207
208 for path in os.environ.get('PATH', '').split(os.pathsep):
208 for path in os.environ.get('PATH', '').split(os.pathsep):
209 executable = findexisting(os.path.join(path, command))
209 executable = findexisting(os.path.join(path, command))
210 if executable is not None:
210 if executable is not None:
211 return executable
211 return executable
212 return None
212 return None
213
213
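For illustration of the rule in the docstring above (results depend on the machine; the values shown are typical, not guaranteed):

# find_exe('sh')    -> '/bin/sh'  (basename: PATH is searched)
# find_exe('./sh')  -> './sh' only if that file exists, else None (PATH ignored)
# Note that findexisting() only tests os.path.exists, not the executable bit.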
214 def set_signal_handler():
214 def set_signal_handler():
215 pass
215 pass
216
216
217 def statfiles(files):
217 def statfiles(files):
218 'Stat each file in files and yield stat or None if file does not exist.'
218 'Stat each file in files and yield stat or None if file does not exist.'
219 lstat = os.lstat
219 lstat = os.lstat
220 for nf in files:
220 for nf in files:
221 try:
221 try:
222 st = lstat(nf)
222 st = lstat(nf)
223 except OSError, err:
223 except OSError, err:
224 if err.errno not in (errno.ENOENT, errno.ENOTDIR):
224 if err.errno not in (errno.ENOENT, errno.ENOTDIR):
225 raise
225 raise
226 st = None
226 st = None
227 yield st
227 yield st
228
228
229 def getuser():
229 def getuser():
230 '''return name of current user'''
230 '''return name of current user'''
231 return getpass.getuser()
231 return getpass.getuser()
232
232
233 def expand_glob(pats):
233 def expand_glob(pats):
234 '''On Windows, expand the implicit globs in a list of patterns'''
234 '''On Windows, expand the implicit globs in a list of patterns'''
235 return list(pats)
235 return list(pats)
236
236
237 def username(uid=None):
237 def username(uid=None):
238 """Return the name of the user with the given uid.
238 """Return the name of the user with the given uid.
239
239
240 If uid is None, return the name of the current user."""
240 If uid is None, return the name of the current user."""
241
241
242 if uid is None:
242 if uid is None:
243 uid = os.getuid()
243 uid = os.getuid()
244 try:
244 try:
245 return pwd.getpwuid(uid)[0]
245 return pwd.getpwuid(uid)[0]
246 except KeyError:
246 except KeyError:
247 return str(uid)
247 return str(uid)
248
248
249 def groupname(gid=None):
249 def groupname(gid=None):
250 """Return the name of the group with the given gid.
250 """Return the name of the group with the given gid.
251
251
252 If gid is None, return the name of the current group."""
252 If gid is None, return the name of the current group."""
253
253
254 if gid is None:
254 if gid is None:
255 gid = os.getgid()
255 gid = os.getgid()
256 try:
256 try:
257 return grp.getgrgid(gid)[0]
257 return grp.getgrgid(gid)[0]
258 except KeyError:
258 except KeyError:
259 return str(gid)
259 return str(gid)
260
260
261 def spawndetached(args):
261 def spawndetached(args):
262 return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
262 return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
263 args[0], args)
263 args[0], args)
264
264
265 +def gethgcmd():
266 +    return sys.argv[:1]
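gethgcmd() is the POSIX half of the fix above: argv[0] is normally the re-runnable 'hg' command itself (spawnvp resolves it through PATH), so returning sys.argv[:1] is enough here; other platforms presumably have to reconstruct the command differently, which is why this lives in the per-platform module.

# Example: when invoked as "hg serve -d", gethgcmd() -> ['hg']  (illustrative value)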
@@ -1,1276 +1,1287 @@
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2, incorporated herein by reference.
8 # GNU General Public License version 2, incorporated herein by reference.
9
9
10 """Mercurial utility functions and platform specfic implementations.
10 """Mercurial utility functions and platform specfic implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from i18n import _
16 from i18n import _
17 import error, osutil, encoding
17 import error, osutil, encoding
18 import cStringIO, errno, re, shutil, sys, tempfile, traceback
18 import cStringIO, errno, re, shutil, sys, tempfile, traceback
19 import os, stat, time, calendar, textwrap
19 import os, stat, time, calendar, textwrap
20 import imp
20 import imp
21
21
22 # Python compatibility
22 # Python compatibility
23
23
24 def sha1(s):
24 def sha1(s):
25 return _fastsha1(s)
25 return _fastsha1(s)
26
26
27 def _fastsha1(s):
27 def _fastsha1(s):
28 # This function will import sha1 from hashlib or sha (whichever is
28 # This function will import sha1 from hashlib or sha (whichever is
29 # available) and overwrite itself with it on the first call.
29 # available) and overwrite itself with it on the first call.
30 # Subsequent calls will go directly to the imported function.
30 # Subsequent calls will go directly to the imported function.
31 try:
31 try:
32 from hashlib import sha1 as _sha1
32 from hashlib import sha1 as _sha1
33 except ImportError:
33 except ImportError:
34 from sha import sha as _sha1
34 from sha import sha as _sha1
35 global _fastsha1, sha1
35 global _fastsha1, sha1
36 _fastsha1 = sha1 = _sha1
36 _fastsha1 = sha1 = _sha1
37 return _sha1(s)
37 return _sha1(s)
38
38
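The install-on-first-use trick used by _fastsha1() above can be shown in miniature; the demo below is not part of Mercurial, it just rebinds a module-level name the same way:

def _slow_demo(s):
    global _slow_demo
    _slow_demo = len    # first call rebinds the module-level name...
    return len(s)       # ...and still answers this call itself

_slow_demo('abc')       # takes the slow path once
_slow_demo('abcd')      # now calls len() directly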
39 import subprocess
39 import subprocess
40 closefds = os.name == 'posix'
40 closefds = os.name == 'posix'
41
41
42 def popen2(cmd, env=None, newlines=False):
42 def popen2(cmd, env=None, newlines=False):
43 # Setting bufsize to -1 lets the system decide the buffer size.
43 # Setting bufsize to -1 lets the system decide the buffer size.
44 # The default for bufsize is 0, meaning unbuffered. This leads to
44 # The default for bufsize is 0, meaning unbuffered. This leads to
45 # poor performance on Mac OS X: http://bugs.python.org/issue4194
45 # poor performance on Mac OS X: http://bugs.python.org/issue4194
46 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
46 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
47 close_fds=closefds,
47 close_fds=closefds,
48 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
48 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
49 universal_newlines=newlines,
49 universal_newlines=newlines,
50 env=env)
50 env=env)
51 return p.stdin, p.stdout
51 return p.stdin, p.stdout
52
52
53 def popen3(cmd, env=None, newlines=False):
53 def popen3(cmd, env=None, newlines=False):
54 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
54 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
55 close_fds=closefds,
55 close_fds=closefds,
56 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
56 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
57 stderr=subprocess.PIPE,
57 stderr=subprocess.PIPE,
58 universal_newlines=newlines,
58 universal_newlines=newlines,
59 env=env)
59 env=env)
60 return p.stdin, p.stdout, p.stderr
60 return p.stdin, p.stdout, p.stderr
61
61
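# Hedged usage sketch (command and data are illustrative): popen2 hands back
# the child's stdin/stdout pipes, leaving stderr attached to the parent.
def _popen2_example():
    fin, fout = popen2('sort')
    fin.write('b\na\n')
    fin.close()
    return fout.read()          # 'a\nb\n' wherever sort(1) is available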
62 def version():
62 def version():
63 """Return version information if available."""
63 """Return version information if available."""
64 try:
64 try:
65 import __version__
65 import __version__
66 return __version__.version
66 return __version__.version
67 except ImportError:
67 except ImportError:
68 return 'unknown'
68 return 'unknown'
69
69
70 # used by parsedate
70 # used by parsedate
71 defaultdateformats = (
71 defaultdateformats = (
72 '%Y-%m-%d %H:%M:%S',
72 '%Y-%m-%d %H:%M:%S',
73 '%Y-%m-%d %I:%M:%S%p',
73 '%Y-%m-%d %I:%M:%S%p',
74 '%Y-%m-%d %H:%M',
74 '%Y-%m-%d %H:%M',
75 '%Y-%m-%d %I:%M%p',
75 '%Y-%m-%d %I:%M%p',
76 '%Y-%m-%d',
76 '%Y-%m-%d',
77 '%m-%d',
77 '%m-%d',
78 '%m/%d',
78 '%m/%d',
79 '%m/%d/%y',
79 '%m/%d/%y',
80 '%m/%d/%Y',
80 '%m/%d/%Y',
81 '%a %b %d %H:%M:%S %Y',
81 '%a %b %d %H:%M:%S %Y',
82 '%a %b %d %I:%M:%S%p %Y',
82 '%a %b %d %I:%M:%S%p %Y',
83 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
83 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
84 '%b %d %H:%M:%S %Y',
84 '%b %d %H:%M:%S %Y',
85 '%b %d %I:%M:%S%p %Y',
85 '%b %d %I:%M:%S%p %Y',
86 '%b %d %H:%M:%S',
86 '%b %d %H:%M:%S',
87 '%b %d %I:%M:%S%p',
87 '%b %d %I:%M:%S%p',
88 '%b %d %H:%M',
88 '%b %d %H:%M',
89 '%b %d %I:%M%p',
89 '%b %d %I:%M%p',
90 '%b %d %Y',
90 '%b %d %Y',
91 '%b %d',
91 '%b %d',
92 '%H:%M:%S',
92 '%H:%M:%S',
93 '%I:%M:%S%p',
93 '%I:%M:%S%p',
94 '%H:%M',
94 '%H:%M',
95 '%I:%M%p',
95 '%I:%M%p',
96 )
96 )
97
97
98 extendeddateformats = defaultdateformats + (
98 extendeddateformats = defaultdateformats + (
99 "%Y",
99 "%Y",
100 "%Y-%m",
100 "%Y-%m",
101 "%b",
101 "%b",
102 "%b %Y",
102 "%b %Y",
103 )
103 )
104
104
105 def cachefunc(func):
105 def cachefunc(func):
106 '''cache the result of function calls'''
106 '''cache the result of function calls'''
107 # XXX doesn't handle keyword args
107 # XXX doesn't handle keyword args
108 cache = {}
108 cache = {}
109 if func.func_code.co_argcount == 1:
109 if func.func_code.co_argcount == 1:
110 # we gain a small amount of time because
110 # we gain a small amount of time because
111 # we don't need to pack/unpack the list
111 # we don't need to pack/unpack the list
112 def f(arg):
112 def f(arg):
113 if arg not in cache:
113 if arg not in cache:
114 cache[arg] = func(arg)
114 cache[arg] = func(arg)
115 return cache[arg]
115 return cache[arg]
116 else:
116 else:
117 def f(*args):
117 def f(*args):
118 if args not in cache:
118 if args not in cache:
119 cache[args] = func(*args)
119 cache[args] = func(*args)
120 return cache[args]
120 return cache[args]
121
121
122 return f
122 return f
123
123
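# Illustrative usage (the function is a stand-in, not from util.py): cachefunc
# memoizes on the positional arguments, so the second call below is served
# straight from the cache dict without recomputation.
def _cachefunc_example():
    square = cachefunc(lambda n: n * n)
    return square(4), square(4)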
124 def lrucachefunc(func):
124 def lrucachefunc(func):
125 '''cache most recent results of function calls'''
125 '''cache most recent results of function calls'''
126 cache = {}
126 cache = {}
127 order = []
127 order = []
128 if func.func_code.co_argcount == 1:
128 if func.func_code.co_argcount == 1:
129 def f(arg):
129 def f(arg):
130 if arg not in cache:
130 if arg not in cache:
131 if len(cache) > 20:
131 if len(cache) > 20:
132 del cache[order.pop(0)]
132 del cache[order.pop(0)]
133 cache[arg] = func(arg)
133 cache[arg] = func(arg)
134 else:
134 else:
135 order.remove(arg)
135 order.remove(arg)
136 order.append(arg)
136 order.append(arg)
137 return cache[arg]
137 return cache[arg]
138 else:
138 else:
139 def f(*args):
139 def f(*args):
140 if args not in cache:
140 if args not in cache:
141 if len(cache) > 20:
141 if len(cache) > 20:
142 del cache[order.pop(0)]
142 del cache[order.pop(0)]
143 cache[args] = func(*args)
143 cache[args] = func(*args)
144 else:
144 else:
145 order.remove(args)
145 order.remove(args)
146 order.append(args)
146 order.append(args)
147 return cache[args]
147 return cache[args]
148
148
149 return f
149 return f
150
150
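# Same idea as cachefunc but bounded (illustrative usage only): once the cache
# holds more than 20 entries, the least recently used key is evicted.
def _lrucachefunc_example():
    lookup = lrucachefunc(lambda key: key.upper())
    return [lookup(c) for c in 'abcdefghijklmnopqrstuvwxyz']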
151 class propertycache(object):
151 class propertycache(object):
152 def __init__(self, func):
152 def __init__(self, func):
153 self.func = func
153 self.func = func
154 self.name = func.__name__
154 self.name = func.__name__
155 def __get__(self, obj, type=None):
155 def __get__(self, obj, type=None):
156 result = self.func(obj)
156 result = self.func(obj)
157 setattr(obj, self.name, result)
157 setattr(obj, self.name, result)
158 return result
158 return result
159
159
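# Minimal sketch (hypothetical class, not part of util.py): the first access
# runs the decorated function, then setattr replaces the descriptor with the
# computed value, so later accesses are plain attribute lookups.
class _Cached(object):
    @propertycache
    def expensive(self):
        return sum(range(1000))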
160 def pipefilter(s, cmd):
160 def pipefilter(s, cmd):
161 '''filter string S through command CMD, returning its output'''
161 '''filter string S through command CMD, returning its output'''
162 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
162 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
163 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
163 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
164 pout, perr = p.communicate(s)
164 pout, perr = p.communicate(s)
165 return pout
165 return pout
166
166
167 def tempfilter(s, cmd):
167 def tempfilter(s, cmd):
168 '''filter string S through a pair of temporary files with CMD.
168 '''filter string S through a pair of temporary files with CMD.
169 CMD is used as a template to create the real command to be run,
169 CMD is used as a template to create the real command to be run,
170 with the strings INFILE and OUTFILE replaced by the real names of
170 with the strings INFILE and OUTFILE replaced by the real names of
171 the temporary files generated.'''
171 the temporary files generated.'''
172 inname, outname = None, None
172 inname, outname = None, None
173 try:
173 try:
174 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
174 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
175 fp = os.fdopen(infd, 'wb')
175 fp = os.fdopen(infd, 'wb')
176 fp.write(s)
176 fp.write(s)
177 fp.close()
177 fp.close()
178 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
178 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
179 os.close(outfd)
179 os.close(outfd)
180 cmd = cmd.replace('INFILE', inname)
180 cmd = cmd.replace('INFILE', inname)
181 cmd = cmd.replace('OUTFILE', outname)
181 cmd = cmd.replace('OUTFILE', outname)
182 code = os.system(cmd)
182 code = os.system(cmd)
183 if sys.platform == 'OpenVMS' and code & 1:
183 if sys.platform == 'OpenVMS' and code & 1:
184 code = 0
184 code = 0
185 if code: raise Abort(_("command '%s' failed: %s") %
185 if code: raise Abort(_("command '%s' failed: %s") %
186 (cmd, explain_exit(code)))
186 (cmd, explain_exit(code)))
187 return open(outname, 'rb').read()
187 return open(outname, 'rb').read()
188 finally:
188 finally:
189 try:
189 try:
190 if inname: os.unlink(inname)
190 if inname: os.unlink(inname)
191 except: pass
191 except: pass
192 try:
192 try:
193 if outname: os.unlink(outname)
193 if outname: os.unlink(outname)
194 except: pass
194 except: pass
195
195
196 filtertable = {
196 filtertable = {
197 'tempfile:': tempfilter,
197 'tempfile:': tempfilter,
198 'pipe:': pipefilter,
198 'pipe:': pipefilter,
199 }
199 }
200
200
201 def filter(s, cmd):
201 def filter(s, cmd):
202 "filter a string through a command that transforms its input to its output"
202 "filter a string through a command that transforms its input to its output"
203 for name, fn in filtertable.iteritems():
203 for name, fn in filtertable.iteritems():
204 if cmd.startswith(name):
204 if cmd.startswith(name):
205 return fn(s, cmd[len(name):].lstrip())
205 return fn(s, cmd[len(name):].lstrip())
206 return pipefilter(s, cmd)
206 return pipefilter(s, cmd)
207
207
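# Hedged example of the prefix dispatch above (the command is illustrative and
# assumes a POSIX tr(1)): 'tempfile:' and 'pipe:' select a filter from
# filtertable, anything else falls through to pipefilter.
def _filter_example():
    return filter('hg\n', 'pipe: tr a-z A-Z')   # 'HG\n'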
208 def binary(s):
208 def binary(s):
209 """return true if a string is binary data"""
209 """return true if a string is binary data"""
210 return bool(s and '\0' in s)
210 return bool(s and '\0' in s)
211
211
212 def increasingchunks(source, min=1024, max=65536):
212 def increasingchunks(source, min=1024, max=65536):
213 '''return no less than min bytes per chunk while data remains,
213 '''return no less than min bytes per chunk while data remains,
214 doubling min after each chunk until it reaches max'''
214 doubling min after each chunk until it reaches max'''
215 def log2(x):
215 def log2(x):
216 if not x:
216 if not x:
217 return 0
217 return 0
218 i = 0
218 i = 0
219 while x:
219 while x:
220 x >>= 1
220 x >>= 1
221 i += 1
221 i += 1
222 return i - 1
222 return i - 1
223
223
224 buf = []
224 buf = []
225 blen = 0
225 blen = 0
226 for chunk in source:
226 for chunk in source:
227 buf.append(chunk)
227 buf.append(chunk)
228 blen += len(chunk)
228 blen += len(chunk)
229 if blen >= min:
229 if blen >= min:
230 if min < max:
230 if min < max:
231 min = min << 1
231 min = min << 1
232 nmin = 1 << log2(blen)
232 nmin = 1 << log2(blen)
233 if nmin > min:
233 if nmin > min:
234 min = nmin
234 min = nmin
235 if min > max:
235 if min > max:
236 min = max
236 min = max
237 yield ''.join(buf)
237 yield ''.join(buf)
238 blen = 0
238 blen = 0
239 buf = []
239 buf = []
240 if buf:
240 if buf:
241 yield ''.join(buf)
241 yield ''.join(buf)
242
242
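# Usage sketch (data is illustrative): many tiny input chunks are coalesced
# into progressively larger ones, from roughly 1k up to 64k, which keeps the
# number of downstream writes small.
def _increasingchunks_example():
    pieces = ('x' * 100 for _ in xrange(5000))
    return [len(c) for c in increasingchunks(pieces)]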
243 Abort = error.Abort
243 Abort = error.Abort
244
244
245 def always(fn): return True
245 def always(fn): return True
246 def never(fn): return False
246 def never(fn): return False
247
247
248 def pathto(root, n1, n2):
248 def pathto(root, n1, n2):
249 '''return the relative path from one place to another.
249 '''return the relative path from one place to another.
250 root should use os.sep to separate directories
250 root should use os.sep to separate directories
251 n1 should use os.sep to separate directories
251 n1 should use os.sep to separate directories
252 n2 should use "/" to separate directories
252 n2 should use "/" to separate directories
253 returns an os.sep-separated path.
253 returns an os.sep-separated path.
254
254
255 If n1 is a relative path, it's assumed it's
255 If n1 is a relative path, it's assumed it's
256 relative to root.
256 relative to root.
257 n2 should always be relative to root.
257 n2 should always be relative to root.
258 '''
258 '''
259 if not n1: return localpath(n2)
259 if not n1: return localpath(n2)
260 if os.path.isabs(n1):
260 if os.path.isabs(n1):
261 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
261 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
262 return os.path.join(root, localpath(n2))
262 return os.path.join(root, localpath(n2))
263 n2 = '/'.join((pconvert(root), n2))
263 n2 = '/'.join((pconvert(root), n2))
264 a, b = splitpath(n1), n2.split('/')
264 a, b = splitpath(n1), n2.split('/')
265 a.reverse()
265 a.reverse()
266 b.reverse()
266 b.reverse()
267 while a and b and a[-1] == b[-1]:
267 while a and b and a[-1] == b[-1]:
268 a.pop()
268 a.pop()
269 b.pop()
269 b.pop()
270 b.reverse()
270 b.reverse()
271 return os.sep.join((['..'] * len(a)) + b) or '.'
271 return os.sep.join((['..'] * len(a)) + b) or '.'
272
272
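# Hedged example (POSIX separators assumed; paths are illustrative): with
# root '/repo', walking from 'src/lib' to 'docs/readme' yields a relative,
# os.sep-separated path.
#
#     pathto('/repo', 'src/lib', 'docs/readme')   # -> '../../docs/readme'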
273 def canonpath(root, cwd, myname):
273 def canonpath(root, cwd, myname):
274 """return the canonical path of myname, given cwd and root"""
274 """return the canonical path of myname, given cwd and root"""
275 if endswithsep(root):
275 if endswithsep(root):
276 rootsep = root
276 rootsep = root
277 else:
277 else:
278 rootsep = root + os.sep
278 rootsep = root + os.sep
279 name = myname
279 name = myname
280 if not os.path.isabs(name):
280 if not os.path.isabs(name):
281 name = os.path.join(root, cwd, name)
281 name = os.path.join(root, cwd, name)
282 name = os.path.normpath(name)
282 name = os.path.normpath(name)
283 audit_path = path_auditor(root)
283 audit_path = path_auditor(root)
284 if name != rootsep and name.startswith(rootsep):
284 if name != rootsep and name.startswith(rootsep):
285 name = name[len(rootsep):]
285 name = name[len(rootsep):]
286 audit_path(name)
286 audit_path(name)
287 return pconvert(name)
287 return pconvert(name)
288 elif name == root:
288 elif name == root:
289 return ''
289 return ''
290 else:
290 else:
291 # Determine whether `name' is in the hierarchy at or beneath `root',
291 # Determine whether `name' is in the hierarchy at or beneath `root',
292 # by iterating name=dirname(name) until that causes no change (can't
292 # by iterating name=dirname(name) until that causes no change (can't
293 # check name == '/', because that doesn't work on windows). For each
293 # check name == '/', because that doesn't work on windows). For each
294 # `name', compare dev/inode numbers. If they match, the list `rel'
294 # `name', compare dev/inode numbers. If they match, the list `rel'
295 # holds the reversed list of components making up the relative file
295 # holds the reversed list of components making up the relative file
296 # name we want.
296 # name we want.
297 root_st = os.stat(root)
297 root_st = os.stat(root)
298 rel = []
298 rel = []
299 while True:
299 while True:
300 try:
300 try:
301 name_st = os.stat(name)
301 name_st = os.stat(name)
302 except OSError:
302 except OSError:
303 break
303 break
304 if samestat(name_st, root_st):
304 if samestat(name_st, root_st):
305 if not rel:
305 if not rel:
306 # name was actually the same as root (maybe a symlink)
306 # name was actually the same as root (maybe a symlink)
307 return ''
307 return ''
308 rel.reverse()
308 rel.reverse()
309 name = os.path.join(*rel)
309 name = os.path.join(*rel)
310 audit_path(name)
310 audit_path(name)
311 return pconvert(name)
311 return pconvert(name)
312 dirname, basename = os.path.split(name)
312 dirname, basename = os.path.split(name)
313 rel.append(basename)
313 rel.append(basename)
314 if dirname == name:
314 if dirname == name:
315 break
315 break
316 name = dirname
316 name = dirname
317
317
318 raise Abort('%s not under root' % myname)
318 raise Abort('%s not under root' % myname)
319
319
320 _hgexecutable = None
320 _hgexecutable = None
321
321
322 def main_is_frozen():
322 def main_is_frozen():
323 """return True if we are a frozen executable.
323 """return True if we are a frozen executable.
324
324
325 The code supports py2exe (most common, Windows only) and tools/freeze
325 The code supports py2exe (most common, Windows only) and tools/freeze
326 (portable, not much used).
326 (portable, not much used).
327 """
327 """
328 return (hasattr(sys, "frozen") or # new py2exe
328 return (hasattr(sys, "frozen") or # new py2exe
329 hasattr(sys, "importers") or # old py2exe
329 hasattr(sys, "importers") or # old py2exe
330 imp.is_frozen("__main__")) # tools/freeze
330 imp.is_frozen("__main__")) # tools/freeze
331
331
332 def hgexecutable():
332 def hgexecutable():
333 """return location of the 'hg' executable.
333 """return location of the 'hg' executable.
334
334
335 Defaults to $HG or 'hg' in the search path.
335 Defaults to $HG or 'hg' in the search path.
336 """
336 """
337 if _hgexecutable is None:
337 if _hgexecutable is None:
338 hg = os.environ.get('HG')
338 hg = os.environ.get('HG')
339 if hg:
339 if hg:
340 set_hgexecutable(hg)
340 set_hgexecutable(hg)
341 elif main_is_frozen():
341 elif main_is_frozen():
342 set_hgexecutable(sys.executable)
342 set_hgexecutable(sys.executable)
343 else:
343 else:
344 exe = find_exe('hg') or os.path.basename(sys.argv[0])
344 exe = find_exe('hg') or os.path.basename(sys.argv[0])
345 set_hgexecutable(exe)
345 set_hgexecutable(exe)
346 return _hgexecutable
346 return _hgexecutable
347
347
348 def set_hgexecutable(path):
348 def set_hgexecutable(path):
349 """set location of the 'hg' executable"""
349 """set location of the 'hg' executable"""
350 global _hgexecutable
350 global _hgexecutable
351 _hgexecutable = path
351 _hgexecutable = path
352
352
353 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
353 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
354 '''enhanced shell command execution.
354 '''enhanced shell command execution.
355 run with environment maybe modified, maybe in different dir.
355 run with environment maybe modified, maybe in different dir.
356
356
357 if command fails and onerr is None, return status. if ui object,
357 if command fails and onerr is None, return status. if ui object,
358 print error message and return status, else raise onerr object as
358 print error message and return status, else raise onerr object as
359 exception.'''
359 exception.'''
360 def py2shell(val):
360 def py2shell(val):
361 'convert python object into string that is useful to shell'
361 'convert python object into string that is useful to shell'
362 if val is None or val is False:
362 if val is None or val is False:
363 return '0'
363 return '0'
364 if val is True:
364 if val is True:
365 return '1'
365 return '1'
366 return str(val)
366 return str(val)
367 origcmd = cmd
367 origcmd = cmd
368 if os.name == 'nt':
368 if os.name == 'nt':
369 cmd = '"%s"' % cmd
369 cmd = '"%s"' % cmd
370 env = dict(os.environ)
370 env = dict(os.environ)
371 env.update((k, py2shell(v)) for k, v in environ.iteritems())
371 env.update((k, py2shell(v)) for k, v in environ.iteritems())
372 env['HG'] = hgexecutable()
372 env['HG'] = hgexecutable()
373 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
373 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
374 env=env, cwd=cwd)
374 env=env, cwd=cwd)
375 if sys.platform == 'OpenVMS' and rc & 1:
375 if sys.platform == 'OpenVMS' and rc & 1:
376 rc = 0
376 rc = 0
377 if rc and onerr:
377 if rc and onerr:
378 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
378 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
379 explain_exit(rc)[0])
379 explain_exit(rc)[0])
380 if errprefix:
380 if errprefix:
381 errmsg = '%s: %s' % (errprefix, errmsg)
381 errmsg = '%s: %s' % (errprefix, errmsg)
382 try:
382 try:
383 onerr.warn(errmsg + '\n')
383 onerr.warn(errmsg + '\n')
384 except AttributeError:
384 except AttributeError:
385 raise onerr(errmsg)
385 raise onerr(errmsg)
386 return rc
386 return rc
387
387
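# Hedged usage sketch (command and variable are illustrative): environment
# values pass through py2shell (True -> '1', None/False -> '0'), and onerr
# controls failure handling; passing Abort makes a non-zero exit raise
# instead of returning the status.
def _system_example():
    return system('hg --version', environ={'HGPLAIN': True}, onerr=Abort)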
388 def checksignature(func):
388 def checksignature(func):
389 '''wrap a function with code to check for calling errors'''
389 '''wrap a function with code to check for calling errors'''
390 def check(*args, **kwargs):
390 def check(*args, **kwargs):
391 try:
391 try:
392 return func(*args, **kwargs)
392 return func(*args, **kwargs)
393 except TypeError:
393 except TypeError:
394 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
394 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
395 raise error.SignatureError
395 raise error.SignatureError
396 raise
396 raise
397
397
398 return check
398 return check
399
399
400 # os.path.lexists is not available on python2.3
400 # os.path.lexists is not available on python2.3
401 def lexists(filename):
401 def lexists(filename):
402 "test whether a file with this name exists. does not follow symlinks"
402 "test whether a file with this name exists. does not follow symlinks"
403 try:
403 try:
404 os.lstat(filename)
404 os.lstat(filename)
405 except:
405 except:
406 return False
406 return False
407 return True
407 return True
408
408
409 def unlink(f):
409 def unlink(f):
410 """unlink and remove the directory if it is empty"""
410 """unlink and remove the directory if it is empty"""
411 os.unlink(f)
411 os.unlink(f)
412 # try removing directories that might now be empty
412 # try removing directories that might now be empty
413 try:
413 try:
414 os.removedirs(os.path.dirname(f))
414 os.removedirs(os.path.dirname(f))
415 except OSError:
415 except OSError:
416 pass
416 pass
417
417
418 def copyfile(src, dest):
418 def copyfile(src, dest):
419 "copy a file, preserving mode and atime/mtime"
419 "copy a file, preserving mode and atime/mtime"
420 if os.path.islink(src):
420 if os.path.islink(src):
421 try:
421 try:
422 os.unlink(dest)
422 os.unlink(dest)
423 except:
423 except:
424 pass
424 pass
425 os.symlink(os.readlink(src), dest)
425 os.symlink(os.readlink(src), dest)
426 else:
426 else:
427 try:
427 try:
428 shutil.copyfile(src, dest)
428 shutil.copyfile(src, dest)
429 shutil.copystat(src, dest)
429 shutil.copystat(src, dest)
430 except shutil.Error, inst:
430 except shutil.Error, inst:
431 raise Abort(str(inst))
431 raise Abort(str(inst))
432
432
433 def copyfiles(src, dst, hardlink=None):
433 def copyfiles(src, dst, hardlink=None):
434 """Copy a directory tree using hardlinks if possible"""
434 """Copy a directory tree using hardlinks if possible"""
435
435
436 if hardlink is None:
436 if hardlink is None:
437 hardlink = (os.stat(src).st_dev ==
437 hardlink = (os.stat(src).st_dev ==
438 os.stat(os.path.dirname(dst)).st_dev)
438 os.stat(os.path.dirname(dst)).st_dev)
439
439
440 if os.path.isdir(src):
440 if os.path.isdir(src):
441 os.mkdir(dst)
441 os.mkdir(dst)
442 for name, kind in osutil.listdir(src):
442 for name, kind in osutil.listdir(src):
443 srcname = os.path.join(src, name)
443 srcname = os.path.join(src, name)
444 dstname = os.path.join(dst, name)
444 dstname = os.path.join(dst, name)
445 copyfiles(srcname, dstname, hardlink)
445 copyfiles(srcname, dstname, hardlink)
446 else:
446 else:
447 if hardlink:
447 if hardlink:
448 try:
448 try:
449 os_link(src, dst)
449 os_link(src, dst)
450 except (IOError, OSError):
450 except (IOError, OSError):
451 hardlink = False
451 hardlink = False
452 shutil.copy(src, dst)
452 shutil.copy(src, dst)
453 else:
453 else:
454 shutil.copy(src, dst)
454 shutil.copy(src, dst)
455
455
456 class path_auditor(object):
456 class path_auditor(object):
457 '''ensure that a filesystem path contains no banned components.
457 '''ensure that a filesystem path contains no banned components.
458 the following properties of a path are checked:
458 the following properties of a path are checked:
459
459
460 - under top-level .hg
460 - under top-level .hg
461 - starts at the root of a windows drive
461 - starts at the root of a windows drive
462 - contains ".."
462 - contains ".."
463 - traverses a symlink (e.g. a/symlink_here/b)
463 - traverses a symlink (e.g. a/symlink_here/b)
464 - inside a nested repository'''
464 - inside a nested repository'''
465
465
466 def __init__(self, root):
466 def __init__(self, root):
467 self.audited = set()
467 self.audited = set()
468 self.auditeddir = set()
468 self.auditeddir = set()
469 self.root = root
469 self.root = root
470
470
471 def __call__(self, path):
471 def __call__(self, path):
472 if path in self.audited:
472 if path in self.audited:
473 return
473 return
474 normpath = os.path.normcase(path)
474 normpath = os.path.normcase(path)
475 parts = splitpath(normpath)
475 parts = splitpath(normpath)
476 if (os.path.splitdrive(path)[0]
476 if (os.path.splitdrive(path)[0]
477 or parts[0].lower() in ('.hg', '.hg.', '')
477 or parts[0].lower() in ('.hg', '.hg.', '')
478 or os.pardir in parts):
478 or os.pardir in parts):
479 raise Abort(_("path contains illegal component: %s") % path)
479 raise Abort(_("path contains illegal component: %s") % path)
480 if '.hg' in path.lower():
480 if '.hg' in path.lower():
481 lparts = [p.lower() for p in parts]
481 lparts = [p.lower() for p in parts]
482 for p in '.hg', '.hg.':
482 for p in '.hg', '.hg.':
483 if p in lparts[1:]:
483 if p in lparts[1:]:
484 pos = lparts.index(p)
484 pos = lparts.index(p)
485 base = os.path.join(*parts[:pos])
485 base = os.path.join(*parts[:pos])
486 raise Abort(_('path %r is inside repo %r') % (path, base))
486 raise Abort(_('path %r is inside repo %r') % (path, base))
487 def check(prefix):
487 def check(prefix):
488 curpath = os.path.join(self.root, prefix)
488 curpath = os.path.join(self.root, prefix)
489 try:
489 try:
490 st = os.lstat(curpath)
490 st = os.lstat(curpath)
491 except OSError, err:
491 except OSError, err:
492 # EINVAL can be raised for invalid path syntax under win32.
492 # EINVAL can be raised for invalid path syntax under win32.
493 # These errors must be ignored so that patterns can still be checked.
493 # These errors must be ignored so that patterns can still be checked.
494 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
494 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
495 raise
495 raise
496 else:
496 else:
497 if stat.S_ISLNK(st.st_mode):
497 if stat.S_ISLNK(st.st_mode):
498 raise Abort(_('path %r traverses symbolic link %r') %
498 raise Abort(_('path %r traverses symbolic link %r') %
499 (path, prefix))
499 (path, prefix))
500 elif (stat.S_ISDIR(st.st_mode) and
500 elif (stat.S_ISDIR(st.st_mode) and
501 os.path.isdir(os.path.join(curpath, '.hg'))):
501 os.path.isdir(os.path.join(curpath, '.hg'))):
502 raise Abort(_('path %r is inside repo %r') %
502 raise Abort(_('path %r is inside repo %r') %
503 (path, prefix))
503 (path, prefix))
504 parts.pop()
504 parts.pop()
505 prefixes = []
505 prefixes = []
506 while parts:
506 while parts:
507 prefix = os.sep.join(parts)
507 prefix = os.sep.join(parts)
508 if prefix in self.auditeddir:
508 if prefix in self.auditeddir:
509 break
509 break
510 check(prefix)
510 check(prefix)
511 prefixes.append(prefix)
511 prefixes.append(prefix)
512 parts.pop()
512 parts.pop()
513
513
514 self.audited.add(path)
514 self.audited.add(path)
515 # only add prefixes to the cache after checking everything: we don't
515 # only add prefixes to the cache after checking everything: we don't
516 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
516 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
517 self.auditeddir.update(prefixes)
517 self.auditeddir.update(prefixes)
518
518
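# Illustrative sketch (paths are hypothetical): the auditor raises Abort for
# components that escape the root, live under a top-level .hg, traverse a
# symlink or sit inside a nested repository; accepted paths are cached.
def _audit_example(root):
    audit = path_auditor(root)
    audit('src/module.py')      # accepted and remembered in audit.audited
    audit('../escape')          # raises Abort: contains os.pardir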
519 def nlinks(pathname):
519 def nlinks(pathname):
520 """Return number of hardlinks for the given file."""
520 """Return number of hardlinks for the given file."""
521 return os.lstat(pathname).st_nlink
521 return os.lstat(pathname).st_nlink
522
522
523 if hasattr(os, 'link'):
523 if hasattr(os, 'link'):
524 os_link = os.link
524 os_link = os.link
525 else:
525 else:
526 def os_link(src, dst):
526 def os_link(src, dst):
527 raise OSError(0, _("Hardlinks not supported"))
527 raise OSError(0, _("Hardlinks not supported"))
528
528
529 def lookup_reg(key, name=None, scope=None):
529 def lookup_reg(key, name=None, scope=None):
530 return None
530 return None
531
531
532 if os.name == 'nt':
532 if os.name == 'nt':
533 from windows import *
533 from windows import *
534 else:
534 else:
535 from posix import *
535 from posix import *
536
536
537 def makelock(info, pathname):
537 def makelock(info, pathname):
538 try:
538 try:
539 return os.symlink(info, pathname)
539 return os.symlink(info, pathname)
540 except OSError, why:
540 except OSError, why:
541 if why.errno == errno.EEXIST:
541 if why.errno == errno.EEXIST:
542 raise
542 raise
543 except AttributeError: # no symlink in os
543 except AttributeError: # no symlink in os
544 pass
544 pass
545
545
546 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
546 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
547 os.write(ld, info)
547 os.write(ld, info)
548 os.close(ld)
548 os.close(ld)
549
549
550 def readlock(pathname):
550 def readlock(pathname):
551 try:
551 try:
552 return os.readlink(pathname)
552 return os.readlink(pathname)
553 except OSError, why:
553 except OSError, why:
554 if why.errno not in (errno.EINVAL, errno.ENOSYS):
554 if why.errno not in (errno.EINVAL, errno.ENOSYS):
555 raise
555 raise
556 except AttributeError: # no symlink in os
556 except AttributeError: # no symlink in os
557 pass
557 pass
558 return posixfile(pathname).read()
558 return posixfile(pathname).read()
559
559
560 def fstat(fp):
560 def fstat(fp):
561 '''stat file object that may not have fileno method.'''
561 '''stat file object that may not have fileno method.'''
562 try:
562 try:
563 return os.fstat(fp.fileno())
563 return os.fstat(fp.fileno())
564 except AttributeError:
564 except AttributeError:
565 return os.stat(fp.name)
565 return os.stat(fp.name)
566
566
567 # File system features
567 # File system features
568
568
569 def checkcase(path):
569 def checkcase(path):
570 """
570 """
571 Check whether the given path is on a case-sensitive filesystem
571 Check whether the given path is on a case-sensitive filesystem
572
572
573 Requires a path (like /foo/.hg) ending with a foldable final
573 Requires a path (like /foo/.hg) ending with a foldable final
574 directory component.
574 directory component.
575 """
575 """
576 s1 = os.stat(path)
576 s1 = os.stat(path)
577 d, b = os.path.split(path)
577 d, b = os.path.split(path)
578 p2 = os.path.join(d, b.upper())
578 p2 = os.path.join(d, b.upper())
579 if path == p2:
579 if path == p2:
580 p2 = os.path.join(d, b.lower())
580 p2 = os.path.join(d, b.lower())
581 try:
581 try:
582 s2 = os.stat(p2)
582 s2 = os.stat(p2)
583 if s2 == s1:
583 if s2 == s1:
584 return False
584 return False
585 return True
585 return True
586 except:
586 except:
587 return True
587 return True
588
588
589 _fspathcache = {}
589 _fspathcache = {}
590 def fspath(name, root):
590 def fspath(name, root):
591 '''Get name in the case stored in the filesystem
591 '''Get name in the case stored in the filesystem
592
592
593 The name is either relative to root, or it is an absolute path starting
593 The name is either relative to root, or it is an absolute path starting
594 with root. Note that this function is unnecessary, and should not be
594 with root. Note that this function is unnecessary, and should not be
595 called, for case-sensitive filesystems (simply because it's expensive).
595 called, for case-sensitive filesystems (simply because it's expensive).
596 '''
596 '''
597 # If name is absolute, make it relative
597 # If name is absolute, make it relative
598 if name.lower().startswith(root.lower()):
598 if name.lower().startswith(root.lower()):
599 l = len(root)
599 l = len(root)
600 if name[l] == os.sep or name[l] == os.altsep:
600 if name[l] == os.sep or name[l] == os.altsep:
601 l = l + 1
601 l = l + 1
602 name = name[l:]
602 name = name[l:]
603
603
604 if not os.path.exists(os.path.join(root, name)):
604 if not os.path.exists(os.path.join(root, name)):
605 return None
605 return None
606
606
607 seps = os.sep
607 seps = os.sep
608 if os.altsep:
608 if os.altsep:
609 seps = seps + os.altsep
609 seps = seps + os.altsep
610 # Protect backslashes. This gets silly very quickly.
610 # Protect backslashes. This gets silly very quickly.
611 seps.replace('\\','\\\\')
611 seps.replace('\\','\\\\')
612 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
612 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
613 dir = os.path.normcase(os.path.normpath(root))
613 dir = os.path.normcase(os.path.normpath(root))
614 result = []
614 result = []
615 for part, sep in pattern.findall(name):
615 for part, sep in pattern.findall(name):
616 if sep:
616 if sep:
617 result.append(sep)
617 result.append(sep)
618 continue
618 continue
619
619
620 if dir not in _fspathcache:
620 if dir not in _fspathcache:
621 _fspathcache[dir] = os.listdir(dir)
621 _fspathcache[dir] = os.listdir(dir)
622 contents = _fspathcache[dir]
622 contents = _fspathcache[dir]
623
623
624 lpart = part.lower()
624 lpart = part.lower()
625 lenp = len(part)
625 lenp = len(part)
626 for n in contents:
626 for n in contents:
627 if lenp == len(n) and n.lower() == lpart:
627 if lenp == len(n) and n.lower() == lpart:
628 result.append(n)
628 result.append(n)
629 break
629 break
630 else:
630 else:
631 # Cannot happen, as the file exists!
631 # Cannot happen, as the file exists!
632 result.append(part)
632 result.append(part)
633 dir = os.path.join(dir, lpart)
633 dir = os.path.join(dir, lpart)
634
634
635 return ''.join(result)
635 return ''.join(result)
636
636
637 def checkexec(path):
637 def checkexec(path):
638 """
638 """
639 Check whether the given path is on a filesystem with UNIX-like exec flags
639 Check whether the given path is on a filesystem with UNIX-like exec flags
640
640
641 Requires a directory (like /foo/.hg)
641 Requires a directory (like /foo/.hg)
642 """
642 """
643
643
644 # VFAT on some Linux versions can flip mode but it doesn't persist
644 # VFAT on some Linux versions can flip mode but it doesn't persist
645 # a FS remount. Frequently we can detect it if files are created
645 # a FS remount. Frequently we can detect it if files are created
646 # with exec bit on.
646 # with exec bit on.
647
647
648 try:
648 try:
649 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
649 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
650 fh, fn = tempfile.mkstemp("", "", path)
650 fh, fn = tempfile.mkstemp("", "", path)
651 try:
651 try:
652 os.close(fh)
652 os.close(fh)
653 m = os.stat(fn).st_mode & 0777
653 m = os.stat(fn).st_mode & 0777
654 new_file_has_exec = m & EXECFLAGS
654 new_file_has_exec = m & EXECFLAGS
655 os.chmod(fn, m ^ EXECFLAGS)
655 os.chmod(fn, m ^ EXECFLAGS)
656 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
656 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
657 finally:
657 finally:
658 os.unlink(fn)
658 os.unlink(fn)
659 except (IOError, OSError):
659 except (IOError, OSError):
660 # we don't care, the user probably won't be able to commit anyway
660 # we don't care, the user probably won't be able to commit anyway
661 return False
661 return False
662 return not (new_file_has_exec or exec_flags_cannot_flip)
662 return not (new_file_has_exec or exec_flags_cannot_flip)
663
663
664 def checklink(path):
664 def checklink(path):
665 """check whether the given path is on a symlink-capable filesystem"""
665 """check whether the given path is on a symlink-capable filesystem"""
666 # mktemp is not racy because symlink creation will fail if the
666 # mktemp is not racy because symlink creation will fail if the
667 # file already exists
667 # file already exists
668 name = tempfile.mktemp(dir=path)
668 name = tempfile.mktemp(dir=path)
669 try:
669 try:
670 os.symlink(".", name)
670 os.symlink(".", name)
671 os.unlink(name)
671 os.unlink(name)
672 return True
672 return True
673 except (OSError, AttributeError):
673 except (OSError, AttributeError):
674 return False
674 return False
675
675
676 def needbinarypatch():
676 def needbinarypatch():
677 """return True if patches should be applied in binary mode by default."""
677 """return True if patches should be applied in binary mode by default."""
678 return os.name == 'nt'
678 return os.name == 'nt'
679
679
680 def endswithsep(path):
680 def endswithsep(path):
681 '''Check path ends with os.sep or os.altsep.'''
681 '''Check path ends with os.sep or os.altsep.'''
682 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
682 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
683
683
684 def splitpath(path):
684 def splitpath(path):
685 '''Split path by os.sep.
685 '''Split path by os.sep.
686 Note that this function does not use os.altsep because it is meant
686 Note that this function does not use os.altsep because it is meant
687 as a simple alternative to "xxx.split(os.sep)".
687 as a simple alternative to "xxx.split(os.sep)".
688 It is recommended to run os.path.normpath() on the path before using
688 It is recommended to run os.path.normpath() on the path before using
689 this function if needed.'''
689 this function if needed.'''
690 return path.split(os.sep)
690 return path.split(os.sep)
691
691
692 def gui():
692 def gui():
693 '''Are we running in a GUI?'''
693 '''Are we running in a GUI?'''
694 return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
694 return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
695
695
696 def mktempcopy(name, emptyok=False, createmode=None):
696 def mktempcopy(name, emptyok=False, createmode=None):
697 """Create a temporary file with the same contents from name
697 """Create a temporary file with the same contents from name
698
698
699 The permission bits are copied from the original file.
699 The permission bits are copied from the original file.
700
700
701 If the temporary file is going to be truncated immediately, you
701 If the temporary file is going to be truncated immediately, you
702 can use emptyok=True as an optimization.
702 can use emptyok=True as an optimization.
703
703
704 Returns the name of the temporary file.
704 Returns the name of the temporary file.
705 """
705 """
706 d, fn = os.path.split(name)
706 d, fn = os.path.split(name)
707 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
707 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
708 os.close(fd)
708 os.close(fd)
709 # Temporary files are created with mode 0600, which is usually not
709 # Temporary files are created with mode 0600, which is usually not
710 # what we want. If the original file already exists, just copy
710 # what we want. If the original file already exists, just copy
711 # its mode. Otherwise, manually obey umask.
711 # its mode. Otherwise, manually obey umask.
712 try:
712 try:
713 st_mode = os.lstat(name).st_mode & 0777
713 st_mode = os.lstat(name).st_mode & 0777
714 except OSError, inst:
714 except OSError, inst:
715 if inst.errno != errno.ENOENT:
715 if inst.errno != errno.ENOENT:
716 raise
716 raise
717 st_mode = createmode
717 st_mode = createmode
718 if st_mode is None:
718 if st_mode is None:
719 st_mode = ~umask
719 st_mode = ~umask
720 st_mode &= 0666
720 st_mode &= 0666
721 os.chmod(temp, st_mode)
721 os.chmod(temp, st_mode)
722 if emptyok:
722 if emptyok:
723 return temp
723 return temp
724 try:
724 try:
725 try:
725 try:
726 ifp = posixfile(name, "rb")
726 ifp = posixfile(name, "rb")
727 except IOError, inst:
727 except IOError, inst:
728 if inst.errno == errno.ENOENT:
728 if inst.errno == errno.ENOENT:
729 return temp
729 return temp
730 if not getattr(inst, 'filename', None):
730 if not getattr(inst, 'filename', None):
731 inst.filename = name
731 inst.filename = name
732 raise
732 raise
733 ofp = posixfile(temp, "wb")
733 ofp = posixfile(temp, "wb")
734 for chunk in filechunkiter(ifp):
734 for chunk in filechunkiter(ifp):
735 ofp.write(chunk)
735 ofp.write(chunk)
736 ifp.close()
736 ifp.close()
737 ofp.close()
737 ofp.close()
738 except:
738 except:
739 try: os.unlink(temp)
739 try: os.unlink(temp)
740 except: pass
740 except: pass
741 raise
741 raise
742 return temp
742 return temp
743
743
744 class atomictempfile(object):
744 class atomictempfile(object):
745 """file-like object that atomically updates a file
745 """file-like object that atomically updates a file
746
746
747 All writes will be redirected to a temporary copy of the original
747 All writes will be redirected to a temporary copy of the original
748 file. When rename is called, the copy is renamed to the original
748 file. When rename is called, the copy is renamed to the original
749 name, making the changes visible.
749 name, making the changes visible.
750 """
750 """
751 def __init__(self, name, mode, createmode):
751 def __init__(self, name, mode, createmode):
752 self.__name = name
752 self.__name = name
753 self._fp = None
753 self._fp = None
754 self.temp = mktempcopy(name, emptyok=('w' in mode),
754 self.temp = mktempcopy(name, emptyok=('w' in mode),
755 createmode=createmode)
755 createmode=createmode)
756 self._fp = posixfile(self.temp, mode)
756 self._fp = posixfile(self.temp, mode)
757
757
758 def __getattr__(self, name):
758 def __getattr__(self, name):
759 return getattr(self._fp, name)
759 return getattr(self._fp, name)
760
760
761 def rename(self):
761 def rename(self):
762 if not self._fp.closed:
762 if not self._fp.closed:
763 self._fp.close()
763 self._fp.close()
764 rename(self.temp, localpath(self.__name))
764 rename(self.temp, localpath(self.__name))
765
765
766 def __del__(self):
766 def __del__(self):
767 if not self._fp:
767 if not self._fp:
768 return
768 return
769 if not self._fp.closed:
769 if not self._fp.closed:
770 try:
770 try:
771 os.unlink(self.temp)
771 os.unlink(self.temp)
772 except: pass
772 except: pass
773 self._fp.close()
773 self._fp.close()
774
774
775 def makedirs(name, mode=None):
775 def makedirs(name, mode=None):
776 """recursive directory creation with parent mode inheritance"""
776 """recursive directory creation with parent mode inheritance"""
777 try:
777 try:
778 os.mkdir(name)
778 os.mkdir(name)
779 if mode is not None:
779 if mode is not None:
780 os.chmod(name, mode)
780 os.chmod(name, mode)
781 return
781 return
782 except OSError, err:
782 except OSError, err:
783 if err.errno == errno.EEXIST:
783 if err.errno == errno.EEXIST:
784 return
784 return
785 if err.errno != errno.ENOENT:
785 if err.errno != errno.ENOENT:
786 raise
786 raise
787 parent = os.path.abspath(os.path.dirname(name))
787 parent = os.path.abspath(os.path.dirname(name))
788 makedirs(parent, mode)
788 makedirs(parent, mode)
789 makedirs(name, mode)
789 makedirs(name, mode)
790
790
791 class opener(object):
791 class opener(object):
792 """Open files relative to a base directory
792 """Open files relative to a base directory
793
793
794 This class is used to hide the details of COW semantics and
794 This class is used to hide the details of COW semantics and
795 remote file access from higher level code.
795 remote file access from higher level code.
796 """
796 """
797 def __init__(self, base, audit=True):
797 def __init__(self, base, audit=True):
798 self.base = base
798 self.base = base
799 if audit:
799 if audit:
800 self.audit_path = path_auditor(base)
800 self.audit_path = path_auditor(base)
801 else:
801 else:
802 self.audit_path = always
802 self.audit_path = always
803 self.createmode = None
803 self.createmode = None
804
804
805 @propertycache
805 @propertycache
806 def _can_symlink(self):
806 def _can_symlink(self):
807 return checklink(self.base)
807 return checklink(self.base)
808
808
809 def _fixfilemode(self, name):
809 def _fixfilemode(self, name):
810 if self.createmode is None:
810 if self.createmode is None:
811 return
811 return
812 os.chmod(name, self.createmode & 0666)
812 os.chmod(name, self.createmode & 0666)
813
813
814 def __call__(self, path, mode="r", text=False, atomictemp=False):
814 def __call__(self, path, mode="r", text=False, atomictemp=False):
815 self.audit_path(path)
815 self.audit_path(path)
816 f = os.path.join(self.base, path)
816 f = os.path.join(self.base, path)
817
817
818 if not text and "b" not in mode:
818 if not text and "b" not in mode:
819 mode += "b" # for that other OS
819 mode += "b" # for that other OS
820
820
821 nlink = -1
821 nlink = -1
822 if mode not in ("r", "rb"):
822 if mode not in ("r", "rb"):
823 try:
823 try:
824 nlink = nlinks(f)
824 nlink = nlinks(f)
825 except OSError:
825 except OSError:
826 nlink = 0
826 nlink = 0
827 d = os.path.dirname(f)
827 d = os.path.dirname(f)
828 if not os.path.isdir(d):
828 if not os.path.isdir(d):
829 makedirs(d, self.createmode)
829 makedirs(d, self.createmode)
830 if atomictemp:
830 if atomictemp:
831 return atomictempfile(f, mode, self.createmode)
831 return atomictempfile(f, mode, self.createmode)
832 if nlink > 1:
832 if nlink > 1:
833 rename(mktempcopy(f), f)
833 rename(mktempcopy(f), f)
834 fp = posixfile(f, mode)
834 fp = posixfile(f, mode)
835 if nlink == 0:
835 if nlink == 0:
836 self._fixfilemode(f)
836 self._fixfilemode(f)
837 return fp
837 return fp
838
838
839 def symlink(self, src, dst):
839 def symlink(self, src, dst):
840 self.audit_path(dst)
840 self.audit_path(dst)
841 linkname = os.path.join(self.base, dst)
841 linkname = os.path.join(self.base, dst)
842 try:
842 try:
843 os.unlink(linkname)
843 os.unlink(linkname)
844 except OSError:
844 except OSError:
845 pass
845 pass
846
846
847 dirname = os.path.dirname(linkname)
847 dirname = os.path.dirname(linkname)
848 if not os.path.exists(dirname):
848 if not os.path.exists(dirname):
849 makedirs(dirname, self.createmode)
849 makedirs(dirname, self.createmode)
850
850
851 if self._can_symlink:
851 if self._can_symlink:
852 try:
852 try:
853 os.symlink(src, linkname)
853 os.symlink(src, linkname)
854 except OSError, err:
854 except OSError, err:
855 raise OSError(err.errno, _('could not symlink to %r: %s') %
855 raise OSError(err.errno, _('could not symlink to %r: %s') %
856 (src, err.strerror), linkname)
856 (src, err.strerror), linkname)
857 else:
857 else:
858 f = self(dst, "w")
858 f = self(dst, "w")
859 f.write(src)
859 f.write(src)
860 f.close()
860 f.close()
861 self._fixfilemode(dst)
861 self._fixfilemode(dst)
862
862
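# Hedged usage sketch (paths are illustrative): an opener roots every access
# under its base directory, audits the relative path, and with atomictemp=True
# returns an atomictempfile whose rename() publishes the finished file.
def _opener_example(base):
    op = opener(base)
    f = op('store/data.txt', 'w', atomictemp=True)
    f.write('payload')
    f.rename()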
863 class chunkbuffer(object):
863 class chunkbuffer(object):
864 """Allow arbitrary sized chunks of data to be efficiently read from an
864 """Allow arbitrary sized chunks of data to be efficiently read from an
865 iterator over chunks of arbitrary size."""
865 iterator over chunks of arbitrary size."""
866
866
867 def __init__(self, in_iter):
867 def __init__(self, in_iter):
868 """in_iter is the iterator that's iterating over the input chunks.
868 """in_iter is the iterator that's iterating over the input chunks.
869 targetsize is how big a buffer to try to maintain."""
869 targetsize is how big a buffer to try to maintain."""
870 self.iter = iter(in_iter)
870 self.iter = iter(in_iter)
871 self.buf = ''
871 self.buf = ''
872 self.targetsize = 2**16
872 self.targetsize = 2**16
873
873
874 def read(self, l):
874 def read(self, l):
875 """Read L bytes of data from the iterator of chunks of data.
875 """Read L bytes of data from the iterator of chunks of data.
876 Returns less than L bytes if the iterator runs dry."""
876 Returns less than L bytes if the iterator runs dry."""
877 if l > len(self.buf) and self.iter:
877 if l > len(self.buf) and self.iter:
878 # Clamp to a multiple of self.targetsize
878 # Clamp to a multiple of self.targetsize
879 targetsize = max(l, self.targetsize)
879 targetsize = max(l, self.targetsize)
880 collector = cStringIO.StringIO()
880 collector = cStringIO.StringIO()
881 collector.write(self.buf)
881 collector.write(self.buf)
882 collected = len(self.buf)
882 collected = len(self.buf)
883 for chunk in self.iter:
883 for chunk in self.iter:
884 collector.write(chunk)
884 collector.write(chunk)
885 collected += len(chunk)
885 collected += len(chunk)
886 if collected >= targetsize:
886 if collected >= targetsize:
887 break
887 break
888 if collected < targetsize:
888 if collected < targetsize:
889 self.iter = False
889 self.iter = False
890 self.buf = collector.getvalue()
890 self.buf = collector.getvalue()
891 if len(self.buf) == l:
891 if len(self.buf) == l:
892 s, self.buf = str(self.buf), ''
892 s, self.buf = str(self.buf), ''
893 else:
893 else:
894 s, self.buf = self.buf[:l], buffer(self.buf, l)
894 s, self.buf = self.buf[:l], buffer(self.buf, l)
895 return s
895 return s
896
896
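# Illustrative usage: chunkbuffer turns an iterator of arbitrarily sized
# chunks into something that can be read in caller-chosen slices.
def _chunkbuffer_example():
    cb = chunkbuffer(iter(['abc', 'defgh', 'ij']))
    return cb.read(4), cb.read(6)    # ('abcd', 'efghij')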
897 def filechunkiter(f, size=65536, limit=None):
897 def filechunkiter(f, size=65536, limit=None):
898 """Create a generator that produces the data in the file size
898 """Create a generator that produces the data in the file size
899 (default 65536) bytes at a time, up to optional limit (default is
899 (default 65536) bytes at a time, up to optional limit (default is
900 to read all data). Chunks may be less than size bytes if the
900 to read all data). Chunks may be less than size bytes if the
901 chunk is the last chunk in the file, or the file is a socket or
901 chunk is the last chunk in the file, or the file is a socket or
902 some other type of file that sometimes reads less data than is
902 some other type of file that sometimes reads less data than is
903 requested."""
903 requested."""
904 assert size >= 0
904 assert size >= 0
905 assert limit is None or limit >= 0
905 assert limit is None or limit >= 0
906 while True:
906 while True:
907 if limit is None: nbytes = size
907 if limit is None: nbytes = size
908 else: nbytes = min(limit, size)
908 else: nbytes = min(limit, size)
909 s = nbytes and f.read(nbytes)
909 s = nbytes and f.read(nbytes)
910 if not s: break
910 if not s: break
911 if limit: limit -= len(s)
911 if limit: limit -= len(s)
912 yield s
912 yield s
913
913
914 def makedate():
914 def makedate():
915 lt = time.localtime()
915 lt = time.localtime()
916 if lt[8] == 1 and time.daylight:
916 if lt[8] == 1 and time.daylight:
917 tz = time.altzone
917 tz = time.altzone
918 else:
918 else:
919 tz = time.timezone
919 tz = time.timezone
920 return time.mktime(lt), tz
920 return time.mktime(lt), tz
921
921
922 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
922 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
923 """represent a (unixtime, offset) tuple as a localized time.
923 """represent a (unixtime, offset) tuple as a localized time.
924 unixtime is seconds since the epoch, and offset is the time zone's
924 unixtime is seconds since the epoch, and offset is the time zone's
925 number of seconds away from UTC. if timezone is false, do not
925 number of seconds away from UTC. if timezone is false, do not
926 append time zone to string."""
926 append time zone to string."""
927 t, tz = date or makedate()
927 t, tz = date or makedate()
928 if "%1" in format or "%2" in format:
928 if "%1" in format or "%2" in format:
929 sign = (tz > 0) and "-" or "+"
929 sign = (tz > 0) and "-" or "+"
930 minutes = abs(tz) // 60
930 minutes = abs(tz) // 60
931 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
931 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
932 format = format.replace("%2", "%02d" % (minutes % 60))
932 format = format.replace("%2", "%02d" % (minutes % 60))
933 s = time.strftime(format, time.gmtime(float(t) - tz))
933 s = time.strftime(format, time.gmtime(float(t) - tz))
934 return s
934 return s
935
935
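# Hedged example: the offset is stored as seconds west of UTC, so -3600
# renders as '+0100' through the %1%2 placeholders.
def _datestr_example():
    return datestr((0, -3600), '%Y-%m-%d %H:%M %1%2')  # '1970-01-01 01:00 +0100'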
936 def shortdate(date=None):
936 def shortdate(date=None):
937 """turn (timestamp, tzoff) tuple into iso 8631 date."""
937 """turn (timestamp, tzoff) tuple into iso 8631 date."""
938 return datestr(date, format='%Y-%m-%d')
938 return datestr(date, format='%Y-%m-%d')
939
939
940 def strdate(string, format, defaults=[]):
940 def strdate(string, format, defaults=[]):
941 """parse a localized time string and return a (unixtime, offset) tuple.
941 """parse a localized time string and return a (unixtime, offset) tuple.
942 if the string cannot be parsed, ValueError is raised."""
942 if the string cannot be parsed, ValueError is raised."""
943 def timezone(string):
943 def timezone(string):
944 tz = string.split()[-1]
944 tz = string.split()[-1]
945 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
945 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
946 sign = (tz[0] == "+") and 1 or -1
946 sign = (tz[0] == "+") and 1 or -1
947 hours = int(tz[1:3])
947 hours = int(tz[1:3])
948 minutes = int(tz[3:5])
948 minutes = int(tz[3:5])
949 return -sign * (hours * 60 + minutes) * 60
949 return -sign * (hours * 60 + minutes) * 60
950 if tz == "GMT" or tz == "UTC":
950 if tz == "GMT" or tz == "UTC":
951 return 0
951 return 0
952 return None
952 return None
953
953
954 # NOTE: unixtime = localunixtime + offset
954 # NOTE: unixtime = localunixtime + offset
955 offset, date = timezone(string), string
955 offset, date = timezone(string), string
956 if offset != None:
956 if offset != None:
957 date = " ".join(string.split()[:-1])
957 date = " ".join(string.split()[:-1])
958
958
959 # add missing elements from defaults
959 # add missing elements from defaults
960 for part in defaults:
960 for part in defaults:
961 found = [True for p in part if ("%"+p) in format]
961 found = [True for p in part if ("%"+p) in format]
962 if not found:
962 if not found:
963 date += "@" + defaults[part]
963 date += "@" + defaults[part]
964 format += "@%" + part[0]
964 format += "@%" + part[0]
965
965
966 timetuple = time.strptime(date, format)
966 timetuple = time.strptime(date, format)
967 localunixtime = int(calendar.timegm(timetuple))
967 localunixtime = int(calendar.timegm(timetuple))
968 if offset is None:
968 if offset is None:
969 # local timezone
969 # local timezone
970 unixtime = int(time.mktime(timetuple))
970 unixtime = int(time.mktime(timetuple))
971 offset = unixtime - localunixtime
971 offset = unixtime - localunixtime
972 else:
972 else:
973 unixtime = localunixtime + offset
973 unixtime = localunixtime + offset
974 return unixtime, offset
974 return unixtime, offset
975
975
976 def parsedate(date, formats=None, defaults=None):
976 def parsedate(date, formats=None, defaults=None):
977 """parse a localized date/time string and return a (unixtime, offset) tuple.
977 """parse a localized date/time string and return a (unixtime, offset) tuple.
978
978
979 The date may be a "unixtime offset" string or in one of the specified
979 The date may be a "unixtime offset" string or in one of the specified
980 formats. If the date already is a (unixtime, offset) tuple, it is returned.
980 formats. If the date already is a (unixtime, offset) tuple, it is returned.
981 """
981 """
982 if not date:
982 if not date:
983 return 0, 0
983 return 0, 0
984 if isinstance(date, tuple) and len(date) == 2:
984 if isinstance(date, tuple) and len(date) == 2:
985 return date
985 return date
986 if not formats:
986 if not formats:
987 formats = defaultdateformats
987 formats = defaultdateformats
988 date = date.strip()
988 date = date.strip()
989 try:
989 try:
990 when, offset = map(int, date.split(' '))
990 when, offset = map(int, date.split(' '))
991 except ValueError:
991 except ValueError:
992 # fill out defaults
992 # fill out defaults
993 if not defaults:
993 if not defaults:
994 defaults = {}
994 defaults = {}
995 now = makedate()
995 now = makedate()
996 for part in "d mb yY HI M S".split():
996 for part in "d mb yY HI M S".split():
997 if part not in defaults:
997 if part not in defaults:
998 if part[0] in "HMS":
998 if part[0] in "HMS":
999 defaults[part] = "00"
999 defaults[part] = "00"
1000 else:
1000 else:
1001 defaults[part] = datestr(now, "%" + part[0])
1001 defaults[part] = datestr(now, "%" + part[0])
1002
1002
1003 for format in formats:
1003 for format in formats:
1004 try:
1004 try:
1005 when, offset = strdate(date, format, defaults)
1005 when, offset = strdate(date, format, defaults)
1006 except (ValueError, OverflowError):
1006 except (ValueError, OverflowError):
1007 pass
1007 pass
1008 else:
1008 else:
1009 break
1009 break
1010 else:
1010 else:
1011 raise Abort(_('invalid date: %r ') % date)
1011 raise Abort(_('invalid date: %r ') % date)
1012 # validate explicit (probably user-specified) date and
1012 # validate explicit (probably user-specified) date and
1013 # time zone offset. values must fit in signed 32 bits for
1013 # time zone offset. values must fit in signed 32 bits for
1014 # current 32-bit linux runtimes. timezones go from UTC-12
1014 # current 32-bit linux runtimes. timezones go from UTC-12
1015 # to UTC+14
1015 # to UTC+14
1016 if abs(when) > 0x7fffffff:
1016 if abs(when) > 0x7fffffff:
1017 raise Abort(_('date exceeds 32 bits: %d') % when)
1017 raise Abort(_('date exceeds 32 bits: %d') % when)
1018 if offset < -50400 or offset > 43200:
1018 if offset < -50400 or offset > 43200:
1019 raise Abort(_('impossible time zone offset: %d') % offset)
1019 raise Abort(_('impossible time zone offset: %d') % offset)
1020 return when, offset
1020 return when, offset
1021
1021
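# Hedged example (values are illustrative): a 'unixtime offset' pair is
# accepted verbatim; anything else is tried against the formats above with
# missing fields filled from defaults and the current time.
def _parsedate_example():
    return parsedate('1165432709 0')    # -> (1165432709, 0)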
1022 def matchdate(date):
1022 def matchdate(date):
1023 """Return a function that matches a given date match specifier
1023 """Return a function that matches a given date match specifier
1024
1024
1025 Formats include:
1025 Formats include:
1026
1026
1027 '{date}' match a given date to the accuracy provided
1027 '{date}' match a given date to the accuracy provided
1028
1028
1029 '<{date}' on or before a given date
1029 '<{date}' on or before a given date
1030
1030
1031 '>{date}' on or after a given date
1031 '>{date}' on or after a given date
1032
1032
1033 """
1033 """
1034
1034
1035 def lower(date):
1035 def lower(date):
1036 d = dict(mb="1", d="1")
1036 d = dict(mb="1", d="1")
1037 return parsedate(date, extendeddateformats, d)[0]
1037 return parsedate(date, extendeddateformats, d)[0]
1038
1038
1039 def upper(date):
1039 def upper(date):
1040 d = dict(mb="12", HI="23", M="59", S="59")
1040 d = dict(mb="12", HI="23", M="59", S="59")
1041 for days in "31 30 29".split():
1041 for days in "31 30 29".split():
1042 try:
1042 try:
1043 d["d"] = days
1043 d["d"] = days
1044 return parsedate(date, extendeddateformats, d)[0]
1044 return parsedate(date, extendeddateformats, d)[0]
1045 except:
1045 except:
1046 pass
1046 pass
1047 d["d"] = "28"
1047 d["d"] = "28"
1048 return parsedate(date, extendeddateformats, d)[0]
1048 return parsedate(date, extendeddateformats, d)[0]
1049
1049
1050 date = date.strip()
1050 date = date.strip()
1051 if date[0] == "<":
1051 if date[0] == "<":
1052 when = upper(date[1:])
1052 when = upper(date[1:])
1053 return lambda x: x <= when
1053 return lambda x: x <= when
1054 elif date[0] == ">":
1054 elif date[0] == ">":
1055 when = lower(date[1:])
1055 when = lower(date[1:])
1056 return lambda x: x >= when
1056 return lambda x: x >= when
1057 elif date[0] == "-":
1057 elif date[0] == "-":
1058 try:
1058 try:
1059 days = int(date[1:])
1059 days = int(date[1:])
1060 except ValueError:
1060 except ValueError:
1061 raise Abort(_("invalid day spec: %s") % date[1:])
1061 raise Abort(_("invalid day spec: %s") % date[1:])
1062 when = makedate()[0] - days * 3600 * 24
1062 when = makedate()[0] - days * 3600 * 24
1063 return lambda x: x >= when
1063 return lambda x: x >= when
1064 elif " to " in date:
1064 elif " to " in date:
1065 a, b = date.split(" to ")
1065 a, b = date.split(" to ")
1066 start, stop = lower(a), upper(b)
1066 start, stop = lower(a), upper(b)
1067 return lambda x: x >= start and x <= stop
1067 return lambda x: x >= start and x <= stop
1068 else:
1068 else:
1069 start, stop = lower(date), upper(date)
1069 start, stop = lower(date), upper(date)
1070 return lambda x: x >= start and x <= stop
1070 return lambda x: x >= start and x <= stop
1071
1071
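matchdate() above compiles a date spec into a plain predicate over timestamps. A tiny sketch of just the '-<days>' branch, with the cutoff computed from time.time() instead of makedate(), to show how such a predicate is built and applied:

    # Minimal standalone illustration of the predicate style returned by
    # matchdate(); the cutoff here comes from time.time(), an assumption
    # made purely for the demo.
    import time

    def lastdays(spec):
        # spec like "-30": match timestamps from the last N days
        days = int(spec[1:])
        cutoff = time.time() - days * 24 * 3600
        return lambda ts: ts >= cutoff

    match = lastdays("-30")
    print(match(time.time()))                   # True: "now" is within 30 days
    print(match(time.time() - 90 * 24 * 3600))  # False: 90 days ago is not
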
1072 def shortuser(user):
1072 def shortuser(user):
1073 """Return a short representation of a user name or email address."""
1073 """Return a short representation of a user name or email address."""
1074 f = user.find('@')
1074 f = user.find('@')
1075 if f >= 0:
1075 if f >= 0:
1076 user = user[:f]
1076 user = user[:f]
1077 f = user.find('<')
1077 f = user.find('<')
1078 if f >= 0:
1078 if f >= 0:
1079 user = user[f+1:]
1079 user = user[f+1:]
1080 f = user.find(' ')
1080 f = user.find(' ')
1081 if f >= 0:
1081 if f >= 0:
1082 user = user[:f]
1082 user = user[:f]
1083 f = user.find('.')
1083 f = user.find('.')
1084 if f >= 0:
1084 if f >= 0:
1085 user = user[:f]
1085 user = user[:f]
1086 return user
1086 return user
1087
1087
1088 def email(author):
1088 def email(author):
1089 '''get email of author.'''
1089 '''get email of author.'''
1090 r = author.find('>')
1090 r = author.find('>')
1091 if r == -1: r = None
1091 if r == -1: r = None
1092 return author[author.find('<')+1:r]
1092 return author[author.find('<')+1:r]
1093
1093
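Throwaway re-implementations of the two helpers above, only to make their expected outputs concrete; the splitting order follows the code, everything else is simplified:

    # Not Mercurial's API: small demos of what shortuser() and email()
    # produce for a typical author string.
    def shortuser_demo(user):
        user = user.split('@', 1)[0]
        if '<' in user:
            user = user.split('<', 1)[1]
        user = user.split(' ', 1)[0]
        user = user.split('.', 1)[0]
        return user

    def email_demo(author):
        start = author.find('<') + 1
        end = author.find('>')
        return author[start:end if end != -1 else None]

    author = "John Doe <john.doe@example.com>"
    print(shortuser_demo(author))   # john
    print(email_demo(author))       # john.doe@example.com
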
1094 def ellipsis(text, maxlength=400):
1094 def ellipsis(text, maxlength=400):
1095 """Trim string to at most maxlength (default: 400) characters."""
1095 """Trim string to at most maxlength (default: 400) characters."""
1096 if len(text) <= maxlength:
1096 if len(text) <= maxlength:
1097 return text
1097 return text
1098 else:
1098 else:
1099 return "%s..." % (text[:maxlength-3])
1099 return "%s..." % (text[:maxlength-3])
1100
1100
1101 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
1101 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
1102 '''yield every hg repository under path, recursively.'''
1102 '''yield every hg repository under path, recursively.'''
1103 def errhandler(err):
1103 def errhandler(err):
1104 if err.filename == path:
1104 if err.filename == path:
1105 raise err
1105 raise err
1106 if followsym and hasattr(os.path, 'samestat'):
1106 if followsym and hasattr(os.path, 'samestat'):
1107 def _add_dir_if_not_there(dirlst, dirname):
1107 def _add_dir_if_not_there(dirlst, dirname):
1108 match = False
1108 match = False
1109 samestat = os.path.samestat
1109 samestat = os.path.samestat
1110 dirstat = os.stat(dirname)
1110 dirstat = os.stat(dirname)
1111 for lstdirstat in dirlst:
1111 for lstdirstat in dirlst:
1112 if samestat(dirstat, lstdirstat):
1112 if samestat(dirstat, lstdirstat):
1113 match = True
1113 match = True
1114 break
1114 break
1115 if not match:
1115 if not match:
1116 dirlst.append(dirstat)
1116 dirlst.append(dirstat)
1117 return not match
1117 return not match
1118 else:
1118 else:
1119 followsym = False
1119 followsym = False
1120
1120
1121 if (seen_dirs is None) and followsym:
1121 if (seen_dirs is None) and followsym:
1122 seen_dirs = []
1122 seen_dirs = []
1123 _add_dir_if_not_there(seen_dirs, path)
1123 _add_dir_if_not_there(seen_dirs, path)
1124 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1124 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1125 dirs.sort()
1125 dirs.sort()
1126 if '.hg' in dirs:
1126 if '.hg' in dirs:
1127 yield root # found a repository
1127 yield root # found a repository
1128 qroot = os.path.join(root, '.hg', 'patches')
1128 qroot = os.path.join(root, '.hg', 'patches')
1129 if os.path.isdir(os.path.join(qroot, '.hg')):
1129 if os.path.isdir(os.path.join(qroot, '.hg')):
1130 yield qroot # we have a patch queue repo here
1130 yield qroot # we have a patch queue repo here
1131 if recurse:
1131 if recurse:
1132 # avoid recursing inside the .hg directory
1132 # avoid recursing inside the .hg directory
1133 dirs.remove('.hg')
1133 dirs.remove('.hg')
1134 else:
1134 else:
1135 dirs[:] = [] # don't descend further
1135 dirs[:] = [] # don't descend further
1136 elif followsym:
1136 elif followsym:
1137 newdirs = []
1137 newdirs = []
1138 for d in dirs:
1138 for d in dirs:
1139 fname = os.path.join(root, d)
1139 fname = os.path.join(root, d)
1140 if _add_dir_if_not_there(seen_dirs, fname):
1140 if _add_dir_if_not_there(seen_dirs, fname):
1141 if os.path.islink(fname):
1141 if os.path.islink(fname):
1142 for hgname in walkrepos(fname, True, seen_dirs):
1142 for hgname in walkrepos(fname, True, seen_dirs):
1143 yield hgname
1143 yield hgname
1144 else:
1144 else:
1145 newdirs.append(d)
1145 newdirs.append(d)
1146 dirs[:] = newdirs
1146 dirs[:] = newdirs
1147
1147
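walkrepos() above leans on the os.walk(topdown=True) contract that assigning to dirs[:] prunes the traversal. A cut-down sketch of just that idiom, without the symlink handling and the patch-queue check:

    # Sketch of the pruning idiom used by walkrepos(): with topdown=True,
    # mutating dirs in place controls which subdirectories get visited.
    import os

    def find_repos(path):
        for root, dirs, files in os.walk(path, topdown=True):
            if '.hg' in dirs:
                yield root
                dirs[:] = []    # found a repo: do not descend into it
            else:
                dirs.sort()     # keep traversal order deterministic

    for repo in find_repos('.'):
        print(repo)
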
1148 _rcpath = None
1148 _rcpath = None
1149
1149
1150 def os_rcpath():
1150 def os_rcpath():
1151 '''return default os-specific hgrc search path'''
1151 '''return default os-specific hgrc search path'''
1152 path = system_rcpath()
1152 path = system_rcpath()
1153 path.extend(user_rcpath())
1153 path.extend(user_rcpath())
1154 path = [os.path.normpath(f) for f in path]
1154 path = [os.path.normpath(f) for f in path]
1155 return path
1155 return path
1156
1156
1157 def rcpath():
1157 def rcpath():
1158 '''return hgrc search path. if env var HGRCPATH is set, use it.
1158 '''return hgrc search path. if env var HGRCPATH is set, use it.
1159 for each item in path, if directory, use files ending in .rc,
1159 for each item in path, if directory, use files ending in .rc,
1160 else use item.
1160 else use item.
1161 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1161 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1162 if no HGRCPATH, use default os-specific path.'''
1162 if no HGRCPATH, use default os-specific path.'''
1163 global _rcpath
1163 global _rcpath
1164 if _rcpath is None:
1164 if _rcpath is None:
1165 if 'HGRCPATH' in os.environ:
1165 if 'HGRCPATH' in os.environ:
1166 _rcpath = []
1166 _rcpath = []
1167 for p in os.environ['HGRCPATH'].split(os.pathsep):
1167 for p in os.environ['HGRCPATH'].split(os.pathsep):
1168 if not p: continue
1168 if not p: continue
1169 p = expandpath(p)
1169 p = expandpath(p)
1170 if os.path.isdir(p):
1170 if os.path.isdir(p):
1171 for f, kind in osutil.listdir(p):
1171 for f, kind in osutil.listdir(p):
1172 if f.endswith('.rc'):
1172 if f.endswith('.rc'):
1173 _rcpath.append(os.path.join(p, f))
1173 _rcpath.append(os.path.join(p, f))
1174 else:
1174 else:
1175 _rcpath.append(p)
1175 _rcpath.append(p)
1176 else:
1176 else:
1177 _rcpath = os_rcpath()
1177 _rcpath = os_rcpath()
1178 return _rcpath
1178 return _rcpath
1179
1179
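A sketch of the HGRCPATH expansion that rcpath() above performs, using plain os.listdir in place of Mercurial's osutil.listdir; directory entries contribute their *.rc files, anything else is taken verbatim:

    # Demo only: same expansion rules as rcpath(), minus the caching in
    # the module-level _rcpath variable.
    import os

    def expand_rcpath(value):
        result = []
        for p in value.split(os.pathsep):
            if not p:
                continue
            p = os.path.expanduser(os.path.expandvars(p))
            if os.path.isdir(p):
                result.extend(os.path.join(p, f)
                              for f in sorted(os.listdir(p))
                              if f.endswith('.rc'))
            else:
                result.append(p)
        return result

    print(expand_rcpath('/etc/mercurial' + os.pathsep + '~/.hgrc'))
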
1180 def bytecount(nbytes):
1180 def bytecount(nbytes):
1181 '''return byte count formatted as readable string, with units'''
1181 '''return byte count formatted as readable string, with units'''
1182
1182
1183 units = (
1183 units = (
1184 (100, 1<<30, _('%.0f GB')),
1184 (100, 1<<30, _('%.0f GB')),
1185 (10, 1<<30, _('%.1f GB')),
1185 (10, 1<<30, _('%.1f GB')),
1186 (1, 1<<30, _('%.2f GB')),
1186 (1, 1<<30, _('%.2f GB')),
1187 (100, 1<<20, _('%.0f MB')),
1187 (100, 1<<20, _('%.0f MB')),
1188 (10, 1<<20, _('%.1f MB')),
1188 (10, 1<<20, _('%.1f MB')),
1189 (1, 1<<20, _('%.2f MB')),
1189 (1, 1<<20, _('%.2f MB')),
1190 (100, 1<<10, _('%.0f KB')),
1190 (100, 1<<10, _('%.0f KB')),
1191 (10, 1<<10, _('%.1f KB')),
1191 (10, 1<<10, _('%.1f KB')),
1192 (1, 1<<10, _('%.2f KB')),
1192 (1, 1<<10, _('%.2f KB')),
1193 (1, 1, _('%.0f bytes')),
1193 (1, 1, _('%.0f bytes')),
1194 )
1194 )
1195
1195
1196 for multiplier, divisor, format in units:
1196 for multiplier, divisor, format in units:
1197 if nbytes >= divisor * multiplier:
1197 if nbytes >= divisor * multiplier:
1198 return format % (nbytes / float(divisor))
1198 return format % (nbytes / float(divisor))
1199 return units[-1][2] % nbytes
1199 return units[-1][2] % nbytes
1200
1200
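Worked examples for the unit ladder in bytecount() above; the table is copied from the code (minus the _() wrappers) so the snippet runs on its own. The first row whose threshold the value reaches decides both the unit and the number of decimals:

    units = (
        (100, 1 << 30, '%.0f GB'),
        (10, 1 << 30, '%.1f GB'),
        (1, 1 << 30, '%.2f GB'),
        (100, 1 << 20, '%.0f MB'),
        (10, 1 << 20, '%.1f MB'),
        (1, 1 << 20, '%.2f MB'),
        (100, 1 << 10, '%.0f KB'),
        (10, 1 << 10, '%.1f KB'),
        (1, 1 << 10, '%.2f KB'),
        (1, 1, '%.0f bytes'),
    )

    def bytecount_demo(nbytes):
        for multiplier, divisor, fmt in units:
            if nbytes >= divisor * multiplier:
                return fmt % (nbytes / float(divisor))
        return units[-1][2] % nbytes

    print(bytecount_demo(2048))              # 2.00 KB
    print(bytecount_demo(5 * 1024 * 1024))   # 5.00 MB
    print(bytecount_demo(123456789))         # 118 MB
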
1201 def drop_scheme(scheme, path):
1201 def drop_scheme(scheme, path):
1202 sc = scheme + ':'
1202 sc = scheme + ':'
1203 if path.startswith(sc):
1203 if path.startswith(sc):
1204 path = path[len(sc):]
1204 path = path[len(sc):]
1205 if path.startswith('//'):
1205 if path.startswith('//'):
1206 if scheme == 'file':
1206 if scheme == 'file':
1207 i = path.find('/', 2)
1207 i = path.find('/', 2)
1208 if i == -1:
1208 if i == -1:
1209 return ''
1209 return ''
1210 # On Windows, absolute paths are rooted at the current drive
1210 # On Windows, absolute paths are rooted at the current drive
1211 # root. On POSIX they are rooted at the file system root.
1211 # root. On POSIX they are rooted at the file system root.
1212 if os.name == 'nt':
1212 if os.name == 'nt':
1213 droot = os.path.splitdrive(os.getcwd())[0] + '/'
1213 droot = os.path.splitdrive(os.getcwd())[0] + '/'
1214 path = os.path.join(droot, path[i+1:])
1214 path = os.path.join(droot, path[i+1:])
1215 else:
1215 else:
1216 path = path[i:]
1216 path = path[i:]
1217 else:
1217 else:
1218 path = path[2:]
1218 path = path[2:]
1219 return path
1219 return path
1220
1220
1221 def uirepr(s):
1221 def uirepr(s):
1222 # Avoid double backslash in Windows path repr()
1222 # Avoid double backslash in Windows path repr()
1223 return repr(s).replace('\\\\', '\\')
1223 return repr(s).replace('\\\\', '\\')
1224
1224
1225 def termwidth():
1225 def termwidth():
1226 if 'COLUMNS' in os.environ:
1226 if 'COLUMNS' in os.environ:
1227 try:
1227 try:
1228 return int(os.environ['COLUMNS'])
1228 return int(os.environ['COLUMNS'])
1229 except ValueError:
1229 except ValueError:
1230 pass
1230 pass
1231 try:
1231 try:
1232 import termios, array, fcntl
1232 import termios, array, fcntl
1233 for dev in (sys.stdout, sys.stdin):
1233 for dev in (sys.stdout, sys.stdin):
1234 try:
1234 try:
1235 try:
1235 try:
1236 fd = dev.fileno()
1236 fd = dev.fileno()
1237 except AttributeError:
1237 except AttributeError:
1238 continue
1238 continue
1239 if not os.isatty(fd):
1239 if not os.isatty(fd):
1240 continue
1240 continue
1241 arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
1241 arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
1242 return array.array('h', arri)[1]
1242 return array.array('h', arri)[1]
1243 except ValueError:
1243 except ValueError:
1244 pass
1244 pass
1245 except IOError, e:
1245 except IOError, e:
1246 if e[0] == errno.EINVAL:
1246 if e[0] == errno.EINVAL:
1247 pass
1247 pass
1248 else:
1248 else:
1249 raise
1249 raise
1250 except ImportError:
1250 except ImportError:
1251 pass
1251 pass
1252 return 80
1252 return 80
1253
1253
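A compact sketch of the TIOCGWINSZ query that termwidth() above performs, with the same COLUMNS override and fallback of 80; on modern Python shutil.get_terminal_size() would normally be used instead:

    # POSIX-only demo; on Windows the fcntl import fails and 80 is returned,
    # which matches the fallback in the original.
    import os
    import sys

    def termwidth_demo():
        if 'COLUMNS' in os.environ:
            try:
                return int(os.environ['COLUMNS'])
            except ValueError:
                pass
        try:
            import array, fcntl, termios
            fd = sys.stdout.fileno()
            if os.isatty(fd):
                buf = fcntl.ioctl(fd, termios.TIOCGWINSZ, b'\0' * 8)
                return array.array('h', buf)[1]   # [0] is rows, [1] is columns
        except (ImportError, OSError, ValueError):
            pass
        return 80

    print(termwidth_demo())
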
1254 def wrap(line, hangindent, width=None):
1254 def wrap(line, hangindent, width=None):
1255 if width is None:
1255 if width is None:
1256 width = termwidth() - 2
1256 width = termwidth() - 2
1257 if width <= hangindent:
1257 if width <= hangindent:
1258 # adjust for weird terminal size
1258 # adjust for weird terminal size
1259 width = max(78, hangindent + 1)
1259 width = max(78, hangindent + 1)
1260 padding = '\n' + ' ' * hangindent
1260 padding = '\n' + ' ' * hangindent
1261 # To avoid corrupting multi-byte characters in line, we must wrap
1261 # To avoid corrupting multi-byte characters in line, we must wrap
1262 # a Unicode string instead of a bytestring.
1262 # a Unicode string instead of a bytestring.
1263 try:
1263 try:
1264 u = line.decode(encoding.encoding)
1264 u = line.decode(encoding.encoding)
1265 w = padding.join(textwrap.wrap(u, width=width - hangindent))
1265 w = padding.join(textwrap.wrap(u, width=width - hangindent))
1266 return w.encode(encoding.encoding)
1266 return w.encode(encoding.encoding)
1267 except UnicodeDecodeError:
1267 except UnicodeDecodeError:
1268 return padding.join(textwrap.wrap(line, width=width - hangindent))
1268 return padding.join(textwrap.wrap(line, width=width - hangindent))
1269
1269
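wrap() above decodes to Unicode before calling textwrap so multi-byte characters are never split, then joins the wrapped pieces with a hanging indent. A small demo of the indent part on a str input, skipping the encoding round-trip:

    # Demo of the hanging-indent style produced by wrap(); the byte-string
    # decoding via Mercurial's encoding module is omitted here.
    import textwrap

    def wrap_demo(line, hangindent, width=30):
        padding = '\n' + ' ' * hangindent
        return padding.join(textwrap.wrap(line, width=width - hangindent))

    print(wrap_demo('a fairly long option description that needs wrapping', 8))
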
1270 def iterlines(iterator):
1270 def iterlines(iterator):
1271 for chunk in iterator:
1271 for chunk in iterator:
1272 for line in chunk.splitlines():
1272 for line in chunk.splitlines():
1273 yield line
1273 yield line
1274
1274
1275 def expandpath(path):
1275 def expandpath(path):
1276 return os.path.expanduser(os.path.expandvars(path))
1276 return os.path.expanduser(os.path.expandvars(path))
1277
1278 def hgcmd():
1279 """Return the command used to execute current hg
1280
1281 This is different from hgexecutable() because on Windows we want
1282 to avoid things opening new shell windows like batch files, so we
1283 get either the python call or current executable.
1284 """
1285 if main_is_frozen():
1286 return [sys.executable]
1287 return gethgcmd()
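For context, a sketch of the kind of value hgcmd() above is expected to produce: either the frozen executable alone, or interpreter plus script, as the Windows gethgcmd() later in this change returns. main_is_frozen() is assumed to mean "bundled by py2exe or similar"; the demo flag stands in for it:

    # Hypothetical illustration, not Mercurial's code: shows the two shapes
    # of command list a detached child would be launched with.
    import sys

    def hgcmd_demo(frozen):
        if frozen:
            return [sys.executable]
        return [sys.executable] + sys.argv[:1]

    print(hgcmd_demo(False))   # e.g. ['/usr/bin/python3', 'demo.py']
    print(hgcmd_demo(True))    # e.g. ['/usr/bin/python3']
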
@@ -1,361 +1,364 b''
1 # windows.py - Windows utility function implementations for Mercurial
1 # windows.py - Windows utility function implementations for Mercurial
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from i18n import _
8 from i18n import _
9 import osutil, error
9 import osutil, error
10 import errno, msvcrt, os, re, sys, random, subprocess
10 import errno, msvcrt, os, re, sys, random, subprocess
11
11
12 nulldev = 'NUL:'
12 nulldev = 'NUL:'
13 umask = 002
13 umask = 002
14
14
15 # wrap osutil.posixfile to provide friendlier exceptions
15 # wrap osutil.posixfile to provide friendlier exceptions
16 def posixfile(name, mode='r', buffering=-1):
16 def posixfile(name, mode='r', buffering=-1):
17 try:
17 try:
18 return osutil.posixfile(name, mode, buffering)
18 return osutil.posixfile(name, mode, buffering)
19 except WindowsError, err:
19 except WindowsError, err:
20 raise IOError(err.errno, '%s: %s' % (name, err.strerror))
20 raise IOError(err.errno, '%s: %s' % (name, err.strerror))
21 posixfile.__doc__ = osutil.posixfile.__doc__
21 posixfile.__doc__ = osutil.posixfile.__doc__
22
22
23 class winstdout(object):
23 class winstdout(object):
24 '''stdout on windows misbehaves if sent through a pipe'''
24 '''stdout on windows misbehaves if sent through a pipe'''
25
25
26 def __init__(self, fp):
26 def __init__(self, fp):
27 self.fp = fp
27 self.fp = fp
28
28
29 def __getattr__(self, key):
29 def __getattr__(self, key):
30 return getattr(self.fp, key)
30 return getattr(self.fp, key)
31
31
32 def close(self):
32 def close(self):
33 try:
33 try:
34 self.fp.close()
34 self.fp.close()
35 except: pass
35 except: pass
36
36
37 def write(self, s):
37 def write(self, s):
38 try:
38 try:
39 # This is a workaround for the "Not enough space" error seen when
39 # This is a workaround for the "Not enough space" error seen when
40 # writing a large amount of data to the console.
40 # writing a large amount of data to the console.
41 limit = 16000
41 limit = 16000
42 l = len(s)
42 l = len(s)
43 start = 0
43 start = 0
44 self.softspace = 0;
44 self.softspace = 0;
45 while start < l:
45 while start < l:
46 end = start + limit
46 end = start + limit
47 self.fp.write(s[start:end])
47 self.fp.write(s[start:end])
48 start = end
48 start = end
49 except IOError, inst:
49 except IOError, inst:
50 if inst.errno != 0: raise
50 if inst.errno != 0: raise
51 self.close()
51 self.close()
52 raise IOError(errno.EPIPE, 'Broken pipe')
52 raise IOError(errno.EPIPE, 'Broken pipe')
53
53
54 def flush(self):
54 def flush(self):
55 try:
55 try:
56 return self.fp.flush()
56 return self.fp.flush()
57 except IOError, inst:
57 except IOError, inst:
58 if inst.errno != errno.EINVAL: raise
58 if inst.errno != errno.EINVAL: raise
59 self.close()
59 self.close()
60 raise IOError(errno.EPIPE, 'Broken pipe')
60 raise IOError(errno.EPIPE, 'Broken pipe')
61
61
62 sys.stdout = winstdout(sys.stdout)
62 sys.stdout = winstdout(sys.stdout)
63
63
64 def _is_win_9x():
64 def _is_win_9x():
65 '''return true if run on windows 95, 98 or me.'''
65 '''return true if run on windows 95, 98 or me.'''
66 try:
66 try:
67 return sys.getwindowsversion()[3] == 1
67 return sys.getwindowsversion()[3] == 1
68 except AttributeError:
68 except AttributeError:
69 return 'command' in os.environ.get('comspec', '')
69 return 'command' in os.environ.get('comspec', '')
70
70
71 def openhardlinks():
71 def openhardlinks():
72 return not _is_win_9x() and "win32api" in globals()
72 return not _is_win_9x() and "win32api" in globals()
73
73
74 def system_rcpath():
74 def system_rcpath():
75 try:
75 try:
76 return system_rcpath_win32()
76 return system_rcpath_win32()
77 except:
77 except:
78 return [r'c:\mercurial\mercurial.ini']
78 return [r'c:\mercurial\mercurial.ini']
79
79
80 def user_rcpath():
80 def user_rcpath():
81 '''return os-specific hgrc search path to the user dir'''
81 '''return os-specific hgrc search path to the user dir'''
82 try:
82 try:
83 path = user_rcpath_win32()
83 path = user_rcpath_win32()
84 except:
84 except:
85 home = os.path.expanduser('~')
85 home = os.path.expanduser('~')
86 path = [os.path.join(home, 'mercurial.ini'),
86 path = [os.path.join(home, 'mercurial.ini'),
87 os.path.join(home, '.hgrc')]
87 os.path.join(home, '.hgrc')]
88 userprofile = os.environ.get('USERPROFILE')
88 userprofile = os.environ.get('USERPROFILE')
89 if userprofile:
89 if userprofile:
90 path.append(os.path.join(userprofile, 'mercurial.ini'))
90 path.append(os.path.join(userprofile, 'mercurial.ini'))
91 path.append(os.path.join(userprofile, '.hgrc'))
91 path.append(os.path.join(userprofile, '.hgrc'))
92 return path
92 return path
93
93
94 def parse_patch_output(output_line):
94 def parse_patch_output(output_line):
95 """parses the output produced by patch and returns the filename"""
95 """parses the output produced by patch and returns the filename"""
96 pf = output_line[14:]
96 pf = output_line[14:]
97 if pf[0] == '`':
97 if pf[0] == '`':
98 pf = pf[1:-1] # Remove the quotes
98 pf = pf[1:-1] # Remove the quotes
99 return pf
99 return pf
100
100
101 def sshargs(sshcmd, host, user, port):
101 def sshargs(sshcmd, host, user, port):
102 '''Build argument list for ssh or Plink'''
102 '''Build argument list for ssh or Plink'''
103 pflag = 'plink' in sshcmd.lower() and '-P' or '-p'
103 pflag = 'plink' in sshcmd.lower() and '-P' or '-p'
104 args = user and ("%s@%s" % (user, host)) or host
104 args = user and ("%s@%s" % (user, host)) or host
105 return port and ("%s %s %s" % (args, pflag, port)) or args
105 return port and ("%s %s %s" % (args, pflag, port)) or args
106
106
107 def testpid(pid):
107 def testpid(pid):
108 '''return False if pid dead, True if running or not known'''
108 '''return False if pid dead, True if running or not known'''
109 return True
109 return True
110
110
111 def set_flags(f, l, x):
111 def set_flags(f, l, x):
112 pass
112 pass
113
113
114 def set_binary(fd):
114 def set_binary(fd):
115 # When run without console, pipes may expose invalid
115 # When run without console, pipes may expose invalid
116 # fileno(), usually set to -1.
116 # fileno(), usually set to -1.
117 if hasattr(fd, 'fileno') and fd.fileno() >= 0:
117 if hasattr(fd, 'fileno') and fd.fileno() >= 0:
118 msvcrt.setmode(fd.fileno(), os.O_BINARY)
118 msvcrt.setmode(fd.fileno(), os.O_BINARY)
119
119
120 def pconvert(path):
120 def pconvert(path):
121 return '/'.join(path.split(os.sep))
121 return '/'.join(path.split(os.sep))
122
122
123 def localpath(path):
123 def localpath(path):
124 return path.replace('/', '\\')
124 return path.replace('/', '\\')
125
125
126 def normpath(path):
126 def normpath(path):
127 return pconvert(os.path.normpath(path))
127 return pconvert(os.path.normpath(path))
128
128
129 def realpath(path):
129 def realpath(path):
130 '''
130 '''
131 Returns the true, canonical file system path equivalent to the given
131 Returns the true, canonical file system path equivalent to the given
132 path.
132 path.
133 '''
133 '''
134 # TODO: There may be a more clever way to do this that also handles other,
134 # TODO: There may be a more clever way to do this that also handles other,
135 # less common file systems.
135 # less common file systems.
136 return os.path.normpath(os.path.normcase(os.path.realpath(path)))
136 return os.path.normpath(os.path.normcase(os.path.realpath(path)))
137
137
138 def samestat(s1, s2):
138 def samestat(s1, s2):
139 return False
139 return False
140
140
141 # A sequence of backslashes is special iff it precedes a double quote:
141 # A sequence of backslashes is special iff it precedes a double quote:
142 # - if there's an even number of backslashes, the double quote is not
142 # - if there's an even number of backslashes, the double quote is not
143 # quoted (i.e. it ends the quoted region)
143 # quoted (i.e. it ends the quoted region)
144 # - if there's an odd number of backslashes, the double quote is quoted
144 # - if there's an odd number of backslashes, the double quote is quoted
145 # - in both cases, every pair of backslashes is unquoted into a single
145 # - in both cases, every pair of backslashes is unquoted into a single
146 # backslash
146 # backslash
147 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
147 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
148 # So, to quote a string, we must surround it in double quotes, double
148 # So, to quote a string, we must surround it in double quotes, double
149 # the number of backslashes that precede double quotes and add another
149 # the number of backslashes that precede double quotes and add another
150 # backslash before every double quote (being careful with the double
150 # backslash before every double quote (being careful with the double
151 # quote we've appended to the end)
151 # quote we've appended to the end)
152 _quotere = None
152 _quotere = None
153 def shellquote(s):
153 def shellquote(s):
154 global _quotere
154 global _quotere
155 if _quotere is None:
155 if _quotere is None:
156 _quotere = re.compile(r'(\\*)("|\\$)')
156 _quotere = re.compile(r'(\\*)("|\\$)')
157 return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
157 return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
158
158
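A worked example of the backslash-doubling rule that shellquote() above encodes in its regular expression; only runs of backslashes that sit in front of a double quote (or at the very end, just before the quote we append) get doubled:

    # Same pattern and substitution as the code above, pulled out so the
    # two sample inputs can be checked by hand.
    import re

    _quotere = re.compile(r'(\\*)("|\\$)')

    def shellquote_demo(s):
        return '"%s"' % _quotere.sub(r'\1\1\\\2', s)

    print(shellquote_demo('say "hi"'))      # "say \"hi\""
    print(shellquote_demo('C:\\temp\\'))    # "C:\temp\\"
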
159 def quotecommand(cmd):
159 def quotecommand(cmd):
160 """Build a command string suitable for os.popen* calls."""
160 """Build a command string suitable for os.popen* calls."""
161 # The extra quotes are needed because popen* runs the command
161 # The extra quotes are needed because popen* runs the command
162 # through the current COMSPEC. cmd.exe suppresses enclosing quotes.
162 # through the current COMSPEC. cmd.exe suppresses enclosing quotes.
163 return '"' + cmd + '"'
163 return '"' + cmd + '"'
164
164
165 def popen(command, mode='r'):
165 def popen(command, mode='r'):
166 # Work around "popen spawned process may not write to stdout
166 # Work around "popen spawned process may not write to stdout
167 # under windows"
167 # under windows"
168 # http://bugs.python.org/issue1366
168 # http://bugs.python.org/issue1366
169 command += " 2> %s" % nulldev
169 command += " 2> %s" % nulldev
170 return os.popen(quotecommand(command), mode)
170 return os.popen(quotecommand(command), mode)
171
171
172 def explain_exit(code):
172 def explain_exit(code):
173 return _("exited with status %d") % code, code
173 return _("exited with status %d") % code, code
174
174
175 # if you change this stub into a real check, please try to implement the
175 # if you change this stub into a real check, please try to implement the
176 # username and groupname functions above, too.
176 # username and groupname functions above, too.
177 def isowner(st):
177 def isowner(st):
178 return True
178 return True
179
179
180 def find_exe(command):
180 def find_exe(command):
181 '''Find executable for command searching like cmd.exe does.
181 '''Find executable for command searching like cmd.exe does.
182 If command is a basename then PATH is searched for command.
182 If command is a basename then PATH is searched for command.
183 PATH isn't searched if command is an absolute or relative path.
183 PATH isn't searched if command is an absolute or relative path.
184 An extension from PATHEXT is found and added if not present.
184 An extension from PATHEXT is found and added if not present.
185 If command isn't found None is returned.'''
185 If command isn't found None is returned.'''
186 pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
186 pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
187 pathexts = [ext for ext in pathext.lower().split(os.pathsep)]
187 pathexts = [ext for ext in pathext.lower().split(os.pathsep)]
188 if os.path.splitext(command)[1].lower() in pathexts:
188 if os.path.splitext(command)[1].lower() in pathexts:
189 pathexts = ['']
189 pathexts = ['']
190
190
191 def findexisting(pathcommand):
191 def findexisting(pathcommand):
192 'Will append extension (if needed) and return existing file'
192 'Will append extension (if needed) and return existing file'
193 for ext in pathexts:
193 for ext in pathexts:
194 executable = pathcommand + ext
194 executable = pathcommand + ext
195 if os.path.exists(executable):
195 if os.path.exists(executable):
196 return executable
196 return executable
197 return None
197 return None
198
198
199 if os.sep in command:
199 if os.sep in command:
200 return findexisting(command)
200 return findexisting(command)
201
201
202 for path in os.environ.get('PATH', '').split(os.pathsep):
202 for path in os.environ.get('PATH', '').split(os.pathsep):
203 executable = findexisting(os.path.join(path, command))
203 executable = findexisting(os.path.join(path, command))
204 if executable is not None:
204 if executable is not None:
205 return executable
205 return executable
206 return findexisting(os.path.expanduser(os.path.expandvars(command)))
206 return findexisting(os.path.expanduser(os.path.expandvars(command)))
207
207
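A simplified sketch of the PATHEXT handling in find_exe() above: if the command already ends in a known extension nothing is appended, otherwise each extension is tried in order. The default extension string mirrors the code; the PATH search itself is left out:

    # Demo of the candidate names find_exe() would probe for a command.
    import os

    def candidates(command, pathext='.COM;.EXE;.BAT;.CMD'):
        exts = [e.lower() for e in pathext.split(';')]
        if os.path.splitext(command)[1].lower() in exts:
            exts = ['']   # extension already present: try the name as-is
        return [command + e for e in exts]

    print(candidates('hg'))        # ['hg.com', 'hg.exe', 'hg.bat', 'hg.cmd']
    print(candidates('hg.exe'))    # ['hg.exe']
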
208 def set_signal_handler():
208 def set_signal_handler():
209 try:
209 try:
210 set_signal_handler_win32()
210 set_signal_handler_win32()
211 except NameError:
211 except NameError:
212 pass
212 pass
213
213
214 def statfiles(files):
214 def statfiles(files):
215 '''Stat each file in files and yield stat or None if file does not exist.
215 '''Stat each file in files and yield stat or None if file does not exist.
216 Cluster and cache stat per directory to minimize number of OS stat calls.'''
216 Cluster and cache stat per directory to minimize number of OS stat calls.'''
217 ncase = os.path.normcase
217 ncase = os.path.normcase
218 sep = os.sep
218 sep = os.sep
219 dircache = {} # dirname -> filename -> status | None if file does not exist
219 dircache = {} # dirname -> filename -> status | None if file does not exist
220 for nf in files:
220 for nf in files:
221 nf = ncase(nf)
221 nf = ncase(nf)
222 dir, base = os.path.split(nf)
222 dir, base = os.path.split(nf)
223 if not dir:
223 if not dir:
224 dir = '.'
224 dir = '.'
225 cache = dircache.get(dir, None)
225 cache = dircache.get(dir, None)
226 if cache is None:
226 if cache is None:
227 try:
227 try:
228 dmap = dict([(ncase(n), s)
228 dmap = dict([(ncase(n), s)
229 for n, k, s in osutil.listdir(dir, True)])
229 for n, k, s in osutil.listdir(dir, True)])
230 except OSError, err:
230 except OSError, err:
231 # handle directory not found in Python versions prior to 2.5
231 # handle directory not found in Python versions prior to 2.5
232 # Python <= 2.4 returns native Windows code 3 in errno
232 # Python <= 2.4 returns native Windows code 3 in errno
233 # Python >= 2.5 returns ENOENT and adds winerror field
233 # Python >= 2.5 returns ENOENT and adds winerror field
234 # EINVAL is raised if dir is not a directory.
234 # EINVAL is raised if dir is not a directory.
235 if err.errno not in (3, errno.ENOENT, errno.EINVAL,
235 if err.errno not in (3, errno.ENOENT, errno.EINVAL,
236 errno.ENOTDIR):
236 errno.ENOTDIR):
237 raise
237 raise
238 dmap = {}
238 dmap = {}
239 cache = dircache.setdefault(dir, dmap)
239 cache = dircache.setdefault(dir, dmap)
240 yield cache.get(base, None)
240 yield cache.get(base, None)
241
241
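The point of statfiles() above is to stat a whole directory once and answer later files from a cache. A sketch of that idea with os.scandir standing in for osutil.listdir, and without the normcase step:

    # Per-directory caching demo: each directory is listed once, every later
    # file in the same directory hits the cache.
    import os

    def statfiles_demo(files):
        dircache = {}  # dirname -> {filename: stat result}
        for nf in files:
            dirname, base = os.path.split(nf)
            dirname = dirname or '.'
            cache = dircache.get(dirname)
            if cache is None:
                cache = {}
                try:
                    with os.scandir(dirname) as it:
                        for entry in it:
                            cache[entry.name] = entry.stat(follow_symlinks=False)
                except OSError:
                    pass   # missing directory: every lookup yields None
                dircache[dirname] = cache
            yield cache.get(base)

    for st in statfiles_demo(['setup.py', 'README', 'no/such/file']):
        print(st is not None)
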
242 def getuser():
242 def getuser():
243 '''return name of current user'''
243 '''return name of current user'''
244 raise error.Abort(_('user name not available - set USERNAME '
244 raise error.Abort(_('user name not available - set USERNAME '
245 'environment variable'))
245 'environment variable'))
246
246
247 def username(uid=None):
247 def username(uid=None):
248 """Return the name of the user with the given uid.
248 """Return the name of the user with the given uid.
249
249
250 If uid is None, return the name of the current user."""
250 If uid is None, return the name of the current user."""
251 return None
251 return None
252
252
253 def groupname(gid=None):
253 def groupname(gid=None):
254 """Return the name of the group with the given gid.
254 """Return the name of the group with the given gid.
255
255
256 If gid is None, return the name of the current group."""
256 If gid is None, return the name of the current group."""
257 return None
257 return None
258
258
259 def _removedirs(name):
259 def _removedirs(name):
260 """special version of os.removedirs that does not remove symlinked
260 """special version of os.removedirs that does not remove symlinked
261 directories or junction points if they actually contain files"""
261 directories or junction points if they actually contain files"""
262 if osutil.listdir(name):
262 if osutil.listdir(name):
263 return
263 return
264 os.rmdir(name)
264 os.rmdir(name)
265 head, tail = os.path.split(name)
265 head, tail = os.path.split(name)
266 if not tail:
266 if not tail:
267 head, tail = os.path.split(head)
267 head, tail = os.path.split(head)
268 while head and tail:
268 while head and tail:
269 try:
269 try:
270 if osutil.listdir(head):
270 if osutil.listdir(head):
271 return
271 return
272 os.rmdir(head)
272 os.rmdir(head)
273 except:
273 except:
274 break
274 break
275 head, tail = os.path.split(head)
275 head, tail = os.path.split(head)
276
276
277 def unlink(f):
277 def unlink(f):
278 """unlink and remove the directory if it is empty"""
278 """unlink and remove the directory if it is empty"""
279 os.unlink(f)
279 os.unlink(f)
280 # try removing directories that might now be empty
280 # try removing directories that might now be empty
281 try:
281 try:
282 _removedirs(os.path.dirname(f))
282 _removedirs(os.path.dirname(f))
283 except OSError:
283 except OSError:
284 pass
284 pass
285
285
286 def rename(src, dst):
286 def rename(src, dst):
287 '''atomically rename file src to dst, replacing dst if it exists'''
287 '''atomically rename file src to dst, replacing dst if it exists'''
288 try:
288 try:
289 os.rename(src, dst)
289 os.rename(src, dst)
290 except OSError, err: # FIXME: check err (EEXIST ?)
290 except OSError, err: # FIXME: check err (EEXIST ?)
291
291
292 # On Windows, renaming to an existing file is not allowed, so we
292 # On Windows, renaming to an existing file is not allowed, so we
293 # must delete destination first. But if a file is open, unlink
293 # must delete destination first. But if a file is open, unlink
294 # schedules it for delete but does not delete it. Rename
294 # schedules it for delete but does not delete it. Rename
295 # happens immediately even for open files, so we rename
295 # happens immediately even for open files, so we rename
296 # destination to a temporary name, then delete that. Then
296 # destination to a temporary name, then delete that. Then
297 # rename is safe to do.
297 # rename is safe to do.
298 # The temporary name is chosen at random to avoid the situation
298 # The temporary name is chosen at random to avoid the situation
299 # where a file is left lying around from a previous aborted run.
299 # where a file is left lying around from a previous aborted run.
300 # The usual race condition this introduces can't be avoided as
300 # The usual race condition this introduces can't be avoided as
301 # we need the name to rename into, and not the file itself. Due
301 # we need the name to rename into, and not the file itself. Due
302 # to the nature of the operation however, any races will at worst
302 # to the nature of the operation however, any races will at worst
303 # lead to the rename failing and the current operation aborting.
303 # lead to the rename failing and the current operation aborting.
304
304
305 def tempname(prefix):
305 def tempname(prefix):
306 for tries in xrange(10):
306 for tries in xrange(10):
307 temp = '%s-%08x' % (prefix, random.randint(0, 0xffffffff))
307 temp = '%s-%08x' % (prefix, random.randint(0, 0xffffffff))
308 if not os.path.exists(temp):
308 if not os.path.exists(temp):
309 return temp
309 return temp
310 raise IOError, (errno.EEXIST, "No usable temporary filename found")
310 raise IOError, (errno.EEXIST, "No usable temporary filename found")
311
311
312 temp = tempname(dst)
312 temp = tempname(dst)
313 os.rename(dst, temp)
313 os.rename(dst, temp)
314 try:
314 try:
315 os.unlink(temp)
315 os.unlink(temp)
316 except:
316 except:
317 # Some rude AV-scanners on Windows may cause the unlink to
317 # Some rude AV-scanners on Windows may cause the unlink to
318 # fail. Not aborting here just leaks the temp file, whereas
318 # fail. Not aborting here just leaks the temp file, whereas
319 # aborting at this point may leave serious inconsistencies.
319 # aborting at this point may leave serious inconsistencies.
320 # Ideally, we would notify the user here.
320 # Ideally, we would notify the user here.
321 pass
321 pass
322 os.rename(src, dst)
322 os.rename(src, dst)
323
323
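A small standalone illustration of the random temporary-name probe used inside rename() above; the ten-try loop and the %08x suffix follow the original, while the surrounding rename/unlink dance is omitted:

    # Demo of the collision-avoiding temp name used before deleting the
    # rename destination.
    import errno
    import os
    import random

    def tempname(prefix, tries=10):
        for _ in range(tries):
            temp = '%s-%08x' % (prefix, random.randint(0, 0xffffffff))
            if not os.path.exists(temp):
                return temp
        raise IOError(errno.EEXIST, "No usable temporary filename found")

    print(tempname('target.txt'))   # e.g. target.txt-3f92a1c0
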
324 def spawndetached(args):
324 def spawndetached(args):
325 # No standard library function really spawns a fully detached
325 # No standard library function really spawns a fully detached
326 # process under win32 because they allocate pipes or other objects
326 # process under win32 because they allocate pipes or other objects
327 # to handle standard streams communications. Passing these objects
327 # to handle standard streams communications. Passing these objects
328 # to the child process requires handle inheritance to be enabled
328 # to the child process requires handle inheritance to be enabled
329 # which makes really detached processes impossible.
329 # which makes really detached processes impossible.
330 class STARTUPINFO:
330 class STARTUPINFO:
331 dwFlags = subprocess.STARTF_USESHOWWINDOW
331 dwFlags = subprocess.STARTF_USESHOWWINDOW
332 hStdInput = None
332 hStdInput = None
333 hStdOutput = None
333 hStdOutput = None
334 hStdError = None
334 hStdError = None
335 wShowWindow = subprocess.SW_HIDE
335 wShowWindow = subprocess.SW_HIDE
336
336
337 args = subprocess.list2cmdline(args)
337 args = subprocess.list2cmdline(args)
338 # Not running the command in shell mode makes python26 hang when
338 # Not running the command in shell mode makes python26 hang when
339 # writing to hgweb output socket.
339 # writing to hgweb output socket.
340 comspec = os.environ.get("COMSPEC", "cmd.exe")
340 comspec = os.environ.get("COMSPEC", "cmd.exe")
341 args = comspec + " /c " + args
341 args = comspec + " /c " + args
342 hp, ht, pid, tid = subprocess.CreateProcess(
342 hp, ht, pid, tid = subprocess.CreateProcess(
343 None, args,
343 None, args,
344 # no special security
344 # no special security
345 None, None,
345 None, None,
346 # Do not inherit handles
346 # Do not inherit handles
347 0,
347 0,
348 # DETACHED_PROCESS
348 # DETACHED_PROCESS
349 0x00000008,
349 0x00000008,
350 os.environ,
350 os.environ,
351 os.getcwd(),
351 os.getcwd(),
352 STARTUPINFO())
352 STARTUPINFO())
353 return pid
353 return pid
354
354
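Hedged sketch only: roughly what spawndetached() above achieves, expressed with the public subprocess.Popen API that later Pythons provide instead of a direct CreateProcess call. The original deliberately avoids any handle inheritance, which Popen does not fully reproduce; the example command is a harmless placeholder:

    # Windows-only approximation of a fully detached child process.
    import subprocess
    import sys

    DETACHED_PROCESS = 0x00000008   # same creation flag value used above

    def spawn_detached_demo(args):
        if sys.platform != 'win32':
            raise OSError('this sketch is Windows-only')
        # no console window, no wait; the pid is returned like spawndetached()
        return subprocess.Popen(args, creationflags=DETACHED_PROCESS).pid

    # Example (Windows only): spawn_detached_demo(['cmd.exe', '/c', 'echo hi'])
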
355 def gethgcmd():
356 return [sys.executable] + sys.argv[:1]
357
355 try:
358 try:
356 # override functions with win32 versions if possible
359 # override functions with win32 versions if possible
357 from win32 import *
360 from win32 import *
358 except ImportError:
361 except ImportError:
359 pass
362 pass
360
363
361 expandglobs = True
364 expandglobs = True