##// END OF EJS Templates
merge with stable
Matt Mackall -
r15411:afc02adf merge default
parent child Browse files
Show More
@@ -1,451 +1,453 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import platform
13 import platform
14 import shutil
14 import shutil
15 import stat
15 import stat
16 import tempfile
16 import tempfile
17
17
18 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
19 from mercurial.i18n import _
19 from mercurial.i18n import _
20
20
21 shortname = '.hglf'
21 shortname = '.hglf'
22 longname = 'largefiles'
22 longname = 'largefiles'
23
23
24
24
25 # -- Portability wrappers ----------------------------------------------
25 # -- Portability wrappers ----------------------------------------------
26
26
27 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
27 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
28 return dirstate.walk(matcher, [], unknown, ignored)
28 return dirstate.walk(matcher, [], unknown, ignored)
29
29
30 def repo_add(repo, list):
30 def repo_add(repo, list):
31 add = repo[None].add
31 add = repo[None].add
32 return add(list)
32 return add(list)
33
33
34 def repo_remove(repo, list, unlink=False):
34 def repo_remove(repo, list, unlink=False):
35 def remove(list, unlink):
35 def remove(list, unlink):
36 wlock = repo.wlock()
36 wlock = repo.wlock()
37 try:
37 try:
38 if unlink:
38 if unlink:
39 for f in list:
39 for f in list:
40 try:
40 try:
41 util.unlinkpath(repo.wjoin(f))
41 util.unlinkpath(repo.wjoin(f))
42 except OSError, inst:
42 except OSError, inst:
43 if inst.errno != errno.ENOENT:
43 if inst.errno != errno.ENOENT:
44 raise
44 raise
45 repo[None].forget(list)
45 repo[None].forget(list)
46 finally:
46 finally:
47 wlock.release()
47 wlock.release()
48 return remove(list, unlink=unlink)
48 return remove(list, unlink=unlink)
49
49
50 def repo_forget(repo, list):
50 def repo_forget(repo, list):
51 forget = repo[None].forget
51 forget = repo[None].forget
52 return forget(list)
52 return forget(list)
53
53
54 def findoutgoing(repo, remote, force):
54 def findoutgoing(repo, remote, force):
55 from mercurial import discovery
55 from mercurial import discovery
56 common, _anyinc, _heads = discovery.findcommonincoming(repo,
56 common, _anyinc, _heads = discovery.findcommonincoming(repo,
57 remote, force=force)
57 remote, force=force)
58 return repo.changelog.findmissing(common)
58 return repo.changelog.findmissing(common)
59
59
60 # -- Private worker functions ------------------------------------------
60 # -- Private worker functions ------------------------------------------
61
61
62 def getminsize(ui, assumelfiles, opt, default=10):
62 def getminsize(ui, assumelfiles, opt, default=10):
63 lfsize = opt
63 lfsize = opt
64 if not lfsize and assumelfiles:
64 if not lfsize and assumelfiles:
65 lfsize = ui.config(longname, 'minsize', default=default)
65 lfsize = ui.config(longname, 'minsize', default=default)
66 if lfsize:
66 if lfsize:
67 try:
67 try:
68 lfsize = float(lfsize)
68 lfsize = float(lfsize)
69 except ValueError:
69 except ValueError:
70 raise util.Abort(_('largefiles: size must be number (not %s)\n')
70 raise util.Abort(_('largefiles: size must be number (not %s)\n')
71 % lfsize)
71 % lfsize)
72 if lfsize is None:
72 if lfsize is None:
73 raise util.Abort(_('minimum size for largefiles must be specified'))
73 raise util.Abort(_('minimum size for largefiles must be specified'))
74 return lfsize
74 return lfsize
75
75
76 def link(src, dest):
76 def link(src, dest):
77 try:
77 try:
78 util.oslink(src, dest)
78 util.oslink(src, dest)
79 except OSError:
79 except OSError:
80 # if hardlinks fail, fallback on copy
80 # if hardlinks fail, fallback on copy
81 shutil.copyfile(src, dest)
81 shutil.copyfile(src, dest)
82 os.chmod(dest, os.stat(src).st_mode)
82 os.chmod(dest, os.stat(src).st_mode)
83
83
84 def usercachepath(ui, hash):
84 def usercachepath(ui, hash):
85 path = ui.configpath(longname, 'usercache', None)
85 path = ui.configpath(longname, 'usercache', None)
86 if path:
86 if path:
87 path = os.path.join(path, hash)
87 path = os.path.join(path, hash)
88 else:
88 else:
89 if os.name == 'nt':
89 if os.name == 'nt':
90 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
90 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
91 path = os.path.join(appdata, longname, hash)
91 path = os.path.join(appdata, longname, hash)
92 elif platform.system() == 'Darwin':
92 elif platform.system() == 'Darwin':
93 path = os.path.join(os.getenv('HOME'), 'Library', 'Caches',
93 path = os.path.join(os.getenv('HOME'), 'Library', 'Caches',
94 longname, hash)
94 longname, hash)
95 elif os.name == 'posix':
95 elif os.name == 'posix':
96 path = os.getenv('XDG_CACHE_HOME')
96 path = os.getenv('XDG_CACHE_HOME')
97 if path:
97 if path:
98 path = os.path.join(path, longname, hash)
98 path = os.path.join(path, longname, hash)
99 else:
99 else:
100 path = os.path.join(os.getenv('HOME'), '.cache', longname, hash)
100 path = os.path.join(os.getenv('HOME'), '.cache', longname, hash)
101 else:
101 else:
102 raise util.Abort(_('unknown operating system: %s\n') % os.name)
102 raise util.Abort(_('unknown operating system: %s\n') % os.name)
103 return path
103 return path
104
104
105 def inusercache(ui, hash):
105 def inusercache(ui, hash):
106 return os.path.exists(usercachepath(ui, hash))
106 return os.path.exists(usercachepath(ui, hash))
107
107
108 def findfile(repo, hash):
108 def findfile(repo, hash):
109 if instore(repo, hash):
109 if instore(repo, hash):
110 repo.ui.note(_('Found %s in store\n') % hash)
110 repo.ui.note(_('Found %s in store\n') % hash)
111 elif inusercache(repo.ui, hash):
111 elif inusercache(repo.ui, hash):
112 repo.ui.note(_('Found %s in system cache\n') % hash)
112 repo.ui.note(_('Found %s in system cache\n') % hash)
113 link(usercachepath(repo.ui, hash), storepath(repo, hash))
113 path = storepath(repo, hash)
114 util.makedirs(os.path.dirname(path))
115 link(usercachepath(repo.ui, hash), path)
114 else:
116 else:
115 return None
117 return None
116 return storepath(repo, hash)
118 return storepath(repo, hash)
117
119
118 class largefiles_dirstate(dirstate.dirstate):
120 class largefiles_dirstate(dirstate.dirstate):
119 def __getitem__(self, key):
121 def __getitem__(self, key):
120 return super(largefiles_dirstate, self).__getitem__(unixpath(key))
122 return super(largefiles_dirstate, self).__getitem__(unixpath(key))
121 def normal(self, f):
123 def normal(self, f):
122 return super(largefiles_dirstate, self).normal(unixpath(f))
124 return super(largefiles_dirstate, self).normal(unixpath(f))
123 def remove(self, f):
125 def remove(self, f):
124 return super(largefiles_dirstate, self).remove(unixpath(f))
126 return super(largefiles_dirstate, self).remove(unixpath(f))
125 def add(self, f):
127 def add(self, f):
126 return super(largefiles_dirstate, self).add(unixpath(f))
128 return super(largefiles_dirstate, self).add(unixpath(f))
127 def drop(self, f):
129 def drop(self, f):
128 return super(largefiles_dirstate, self).drop(unixpath(f))
130 return super(largefiles_dirstate, self).drop(unixpath(f))
129 def forget(self, f):
131 def forget(self, f):
130 return super(largefiles_dirstate, self).forget(unixpath(f))
132 return super(largefiles_dirstate, self).forget(unixpath(f))
131
133
132 def openlfdirstate(ui, repo):
134 def openlfdirstate(ui, repo):
133 '''
135 '''
134 Return a dirstate object that tracks largefiles: i.e. its root is
136 Return a dirstate object that tracks largefiles: i.e. its root is
135 the repo root, but it is saved in .hg/largefiles/dirstate.
137 the repo root, but it is saved in .hg/largefiles/dirstate.
136 '''
138 '''
137 admin = repo.join(longname)
139 admin = repo.join(longname)
138 opener = scmutil.opener(admin)
140 opener = scmutil.opener(admin)
139 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
141 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
140 repo.dirstate._validate)
142 repo.dirstate._validate)
141
143
142 # If the largefiles dirstate does not exist, populate and create
144 # If the largefiles dirstate does not exist, populate and create
143 # it. This ensures that we create it on the first meaningful
145 # it. This ensures that we create it on the first meaningful
144 # largefiles operation in a new clone. It also gives us an easy
146 # largefiles operation in a new clone. It also gives us an easy
145 # way to forcibly rebuild largefiles state:
147 # way to forcibly rebuild largefiles state:
146 # rm .hg/largefiles/dirstate && hg status
148 # rm .hg/largefiles/dirstate && hg status
147 # Or even, if things are really messed up:
149 # Or even, if things are really messed up:
148 # rm -rf .hg/largefiles && hg status
150 # rm -rf .hg/largefiles && hg status
149 if not os.path.exists(os.path.join(admin, 'dirstate')):
151 if not os.path.exists(os.path.join(admin, 'dirstate')):
150 util.makedirs(admin)
152 util.makedirs(admin)
151 matcher = getstandinmatcher(repo)
153 matcher = getstandinmatcher(repo)
152 for standin in dirstate_walk(repo.dirstate, matcher):
154 for standin in dirstate_walk(repo.dirstate, matcher):
153 lfile = splitstandin(standin)
155 lfile = splitstandin(standin)
154 hash = readstandin(repo, lfile)
156 hash = readstandin(repo, lfile)
155 lfdirstate.normallookup(lfile)
157 lfdirstate.normallookup(lfile)
156 try:
158 try:
157 if hash == hashfile(lfile):
159 if hash == hashfile(lfile):
158 lfdirstate.normal(lfile)
160 lfdirstate.normal(lfile)
159 except IOError, err:
161 except IOError, err:
160 if err.errno != errno.ENOENT:
162 if err.errno != errno.ENOENT:
161 raise
163 raise
162
164
163 lfdirstate.write()
165 lfdirstate.write()
164
166
165 return lfdirstate
167 return lfdirstate
166
168
167 def lfdirstate_status(lfdirstate, repo, rev):
169 def lfdirstate_status(lfdirstate, repo, rev):
168 wlock = repo.wlock()
170 wlock = repo.wlock()
169 try:
171 try:
170 match = match_.always(repo.root, repo.getcwd())
172 match = match_.always(repo.root, repo.getcwd())
171 s = lfdirstate.status(match, [], False, False, False)
173 s = lfdirstate.status(match, [], False, False, False)
172 unsure, modified, added, removed, missing, unknown, ignored, clean = s
174 unsure, modified, added, removed, missing, unknown, ignored, clean = s
173 for lfile in unsure:
175 for lfile in unsure:
174 if repo[rev][standin(lfile)].data().strip() != \
176 if repo[rev][standin(lfile)].data().strip() != \
175 hashfile(repo.wjoin(lfile)):
177 hashfile(repo.wjoin(lfile)):
176 modified.append(lfile)
178 modified.append(lfile)
177 else:
179 else:
178 clean.append(lfile)
180 clean.append(lfile)
179 lfdirstate.normal(lfile)
181 lfdirstate.normal(lfile)
180 lfdirstate.write()
182 lfdirstate.write()
181 finally:
183 finally:
182 wlock.release()
184 wlock.release()
183 return (modified, added, removed, missing, unknown, ignored, clean)
185 return (modified, added, removed, missing, unknown, ignored, clean)
184
186
185 def listlfiles(repo, rev=None, matcher=None):
187 def listlfiles(repo, rev=None, matcher=None):
186 '''return a list of largefiles in the working copy or the
188 '''return a list of largefiles in the working copy or the
187 specified changeset'''
189 specified changeset'''
188
190
189 if matcher is None:
191 if matcher is None:
190 matcher = getstandinmatcher(repo)
192 matcher = getstandinmatcher(repo)
191
193
192 # ignore unknown files in working directory
194 # ignore unknown files in working directory
193 return [splitstandin(f)
195 return [splitstandin(f)
194 for f in repo[rev].walk(matcher)
196 for f in repo[rev].walk(matcher)
195 if rev is not None or repo.dirstate[f] != '?']
197 if rev is not None or repo.dirstate[f] != '?']
196
198
197 def instore(repo, hash):
199 def instore(repo, hash):
198 return os.path.exists(storepath(repo, hash))
200 return os.path.exists(storepath(repo, hash))
199
201
200 def storepath(repo, hash):
202 def storepath(repo, hash):
201 return repo.join(os.path.join(longname, hash))
203 return repo.join(os.path.join(longname, hash))
202
204
203 def copyfromcache(repo, hash, filename):
205 def copyfromcache(repo, hash, filename):
204 '''Copy the specified largefile from the repo or system cache to
206 '''Copy the specified largefile from the repo or system cache to
205 filename in the repository. Return true on success or false if the
207 filename in the repository. Return true on success or false if the
206 file was not found in either cache (which should not happened:
208 file was not found in either cache (which should not happened:
207 this is meant to be called only after ensuring that the needed
209 this is meant to be called only after ensuring that the needed
208 largefile exists in the cache).'''
210 largefile exists in the cache).'''
209 path = findfile(repo, hash)
211 path = findfile(repo, hash)
210 if path is None:
212 if path is None:
211 return False
213 return False
212 util.makedirs(os.path.dirname(repo.wjoin(filename)))
214 util.makedirs(os.path.dirname(repo.wjoin(filename)))
213 shutil.copy(path, repo.wjoin(filename))
215 shutil.copy(path, repo.wjoin(filename))
214 return True
216 return True
215
217
216 def copytostore(repo, rev, file, uploaded=False):
218 def copytostore(repo, rev, file, uploaded=False):
217 hash = readstandin(repo, file)
219 hash = readstandin(repo, file)
218 if instore(repo, hash):
220 if instore(repo, hash):
219 return
221 return
220 copytostoreabsolute(repo, repo.wjoin(file), hash)
222 copytostoreabsolute(repo, repo.wjoin(file), hash)
221
223
222 def copytostoreabsolute(repo, file, hash):
224 def copytostoreabsolute(repo, file, hash):
223 util.makedirs(os.path.dirname(storepath(repo, hash)))
225 util.makedirs(os.path.dirname(storepath(repo, hash)))
224 if inusercache(repo.ui, hash):
226 if inusercache(repo.ui, hash):
225 link(usercachepath(repo.ui, hash), storepath(repo, hash))
227 link(usercachepath(repo.ui, hash), storepath(repo, hash))
226 else:
228 else:
227 shutil.copyfile(file, storepath(repo, hash))
229 shutil.copyfile(file, storepath(repo, hash))
228 os.chmod(storepath(repo, hash), os.stat(file).st_mode)
230 os.chmod(storepath(repo, hash), os.stat(file).st_mode)
229 linktousercache(repo, hash)
231 linktousercache(repo, hash)
230
232
231 def linktousercache(repo, hash):
233 def linktousercache(repo, hash):
232 util.makedirs(os.path.dirname(usercachepath(repo.ui, hash)))
234 util.makedirs(os.path.dirname(usercachepath(repo.ui, hash)))
233 link(storepath(repo, hash), usercachepath(repo.ui, hash))
235 link(storepath(repo, hash), usercachepath(repo.ui, hash))
234
236
235 def getstandinmatcher(repo, pats=[], opts={}):
237 def getstandinmatcher(repo, pats=[], opts={}):
236 '''Return a match object that applies pats to the standin directory'''
238 '''Return a match object that applies pats to the standin directory'''
237 standindir = repo.pathto(shortname)
239 standindir = repo.pathto(shortname)
238 if pats:
240 if pats:
239 # patterns supplied: search standin directory relative to current dir
241 # patterns supplied: search standin directory relative to current dir
240 cwd = repo.getcwd()
242 cwd = repo.getcwd()
241 if os.path.isabs(cwd):
243 if os.path.isabs(cwd):
242 # cwd is an absolute path for hg -R <reponame>
244 # cwd is an absolute path for hg -R <reponame>
243 # work relative to the repository root in this case
245 # work relative to the repository root in this case
244 cwd = ''
246 cwd = ''
245 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
247 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
246 elif os.path.isdir(standindir):
248 elif os.path.isdir(standindir):
247 # no patterns: relative to repo root
249 # no patterns: relative to repo root
248 pats = [standindir]
250 pats = [standindir]
249 else:
251 else:
250 # no patterns and no standin dir: return matcher that matches nothing
252 # no patterns and no standin dir: return matcher that matches nothing
251 match = match_.match(repo.root, None, [], exact=True)
253 match = match_.match(repo.root, None, [], exact=True)
252 match.matchfn = lambda f: False
254 match.matchfn = lambda f: False
253 return match
255 return match
254 return getmatcher(repo, pats, opts, showbad=False)
256 return getmatcher(repo, pats, opts, showbad=False)
255
257
256 def getmatcher(repo, pats=[], opts={}, showbad=True):
258 def getmatcher(repo, pats=[], opts={}, showbad=True):
257 '''Wrapper around scmutil.match() that adds showbad: if false,
259 '''Wrapper around scmutil.match() that adds showbad: if false,
258 neuter the match object's bad() method so it does not print any
260 neuter the match object's bad() method so it does not print any
259 warnings about missing files or directories.'''
261 warnings about missing files or directories.'''
260 match = scmutil.match(repo[None], pats, opts)
262 match = scmutil.match(repo[None], pats, opts)
261
263
262 if not showbad:
264 if not showbad:
263 match.bad = lambda f, msg: None
265 match.bad = lambda f, msg: None
264 return match
266 return match
265
267
266 def composestandinmatcher(repo, rmatcher):
268 def composestandinmatcher(repo, rmatcher):
267 '''Return a matcher that accepts standins corresponding to the
269 '''Return a matcher that accepts standins corresponding to the
268 files accepted by rmatcher. Pass the list of files in the matcher
270 files accepted by rmatcher. Pass the list of files in the matcher
269 as the paths specified by the user.'''
271 as the paths specified by the user.'''
270 smatcher = getstandinmatcher(repo, rmatcher.files())
272 smatcher = getstandinmatcher(repo, rmatcher.files())
271 isstandin = smatcher.matchfn
273 isstandin = smatcher.matchfn
272 def composed_matchfn(f):
274 def composed_matchfn(f):
273 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
275 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
274 smatcher.matchfn = composed_matchfn
276 smatcher.matchfn = composed_matchfn
275
277
276 return smatcher
278 return smatcher
277
279
278 def standin(filename):
280 def standin(filename):
279 '''Return the repo-relative path to the standin for the specified big
281 '''Return the repo-relative path to the standin for the specified big
280 file.'''
282 file.'''
281 # Notes:
283 # Notes:
282 # 1) Most callers want an absolute path, but _create_standin() needs
284 # 1) Most callers want an absolute path, but _create_standin() needs
283 # it repo-relative so lfadd() can pass it to repo_add(). So leave
285 # it repo-relative so lfadd() can pass it to repo_add(). So leave
284 # it up to the caller to use repo.wjoin() to get an absolute path.
286 # it up to the caller to use repo.wjoin() to get an absolute path.
285 # 2) Join with '/' because that's what dirstate always uses, even on
287 # 2) Join with '/' because that's what dirstate always uses, even on
286 # Windows. Change existing separator to '/' first in case we are
288 # Windows. Change existing separator to '/' first in case we are
287 # passed filenames from an external source (like the command line).
289 # passed filenames from an external source (like the command line).
288 return shortname + '/' + filename.replace(os.sep, '/')
290 return shortname + '/' + filename.replace(os.sep, '/')
289
291
290 def isstandin(filename):
292 def isstandin(filename):
291 '''Return true if filename is a big file standin. filename must be
293 '''Return true if filename is a big file standin. filename must be
292 in Mercurial's internal form (slash-separated).'''
294 in Mercurial's internal form (slash-separated).'''
293 return filename.startswith(shortname + '/')
295 return filename.startswith(shortname + '/')
294
296
295 def splitstandin(filename):
297 def splitstandin(filename):
296 # Split on / because that's what dirstate always uses, even on Windows.
298 # Split on / because that's what dirstate always uses, even on Windows.
297 # Change local separator to / first just in case we are passed filenames
299 # Change local separator to / first just in case we are passed filenames
298 # from an external source (like the command line).
300 # from an external source (like the command line).
299 bits = filename.replace(os.sep, '/').split('/', 1)
301 bits = filename.replace(os.sep, '/').split('/', 1)
300 if len(bits) == 2 and bits[0] == shortname:
302 if len(bits) == 2 and bits[0] == shortname:
301 return bits[1]
303 return bits[1]
302 else:
304 else:
303 return None
305 return None
304
306
305 def updatestandin(repo, standin):
307 def updatestandin(repo, standin):
306 file = repo.wjoin(splitstandin(standin))
308 file = repo.wjoin(splitstandin(standin))
307 if os.path.exists(file):
309 if os.path.exists(file):
308 hash = hashfile(file)
310 hash = hashfile(file)
309 executable = getexecutable(file)
311 executable = getexecutable(file)
310 writestandin(repo, standin, hash, executable)
312 writestandin(repo, standin, hash, executable)
311
313
312 def readstandin(repo, filename, node=None):
314 def readstandin(repo, filename, node=None):
313 '''read hex hash from standin for filename at given node, or working
315 '''read hex hash from standin for filename at given node, or working
314 directory if no node is given'''
316 directory if no node is given'''
315 return repo[node][standin(filename)].data().strip()
317 return repo[node][standin(filename)].data().strip()
316
318
317 def writestandin(repo, standin, hash, executable):
319 def writestandin(repo, standin, hash, executable):
318 '''write hash to <repo.root>/<standin>'''
320 '''write hash to <repo.root>/<standin>'''
319 writehash(hash, repo.wjoin(standin), executable)
321 writehash(hash, repo.wjoin(standin), executable)
320
322
321 def copyandhash(instream, outfile):
323 def copyandhash(instream, outfile):
322 '''Read bytes from instream (iterable) and write them to outfile,
324 '''Read bytes from instream (iterable) and write them to outfile,
323 computing the SHA-1 hash of the data along the way. Close outfile
325 computing the SHA-1 hash of the data along the way. Close outfile
324 when done and return the binary hash.'''
326 when done and return the binary hash.'''
325 hasher = util.sha1('')
327 hasher = util.sha1('')
326 for data in instream:
328 for data in instream:
327 hasher.update(data)
329 hasher.update(data)
328 outfile.write(data)
330 outfile.write(data)
329
331
330 # Blecch: closing a file that somebody else opened is rude and
332 # Blecch: closing a file that somebody else opened is rude and
331 # wrong. But it's so darn convenient and practical! After all,
333 # wrong. But it's so darn convenient and practical! After all,
332 # outfile was opened just to copy and hash.
334 # outfile was opened just to copy and hash.
333 outfile.close()
335 outfile.close()
334
336
335 return hasher.digest()
337 return hasher.digest()
336
338
337 def hashrepofile(repo, file):
339 def hashrepofile(repo, file):
338 return hashfile(repo.wjoin(file))
340 return hashfile(repo.wjoin(file))
339
341
340 def hashfile(file):
342 def hashfile(file):
341 if not os.path.exists(file):
343 if not os.path.exists(file):
342 return ''
344 return ''
343 hasher = util.sha1('')
345 hasher = util.sha1('')
344 fd = open(file, 'rb')
346 fd = open(file, 'rb')
345 for data in blockstream(fd):
347 for data in blockstream(fd):
346 hasher.update(data)
348 hasher.update(data)
347 fd.close()
349 fd.close()
348 return hasher.hexdigest()
350 return hasher.hexdigest()
349
351
350 class limitreader(object):
352 class limitreader(object):
351 def __init__(self, f, limit):
353 def __init__(self, f, limit):
352 self.f = f
354 self.f = f
353 self.limit = limit
355 self.limit = limit
354
356
355 def read(self, length):
357 def read(self, length):
356 if self.limit == 0:
358 if self.limit == 0:
357 return ''
359 return ''
358 length = length > self.limit and self.limit or length
360 length = length > self.limit and self.limit or length
359 self.limit -= length
361 self.limit -= length
360 return self.f.read(length)
362 return self.f.read(length)
361
363
362 def close(self):
364 def close(self):
363 pass
365 pass
364
366
365 def blockstream(infile, blocksize=128 * 1024):
367 def blockstream(infile, blocksize=128 * 1024):
366 """Generator that yields blocks of data from infile and closes infile."""
368 """Generator that yields blocks of data from infile and closes infile."""
367 while True:
369 while True:
368 data = infile.read(blocksize)
370 data = infile.read(blocksize)
369 if not data:
371 if not data:
370 break
372 break
371 yield data
373 yield data
372 # same blecch as copyandhash() above
374 # same blecch as copyandhash() above
373 infile.close()
375 infile.close()
374
376
375 def readhash(filename):
377 def readhash(filename):
376 rfile = open(filename, 'rb')
378 rfile = open(filename, 'rb')
377 hash = rfile.read(40)
379 hash = rfile.read(40)
378 rfile.close()
380 rfile.close()
379 if len(hash) < 40:
381 if len(hash) < 40:
380 raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
382 raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
381 % (filename, len(hash)))
383 % (filename, len(hash)))
382 return hash
384 return hash
383
385
384 def writehash(hash, filename, executable):
386 def writehash(hash, filename, executable):
385 util.makedirs(os.path.dirname(filename))
387 util.makedirs(os.path.dirname(filename))
386 if os.path.exists(filename):
388 if os.path.exists(filename):
387 os.unlink(filename)
389 os.unlink(filename)
388 wfile = open(filename, 'wb')
390 wfile = open(filename, 'wb')
389
391
390 try:
392 try:
391 wfile.write(hash)
393 wfile.write(hash)
392 wfile.write('\n')
394 wfile.write('\n')
393 finally:
395 finally:
394 wfile.close()
396 wfile.close()
395 if os.path.exists(filename):
397 if os.path.exists(filename):
396 os.chmod(filename, getmode(executable))
398 os.chmod(filename, getmode(executable))
397
399
398 def getexecutable(filename):
400 def getexecutable(filename):
399 mode = os.stat(filename).st_mode
401 mode = os.stat(filename).st_mode
400 return ((mode & stat.S_IXUSR) and
402 return ((mode & stat.S_IXUSR) and
401 (mode & stat.S_IXGRP) and
403 (mode & stat.S_IXGRP) and
402 (mode & stat.S_IXOTH))
404 (mode & stat.S_IXOTH))
403
405
404 def getmode(executable):
406 def getmode(executable):
405 if executable:
407 if executable:
406 return 0755
408 return 0755
407 else:
409 else:
408 return 0644
410 return 0644
409
411
410 def urljoin(first, second, *arg):
412 def urljoin(first, second, *arg):
411 def join(left, right):
413 def join(left, right):
412 if not left.endswith('/'):
414 if not left.endswith('/'):
413 left += '/'
415 left += '/'
414 if right.startswith('/'):
416 if right.startswith('/'):
415 right = right[1:]
417 right = right[1:]
416 return left + right
418 return left + right
417
419
418 url = join(first, second)
420 url = join(first, second)
419 for a in arg:
421 for a in arg:
420 url = join(url, a)
422 url = join(url, a)
421 return url
423 return url
422
424
423 def hexsha1(data):
425 def hexsha1(data):
424 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
426 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
425 object data"""
427 object data"""
426 h = util.sha1()
428 h = util.sha1()
427 for chunk in util.filechunkiter(data):
429 for chunk in util.filechunkiter(data):
428 h.update(chunk)
430 h.update(chunk)
429 return h.hexdigest()
431 return h.hexdigest()
430
432
431 def httpsendfile(ui, filename):
433 def httpsendfile(ui, filename):
432 return httpconnection.httpsendfile(ui, filename, 'rb')
434 return httpconnection.httpsendfile(ui, filename, 'rb')
433
435
434 def unixpath(path):
436 def unixpath(path):
435 '''Return a version of path normalized for use with the lfdirstate.'''
437 '''Return a version of path normalized for use with the lfdirstate.'''
436 return os.path.normpath(path).replace(os.sep, '/')
438 return os.path.normpath(path).replace(os.sep, '/')
437
439
438 def islfilesrepo(repo):
440 def islfilesrepo(repo):
439 return ('largefiles' in repo.requirements and
441 return ('largefiles' in repo.requirements and
440 util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))
442 util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))
441
443
442 def mkstemp(repo, prefix):
444 def mkstemp(repo, prefix):
443 '''Returns a file descriptor and a filename corresponding to a temporary
445 '''Returns a file descriptor and a filename corresponding to a temporary
444 file in the repo's largefiles store.'''
446 file in the repo's largefiles store.'''
445 path = repo.join(longname)
447 path = repo.join(longname)
446 util.makedirs(path)
448 util.makedirs(path)
447 return tempfile.mkstemp(prefix=prefix, dir=path)
449 return tempfile.mkstemp(prefix=prefix, dir=path)
448
450
449 class storeprotonotcapable(Exception):
451 class storeprotonotcapable(Exception):
450 def __init__(self, storetypes):
452 def __init__(self, storetypes):
451 self.storetypes = storetypes
453 self.storetypes = storetypes
@@ -1,104 +1,105 b''
1 # ignore.py - ignored file handling for mercurial
1 # ignore.py - ignored file handling for mercurial
2 #
2 #
3 # Copyright 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import util, match
9 import util, match
10 import re
10 import re
11
11
12 _commentre = None
12 _commentre = None
13
13
14 def ignorepats(lines):
14 def ignorepats(lines):
15 '''parse lines (iterable) of .hgignore text, returning a tuple of
15 '''parse lines (iterable) of .hgignore text, returning a tuple of
16 (patterns, parse errors). These patterns should be given to compile()
16 (patterns, parse errors). These patterns should be given to compile()
17 to be validated and converted into a match function.'''
17 to be validated and converted into a match function.'''
18 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
18 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
19 syntax = 'relre:'
19 syntax = 'relre:'
20 patterns = []
20 patterns = []
21 warnings = []
21 warnings = []
22
22
23 for line in lines:
23 for line in lines:
24 if "#" in line:
24 if "#" in line:
25 global _commentre
25 global _commentre
26 if not _commentre:
26 if not _commentre:
27 _commentre = re.compile(r'((^|[^\\])(\\\\)*)#.*')
27 _commentre = re.compile(r'((^|[^\\])(\\\\)*)#.*')
28 # remove comments prefixed by an even number of escapes
28 # remove comments prefixed by an even number of escapes
29 line = _commentre.sub(r'\1', line)
29 line = _commentre.sub(r'\1', line)
30 # fixup properly escaped comments that survived the above
30 # fixup properly escaped comments that survived the above
31 line = line.replace("\\#", "#")
31 line = line.replace("\\#", "#")
32 line = line.rstrip()
32 line = line.rstrip()
33 if not line:
33 if not line:
34 continue
34 continue
35
35
36 if line.startswith('syntax:'):
36 if line.startswith('syntax:'):
37 s = line[7:].strip()
37 s = line[7:].strip()
38 try:
38 try:
39 syntax = syntaxes[s]
39 syntax = syntaxes[s]
40 except KeyError:
40 except KeyError:
41 warnings.append(_("ignoring invalid syntax '%s'") % s)
41 warnings.append(_("ignoring invalid syntax '%s'") % s)
42 continue
42 continue
43 pat = syntax + line
43 pat = syntax + line
44 for s, rels in syntaxes.iteritems():
44 for s, rels in syntaxes.iteritems():
45 if line.startswith(rels):
45 if line.startswith(rels):
46 pat = line
46 pat = line
47 break
47 break
48 elif line.startswith(s+':'):
48 elif line.startswith(s+':'):
49 pat = rels + line[len(s)+1:]
49 pat = rels + line[len(s)+1:]
50 break
50 break
51 patterns.append(pat)
51 patterns.append(pat)
52
52
53 return patterns, warnings
53 return patterns, warnings
54
54
55 def ignore(root, files, warn):
55 def ignore(root, files, warn):
56 '''return matcher covering patterns in 'files'.
56 '''return matcher covering patterns in 'files'.
57
57
58 the files parsed for patterns include:
58 the files parsed for patterns include:
59 .hgignore in the repository root
59 .hgignore in the repository root
60 any additional files specified in the [ui] section of ~/.hgrc
60 any additional files specified in the [ui] section of ~/.hgrc
61
61
62 trailing white space is dropped.
62 trailing white space is dropped.
63 the escape character is backslash.
63 the escape character is backslash.
64 comments start with #.
64 comments start with #.
65 empty lines are skipped.
65 empty lines are skipped.
66
66
67 lines can be of the following formats:
67 lines can be of the following formats:
68
68
69 syntax: regexp # defaults following lines to non-rooted regexps
69 syntax: regexp # defaults following lines to non-rooted regexps
70 syntax: glob # defaults following lines to non-rooted globs
70 syntax: glob # defaults following lines to non-rooted globs
71 re:pattern # non-rooted regular expression
71 re:pattern # non-rooted regular expression
72 glob:pattern # non-rooted glob
72 glob:pattern # non-rooted glob
73 pattern # pattern of the current default type'''
73 pattern # pattern of the current default type'''
74
74
75 pats = {}
75 pats = {}
76 for f in files:
76 for f in files:
77 try:
77 try:
78 pats[f] = []
78 pats[f] = []
79 fp = open(f)
79 fp = open(f)
80 pats[f], warnings = ignorepats(fp)
80 pats[f], warnings = ignorepats(fp)
81 fp.close()
81 for warning in warnings:
82 for warning in warnings:
82 warn("%s: %s\n" % (f, warning))
83 warn("%s: %s\n" % (f, warning))
83 except IOError, inst:
84 except IOError, inst:
84 if f != files[0]:
85 if f != files[0]:
85 warn(_("skipping unreadable ignore file '%s': %s\n") %
86 warn(_("skipping unreadable ignore file '%s': %s\n") %
86 (f, inst.strerror))
87 (f, inst.strerror))
87
88
88 allpats = []
89 allpats = []
89 for patlist in pats.values():
90 for patlist in pats.values():
90 allpats.extend(patlist)
91 allpats.extend(patlist)
91 if not allpats:
92 if not allpats:
92 return util.never
93 return util.never
93
94
94 try:
95 try:
95 ignorefunc = match.match(root, '', [], allpats)
96 ignorefunc = match.match(root, '', [], allpats)
96 except util.Abort:
97 except util.Abort:
97 # Re-raise an exception where the src is the right file
98 # Re-raise an exception where the src is the right file
98 for f, patlist in pats.iteritems():
99 for f, patlist in pats.iteritems():
99 try:
100 try:
100 match.match(root, '', [], patlist)
101 match.match(root, '', [], patlist)
101 except util.Abort, inst:
102 except util.Abort, inst:
102 raise util.Abort('%s: %s' % (f, inst[0]))
103 raise util.Abort('%s: %s' % (f, inst[0]))
103
104
104 return ignorefunc
105 return ignorefunc
@@ -1,1279 +1,1280 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 # import stuff from node for others to import from revlog
14 # import stuff from node for others to import from revlog
15 from node import bin, hex, nullid, nullrev
15 from node import bin, hex, nullid, nullrev
16 from i18n import _
16 from i18n import _
17 import ancestor, mdiff, parsers, error, util, dagutil
17 import ancestor, mdiff, parsers, error, util, dagutil
18 import struct, zlib, errno
18 import struct, zlib, errno
19
19
20 _pack = struct.pack
20 _pack = struct.pack
21 _unpack = struct.unpack
21 _unpack = struct.unpack
22 _compress = zlib.compress
22 _compress = zlib.compress
23 _decompress = zlib.decompress
23 _decompress = zlib.decompress
24 _sha = util.sha1
24 _sha = util.sha1
25
25
26 # revlog header flags
26 # revlog header flags
27 REVLOGV0 = 0
27 REVLOGV0 = 0
28 REVLOGNG = 1
28 REVLOGNG = 1
29 REVLOGNGINLINEDATA = (1 << 16)
29 REVLOGNGINLINEDATA = (1 << 16)
30 REVLOGGENERALDELTA = (1 << 17)
30 REVLOGGENERALDELTA = (1 << 17)
31 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
31 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
32 REVLOG_DEFAULT_FORMAT = REVLOGNG
32 REVLOG_DEFAULT_FORMAT = REVLOGNG
33 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
33 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
34 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
34 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
35
35
36 # revlog index flags
36 # revlog index flags
37 REVIDX_KNOWN_FLAGS = 0
37 REVIDX_KNOWN_FLAGS = 0
38
38
39 # max size of revlog with inline data
39 # max size of revlog with inline data
40 _maxinline = 131072
40 _maxinline = 131072
41 _chunksize = 1048576
41 _chunksize = 1048576
42
42
43 RevlogError = error.RevlogError
43 RevlogError = error.RevlogError
44 LookupError = error.LookupError
44 LookupError = error.LookupError
45
45
46 def getoffset(q):
46 def getoffset(q):
47 return int(q >> 16)
47 return int(q >> 16)
48
48
49 def gettype(q):
49 def gettype(q):
50 return int(q & 0xFFFF)
50 return int(q & 0xFFFF)
51
51
52 def offset_type(offset, type):
52 def offset_type(offset, type):
53 return long(long(offset) << 16 | type)
53 return long(long(offset) << 16 | type)
54
54
55 nullhash = _sha(nullid)
55 nullhash = _sha(nullid)
56
56
57 def hash(text, p1, p2):
57 def hash(text, p1, p2):
58 """generate a hash from the given text and its parent hashes
58 """generate a hash from the given text and its parent hashes
59
59
60 This hash combines both the current file contents and its history
60 This hash combines both the current file contents and its history
61 in a manner that makes it easy to distinguish nodes with the same
61 in a manner that makes it easy to distinguish nodes with the same
62 content in the revision graph.
62 content in the revision graph.
63 """
63 """
64 # As of now, if one of the parent node is null, p2 is null
64 # As of now, if one of the parent node is null, p2 is null
65 if p2 == nullid:
65 if p2 == nullid:
66 # deep copy of a hash is faster than creating one
66 # deep copy of a hash is faster than creating one
67 s = nullhash.copy()
67 s = nullhash.copy()
68 s.update(p1)
68 s.update(p1)
69 else:
69 else:
70 # none of the parent nodes are nullid
70 # none of the parent nodes are nullid
71 l = [p1, p2]
71 l = [p1, p2]
72 l.sort()
72 l.sort()
73 s = _sha(l[0])
73 s = _sha(l[0])
74 s.update(l[1])
74 s.update(l[1])
75 s.update(text)
75 s.update(text)
76 return s.digest()
76 return s.digest()
77
77
78 def compress(text):
78 def compress(text):
79 """ generate a possibly-compressed representation of text """
79 """ generate a possibly-compressed representation of text """
80 if not text:
80 if not text:
81 return ("", text)
81 return ("", text)
82 l = len(text)
82 l = len(text)
83 bin = None
83 bin = None
84 if l < 44:
84 if l < 44:
85 pass
85 pass
86 elif l > 1000000:
86 elif l > 1000000:
87 # zlib makes an internal copy, thus doubling memory usage for
87 # zlib makes an internal copy, thus doubling memory usage for
88 # large files, so lets do this in pieces
88 # large files, so lets do this in pieces
89 z = zlib.compressobj()
89 z = zlib.compressobj()
90 p = []
90 p = []
91 pos = 0
91 pos = 0
92 while pos < l:
92 while pos < l:
93 pos2 = pos + 2**20
93 pos2 = pos + 2**20
94 p.append(z.compress(text[pos:pos2]))
94 p.append(z.compress(text[pos:pos2]))
95 pos = pos2
95 pos = pos2
96 p.append(z.flush())
96 p.append(z.flush())
97 if sum(map(len, p)) < l:
97 if sum(map(len, p)) < l:
98 bin = "".join(p)
98 bin = "".join(p)
99 else:
99 else:
100 bin = _compress(text)
100 bin = _compress(text)
101 if bin is None or len(bin) > l:
101 if bin is None or len(bin) > l:
102 if text[0] == '\0':
102 if text[0] == '\0':
103 return ("", text)
103 return ("", text)
104 return ('u', text)
104 return ('u', text)
105 return ("", bin)
105 return ("", bin)
106
106
107 def decompress(bin):
107 def decompress(bin):
108 """ decompress the given input """
108 """ decompress the given input """
109 if not bin:
109 if not bin:
110 return bin
110 return bin
111 t = bin[0]
111 t = bin[0]
112 if t == '\0':
112 if t == '\0':
113 return bin
113 return bin
114 if t == 'x':
114 if t == 'x':
115 return _decompress(bin)
115 return _decompress(bin)
116 if t == 'u':
116 if t == 'u':
117 return bin[1:]
117 return bin[1:]
118 raise RevlogError(_("unknown compression type %r") % t)
118 raise RevlogError(_("unknown compression type %r") % t)
119
119
120 indexformatv0 = ">4l20s20s20s"
120 indexformatv0 = ">4l20s20s20s"
121 v0shaoffset = 56
121 v0shaoffset = 56
122
122
123 class revlogoldio(object):
123 class revlogoldio(object):
124 def __init__(self):
124 def __init__(self):
125 self.size = struct.calcsize(indexformatv0)
125 self.size = struct.calcsize(indexformatv0)
126
126
127 def parseindex(self, data, inline):
127 def parseindex(self, data, inline):
128 s = self.size
128 s = self.size
129 index = []
129 index = []
130 nodemap = {nullid: nullrev}
130 nodemap = {nullid: nullrev}
131 n = off = 0
131 n = off = 0
132 l = len(data)
132 l = len(data)
133 while off + s <= l:
133 while off + s <= l:
134 cur = data[off:off + s]
134 cur = data[off:off + s]
135 off += s
135 off += s
136 e = _unpack(indexformatv0, cur)
136 e = _unpack(indexformatv0, cur)
137 # transform to revlogv1 format
137 # transform to revlogv1 format
138 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
138 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
139 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
139 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
140 index.append(e2)
140 index.append(e2)
141 nodemap[e[6]] = n
141 nodemap[e[6]] = n
142 n += 1
142 n += 1
143
143
144 # add the magic null revision at -1
144 # add the magic null revision at -1
145 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
145 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
146
146
147 return index, nodemap, None
147 return index, nodemap, None
148
148
149 def packentry(self, entry, node, version, rev):
149 def packentry(self, entry, node, version, rev):
150 if gettype(entry[0]):
150 if gettype(entry[0]):
151 raise RevlogError(_("index entry flags need RevlogNG"))
151 raise RevlogError(_("index entry flags need RevlogNG"))
152 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
152 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
153 node(entry[5]), node(entry[6]), entry[7])
153 node(entry[5]), node(entry[6]), entry[7])
154 return _pack(indexformatv0, *e2)
154 return _pack(indexformatv0, *e2)
155
155
156 # index ng:
156 # index ng:
157 # 6 bytes: offset
157 # 6 bytes: offset
158 # 2 bytes: flags
158 # 2 bytes: flags
159 # 4 bytes: compressed length
159 # 4 bytes: compressed length
160 # 4 bytes: uncompressed length
160 # 4 bytes: uncompressed length
161 # 4 bytes: base rev
161 # 4 bytes: base rev
162 # 4 bytes: link rev
162 # 4 bytes: link rev
163 # 4 bytes: parent 1 rev
163 # 4 bytes: parent 1 rev
164 # 4 bytes: parent 2 rev
164 # 4 bytes: parent 2 rev
165 # 32 bytes: nodeid
165 # 32 bytes: nodeid
166 indexformatng = ">Qiiiiii20s12x"
166 indexformatng = ">Qiiiiii20s12x"
167 ngshaoffset = 32
167 ngshaoffset = 32
168 versionformat = ">I"
168 versionformat = ">I"
169
169
170 class revlogio(object):
170 class revlogio(object):
171 def __init__(self):
171 def __init__(self):
172 self.size = struct.calcsize(indexformatng)
172 self.size = struct.calcsize(indexformatng)
173
173
174 def parseindex(self, data, inline):
174 def parseindex(self, data, inline):
175 # call the C implementation to parse the index data
175 # call the C implementation to parse the index data
176 index, cache = parsers.parse_index2(data, inline)
176 index, cache = parsers.parse_index2(data, inline)
177 return index, None, cache
177 return index, None, cache
178
178
179 def packentry(self, entry, node, version, rev):
179 def packentry(self, entry, node, version, rev):
180 p = _pack(indexformatng, *entry)
180 p = _pack(indexformatng, *entry)
181 if rev == 0:
181 if rev == 0:
182 p = _pack(versionformat, version) + p[4:]
182 p = _pack(versionformat, version) + p[4:]
183 return p
183 return p
184
184
185 class revlog(object):
185 class revlog(object):
186 """
186 """
187 the underlying revision storage object
187 the underlying revision storage object
188
188
189 A revlog consists of two parts, an index and the revision data.
189 A revlog consists of two parts, an index and the revision data.
190
190
191 The index is a file with a fixed record size containing
191 The index is a file with a fixed record size containing
192 information on each revision, including its nodeid (hash), the
192 information on each revision, including its nodeid (hash), the
193 nodeids of its parents, the position and offset of its data within
193 nodeids of its parents, the position and offset of its data within
194 the data file, and the revision it's based on. Finally, each entry
194 the data file, and the revision it's based on. Finally, each entry
195 contains a linkrev entry that can serve as a pointer to external
195 contains a linkrev entry that can serve as a pointer to external
196 data.
196 data.
197
197
198 The revision data itself is a linear collection of data chunks.
198 The revision data itself is a linear collection of data chunks.
199 Each chunk represents a revision and is usually represented as a
199 Each chunk represents a revision and is usually represented as a
200 delta against the previous chunk. To bound lookup time, runs of
200 delta against the previous chunk. To bound lookup time, runs of
201 deltas are limited to about 2 times the length of the original
201 deltas are limited to about 2 times the length of the original
202 version data. This makes retrieval of a version proportional to
202 version data. This makes retrieval of a version proportional to
203 its size, or O(1) relative to the number of revisions.
203 its size, or O(1) relative to the number of revisions.
204
204
205 Both pieces of the revlog are written to in an append-only
205 Both pieces of the revlog are written to in an append-only
206 fashion, which means we never need to rewrite a file to insert or
206 fashion, which means we never need to rewrite a file to insert or
207 remove data, and can use some simple techniques to avoid the need
207 remove data, and can use some simple techniques to avoid the need
208 for locking while reading.
208 for locking while reading.
209 """
209 """
210 def __init__(self, opener, indexfile):
210 def __init__(self, opener, indexfile):
211 """
211 """
212 create a revlog object
212 create a revlog object
213
213
214 opener is a function that abstracts the file opening operation
214 opener is a function that abstracts the file opening operation
215 and can be used to implement COW semantics or the like.
215 and can be used to implement COW semantics or the like.
216 """
216 """
217 self.indexfile = indexfile
217 self.indexfile = indexfile
218 self.datafile = indexfile[:-2] + ".d"
218 self.datafile = indexfile[:-2] + ".d"
219 self.opener = opener
219 self.opener = opener
220 self._cache = None
220 self._cache = None
221 self._basecache = (0, 0)
221 self._basecache = (0, 0)
222 self._chunkcache = (0, '')
222 self._chunkcache = (0, '')
223 self.index = []
223 self.index = []
224 self._pcache = {}
224 self._pcache = {}
225 self._nodecache = {nullid: nullrev}
225 self._nodecache = {nullid: nullrev}
226 self._nodepos = None
226 self._nodepos = None
227
227
228 v = REVLOG_DEFAULT_VERSION
228 v = REVLOG_DEFAULT_VERSION
229 opts = getattr(opener, 'options', None)
229 opts = getattr(opener, 'options', None)
230 if opts is not None:
230 if opts is not None:
231 if 'revlogv1' in opts:
231 if 'revlogv1' in opts:
232 if 'generaldelta' in opts:
232 if 'generaldelta' in opts:
233 v |= REVLOGGENERALDELTA
233 v |= REVLOGGENERALDELTA
234 else:
234 else:
235 v = 0
235 v = 0
236
236
237 i = ''
237 i = ''
238 self._initempty = True
238 self._initempty = True
239 try:
239 try:
240 f = self.opener(self.indexfile)
240 f = self.opener(self.indexfile)
241 i = f.read()
241 i = f.read()
242 f.close()
242 f.close()
243 if len(i) > 0:
243 if len(i) > 0:
244 v = struct.unpack(versionformat, i[:4])[0]
244 v = struct.unpack(versionformat, i[:4])[0]
245 self._initempty = False
245 self._initempty = False
246 except IOError, inst:
246 except IOError, inst:
247 if inst.errno != errno.ENOENT:
247 if inst.errno != errno.ENOENT:
248 raise
248 raise
249
249
250 self.version = v
250 self.version = v
251 self._inline = v & REVLOGNGINLINEDATA
251 self._inline = v & REVLOGNGINLINEDATA
252 self._generaldelta = v & REVLOGGENERALDELTA
252 self._generaldelta = v & REVLOGGENERALDELTA
253 flags = v & ~0xFFFF
253 flags = v & ~0xFFFF
254 fmt = v & 0xFFFF
254 fmt = v & 0xFFFF
255 if fmt == REVLOGV0 and flags:
255 if fmt == REVLOGV0 and flags:
256 raise RevlogError(_("index %s unknown flags %#04x for format v0")
256 raise RevlogError(_("index %s unknown flags %#04x for format v0")
257 % (self.indexfile, flags >> 16))
257 % (self.indexfile, flags >> 16))
258 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
258 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
259 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
259 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
260 % (self.indexfile, flags >> 16))
260 % (self.indexfile, flags >> 16))
261 elif fmt > REVLOGNG:
261 elif fmt > REVLOGNG:
262 raise RevlogError(_("index %s unknown format %d")
262 raise RevlogError(_("index %s unknown format %d")
263 % (self.indexfile, fmt))
263 % (self.indexfile, fmt))
264
264
265 self._io = revlogio()
265 self._io = revlogio()
266 if self.version == REVLOGV0:
266 if self.version == REVLOGV0:
267 self._io = revlogoldio()
267 self._io = revlogoldio()
268 try:
268 try:
269 d = self._io.parseindex(i, self._inline)
269 d = self._io.parseindex(i, self._inline)
270 except (ValueError, IndexError):
270 except (ValueError, IndexError):
271 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
271 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
272 self.index, nodemap, self._chunkcache = d
272 self.index, nodemap, self._chunkcache = d
273 if nodemap is not None:
273 if nodemap is not None:
274 self.nodemap = self._nodecache = nodemap
274 self.nodemap = self._nodecache = nodemap
275 if not self._chunkcache:
275 if not self._chunkcache:
276 self._chunkclear()
276 self._chunkclear()
277
277
278 def tip(self):
278 def tip(self):
279 return self.node(len(self.index) - 2)
279 return self.node(len(self.index) - 2)
280 def __len__(self):
280 def __len__(self):
281 return len(self.index) - 1
281 return len(self.index) - 1
282 def __iter__(self):
282 def __iter__(self):
283 for i in xrange(len(self)):
283 for i in xrange(len(self)):
284 yield i
284 yield i
285
285
286 @util.propertycache
286 @util.propertycache
287 def nodemap(self):
287 def nodemap(self):
288 self.rev(self.node(0))
288 self.rev(self.node(0))
289 return self._nodecache
289 return self._nodecache
290
290
291 def rev(self, node):
291 def rev(self, node):
292 try:
292 try:
293 return self._nodecache[node]
293 return self._nodecache[node]
294 except KeyError:
294 except KeyError:
295 n = self._nodecache
295 n = self._nodecache
296 i = self.index
296 i = self.index
297 p = self._nodepos
297 p = self._nodepos
298 if p is None:
298 if p is None:
299 p = len(i) - 2
299 p = len(i) - 2
300 for r in xrange(p, -1, -1):
300 for r in xrange(p, -1, -1):
301 v = i[r][7]
301 v = i[r][7]
302 n[v] = r
302 n[v] = r
303 if v == node:
303 if v == node:
304 self._nodepos = r - 1
304 self._nodepos = r - 1
305 return r
305 return r
306 raise LookupError(node, self.indexfile, _('no node'))
306 raise LookupError(node, self.indexfile, _('no node'))
307
307
308 def node(self, rev):
308 def node(self, rev):
309 return self.index[rev][7]
309 return self.index[rev][7]
310 def linkrev(self, rev):
310 def linkrev(self, rev):
311 return self.index[rev][4]
311 return self.index[rev][4]
312 def parents(self, node):
312 def parents(self, node):
313 i = self.index
313 i = self.index
314 d = i[self.rev(node)]
314 d = i[self.rev(node)]
315 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
315 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
316 def parentrevs(self, rev):
316 def parentrevs(self, rev):
317 return self.index[rev][5:7]
317 return self.index[rev][5:7]
318 def start(self, rev):
318 def start(self, rev):
319 return int(self.index[rev][0] >> 16)
319 return int(self.index[rev][0] >> 16)
320 def end(self, rev):
320 def end(self, rev):
321 return self.start(rev) + self.length(rev)
321 return self.start(rev) + self.length(rev)
322 def length(self, rev):
322 def length(self, rev):
323 return self.index[rev][1]
323 return self.index[rev][1]
324 def chainbase(self, rev):
324 def chainbase(self, rev):
325 index = self.index
325 index = self.index
326 base = index[rev][3]
326 base = index[rev][3]
327 while base != rev:
327 while base != rev:
328 rev = base
328 rev = base
329 base = index[rev][3]
329 base = index[rev][3]
330 return base
330 return base
331 def flags(self, rev):
331 def flags(self, rev):
332 return self.index[rev][0] & 0xFFFF
332 return self.index[rev][0] & 0xFFFF
333 def rawsize(self, rev):
333 def rawsize(self, rev):
334 """return the length of the uncompressed text for a given revision"""
334 """return the length of the uncompressed text for a given revision"""
335 l = self.index[rev][2]
335 l = self.index[rev][2]
336 if l >= 0:
336 if l >= 0:
337 return l
337 return l
338
338
339 t = self.revision(self.node(rev))
339 t = self.revision(self.node(rev))
340 return len(t)
340 return len(t)
341 size = rawsize
341 size = rawsize
342
342
343 def reachable(self, node, stop=None):
343 def reachable(self, node, stop=None):
344 """return the set of all nodes ancestral to a given node, including
344 """return the set of all nodes ancestral to a given node, including
345 the node itself, stopping when stop is matched"""
345 the node itself, stopping when stop is matched"""
346 reachable = set((node,))
346 reachable = set((node,))
347 visit = [node]
347 visit = [node]
348 if stop:
348 if stop:
349 stopn = self.rev(stop)
349 stopn = self.rev(stop)
350 else:
350 else:
351 stopn = 0
351 stopn = 0
352 while visit:
352 while visit:
353 n = visit.pop(0)
353 n = visit.pop(0)
354 if n == stop:
354 if n == stop:
355 continue
355 continue
356 if n == nullid:
356 if n == nullid:
357 continue
357 continue
358 for p in self.parents(n):
358 for p in self.parents(n):
359 if self.rev(p) < stopn:
359 if self.rev(p) < stopn:
360 continue
360 continue
361 if p not in reachable:
361 if p not in reachable:
362 reachable.add(p)
362 reachable.add(p)
363 visit.append(p)
363 visit.append(p)
364 return reachable
364 return reachable
365
365
366 def ancestors(self, *revs):
366 def ancestors(self, *revs):
367 """Generate the ancestors of 'revs' in reverse topological order.
367 """Generate the ancestors of 'revs' in reverse topological order.
368
368
369 Yield a sequence of revision numbers starting with the parents
369 Yield a sequence of revision numbers starting with the parents
370 of each revision in revs, i.e., each revision is *not* considered
370 of each revision in revs, i.e., each revision is *not* considered
371 an ancestor of itself. Results are in breadth-first order:
371 an ancestor of itself. Results are in breadth-first order:
372 parents of each rev in revs, then parents of those, etc. Result
372 parents of each rev in revs, then parents of those, etc. Result
373 does not include the null revision."""
373 does not include the null revision."""
374 visit = list(revs)
374 visit = list(revs)
375 seen = set([nullrev])
375 seen = set([nullrev])
376 while visit:
376 while visit:
377 for parent in self.parentrevs(visit.pop(0)):
377 for parent in self.parentrevs(visit.pop(0)):
378 if parent not in seen:
378 if parent not in seen:
379 visit.append(parent)
379 visit.append(parent)
380 seen.add(parent)
380 seen.add(parent)
381 yield parent
381 yield parent
382
382
383 def descendants(self, *revs):
383 def descendants(self, *revs):
384 """Generate the descendants of 'revs' in revision order.
384 """Generate the descendants of 'revs' in revision order.
385
385
386 Yield a sequence of revision numbers starting with a child of
386 Yield a sequence of revision numbers starting with a child of
387 some rev in revs, i.e., each revision is *not* considered a
387 some rev in revs, i.e., each revision is *not* considered a
388 descendant of itself. Results are ordered by revision number (a
388 descendant of itself. Results are ordered by revision number (a
389 topological sort)."""
389 topological sort)."""
390 first = min(revs)
390 first = min(revs)
391 if first == nullrev:
391 if first == nullrev:
392 for i in self:
392 for i in self:
393 yield i
393 yield i
394 return
394 return
395
395
396 seen = set(revs)
396 seen = set(revs)
397 for i in xrange(first + 1, len(self)):
397 for i in xrange(first + 1, len(self)):
398 for x in self.parentrevs(i):
398 for x in self.parentrevs(i):
399 if x != nullrev and x in seen:
399 if x != nullrev and x in seen:
400 seen.add(i)
400 seen.add(i)
401 yield i
401 yield i
402 break
402 break
403
403
404 def findcommonmissing(self, common=None, heads=None):
404 def findcommonmissing(self, common=None, heads=None):
405 """Return a tuple of the ancestors of common and the ancestors of heads
405 """Return a tuple of the ancestors of common and the ancestors of heads
406 that are not ancestors of common.
406 that are not ancestors of common.
407
407
408 More specifically, the second element is a list of nodes N such that
408 More specifically, the second element is a list of nodes N such that
409 every N satisfies the following constraints:
409 every N satisfies the following constraints:
410
410
411 1. N is an ancestor of some node in 'heads'
411 1. N is an ancestor of some node in 'heads'
412 2. N is not an ancestor of any node in 'common'
412 2. N is not an ancestor of any node in 'common'
413
413
414 The list is sorted by revision number, meaning it is
414 The list is sorted by revision number, meaning it is
415 topologically sorted.
415 topologically sorted.
416
416
417 'heads' and 'common' are both lists of node IDs. If heads is
417 'heads' and 'common' are both lists of node IDs. If heads is
418 not supplied, uses all of the revlog's heads. If common is not
418 not supplied, uses all of the revlog's heads. If common is not
419 supplied, uses nullid."""
419 supplied, uses nullid."""
420 if common is None:
420 if common is None:
421 common = [nullid]
421 common = [nullid]
422 if heads is None:
422 if heads is None:
423 heads = self.heads()
423 heads = self.heads()
424
424
425 common = [self.rev(n) for n in common]
425 common = [self.rev(n) for n in common]
426 heads = [self.rev(n) for n in heads]
426 heads = [self.rev(n) for n in heads]
427
427
428 # we want the ancestors, but inclusive
428 # we want the ancestors, but inclusive
429 has = set(self.ancestors(*common))
429 has = set(self.ancestors(*common))
430 has.add(nullrev)
430 has.add(nullrev)
431 has.update(common)
431 has.update(common)
432
432
433 # take all ancestors from heads that aren't in has
433 # take all ancestors from heads that aren't in has
434 missing = set()
434 missing = set()
435 visit = [r for r in heads if r not in has]
435 visit = [r for r in heads if r not in has]
436 while visit:
436 while visit:
437 r = visit.pop(0)
437 r = visit.pop(0)
438 if r in missing:
438 if r in missing:
439 continue
439 continue
440 else:
440 else:
441 missing.add(r)
441 missing.add(r)
442 for p in self.parentrevs(r):
442 for p in self.parentrevs(r):
443 if p not in has:
443 if p not in has:
444 visit.append(p)
444 visit.append(p)
445 missing = list(missing)
445 missing = list(missing)
446 missing.sort()
446 missing.sort()
447 return has, [self.node(r) for r in missing]
447 return has, [self.node(r) for r in missing]
448
448
def findmissing(self, common=None, heads=None):
    """Return the ancestors of heads that are not ancestors of common.

    More specifically, return a list of nodes N such that every N
    satisfies the following constraints:

      1. N is an ancestor of some node in 'heads'
      2. N is not an ancestor of any node in 'common'

    The list is sorted by revision number, meaning it is
    topologically sorted.

    'heads' and 'common' are both lists of node IDs.  If heads is
    not supplied, uses all of the revlog's heads.  If common is not
    supplied, uses nullid."""
    # findcommonmissing does all the work; only its second result
    # (the missing nodes) is of interest here.
    return self.findcommonmissing(common, heads)[1]
466
466
def nodesbetween(self, roots=None, heads=None):
    """Return a topological path from 'roots' to 'heads'.

    Return a tuple (nodes, outroots, outheads) where 'nodes' is a
    topologically sorted list of all nodes N that satisfy both of
    these constraints:

      1. N is a descendant of some node in 'roots'
      2. N is an ancestor of some node in 'heads'

    Every node is considered to be both a descendant and an ancestor
    of itself, so every reachable node in 'roots' and 'heads' will be
    included in 'nodes'.

    'outroots' is the list of reachable nodes in 'roots', i.e., the
    subset of 'roots' that is returned in 'nodes'.  Likewise,
    'outheads' is the subset of 'heads' that is also in 'nodes'.

    'roots' and 'heads' are both lists of node IDs.  If 'roots' is
    unspecified, uses nullid as the only root.  If 'heads' is
    unspecified, uses list of all of the revlog's heads."""
    nonodes = ([], [], [])
    if roots is not None:
        roots = list(roots)
        if not roots:
            return nonodes
        lowestrev = min([self.rev(n) for n in roots])
    else:
        roots = [nullid] # Everybody's a descendant of nullid
        lowestrev = nullrev
    if (lowestrev == nullrev) and (heads is None):
        # We want _all_ the nodes!
        return ([self.node(r) for r in self], [nullid], list(self.heads()))
    if heads is None:
        # All nodes are ancestors, so the latest ancestor is the last
        # node.
        highestrev = len(self) - 1
        # Set ancestors to None to signal that every node is an ancestor.
        ancestors = None
        # Set heads to an empty dictionary for later discovery of heads
        heads = {}
    else:
        heads = list(heads)
        if not heads:
            return nonodes
        ancestors = set()
        # Turn heads into a dictionary so we can remove 'fake' heads.
        # Also, later we will be using it to filter out the heads we can't
        # find from roots.
        heads = dict.fromkeys(heads, False)
        # Start at the top and keep marking parents until we're done.
        nodestotag = set(heads)
        # Remember where the top was so we can use it as a limit later.
        highestrev = max([self.rev(n) for n in nodestotag])
        while nodestotag:
            # grab a node to tag
            n = nodestotag.pop()
            # Never tag nullid
            if n == nullid:
                continue
            # A node's revision number represents its place in a
            # topologically sorted list of nodes.
            r = self.rev(n)
            if r >= lowestrev:
                if n not in ancestors:
                    # If we are possibly a descendant of one of the roots
                    # and we haven't already been marked as an ancestor
                    ancestors.add(n) # Mark as ancestor
                    # Add non-nullid parents to list of nodes to tag.
                    nodestotag.update([p for p in self.parents(n) if
                                       p != nullid])
                elif n in heads: # We've seen it before, is it a fake head?
                    # So it is, real heads should not be the ancestors of
                    # any other heads.
                    heads.pop(n)
        if not ancestors:
            return nonodes
        # Now that we have our set of ancestors, we want to remove any
        # roots that are not ancestors.

        # If one of the roots was nullid, everything is included anyway.
        if lowestrev > nullrev:
            # But, since we weren't, let's recompute the lowest rev to not
            # include roots that aren't ancestors.

            # Filter out roots that aren't ancestors of heads
            roots = [n for n in roots if n in ancestors]
            # Recompute the lowest revision
            if roots:
                lowestrev = min([self.rev(n) for n in roots])
            else:
                # No more roots?  Return empty list
                return nonodes
        else:
            # We are descending from nullid, and don't need to care about
            # any other roots.
            lowestrev = nullrev
            roots = [nullid]
    # Transform our roots list into a set.
    descendants = set(roots)
    # Also, keep the original roots so we can filter out roots that aren't
    # 'real' roots (i.e. are descended from other roots).
    roots = descendants.copy()
    # Our topologically sorted list of output nodes.
    orderedout = []
    # Don't start at nullid since we don't want nullid in our output list,
    # and if nullid shows up in descendants, empty parents will look like
    # they're descendants.
    for r in xrange(max(lowestrev, 0), highestrev + 1):
        n = self.node(r)
        isdescendant = False
        if lowestrev == nullrev:  # Everybody is a descendant of nullid
            isdescendant = True
        elif n in descendants:
            # n is already a descendant
            isdescendant = True
            # This check only needs to be done here because all the roots
            # will start being marked as descendants before the loop.
            if n in roots:
                # If n was a root, check if it's a 'real' root.
                p = tuple(self.parents(n))
                # If any of its parents are descendants, it's not a root.
                if (p[0] in descendants) or (p[1] in descendants):
                    roots.remove(n)
        else:
            p = tuple(self.parents(n))
            # A node is a descendant if either of its parents are
            # descendants.  (We seeded the descendants list with the roots
            # up there, remember?)
            if (p[0] in descendants) or (p[1] in descendants):
                descendants.add(n)
                isdescendant = True
        if isdescendant and ((ancestors is None) or (n in ancestors)):
            # Only include nodes that are both descendants and ancestors.
            orderedout.append(n)
            if (ancestors is not None) and (n in heads):
                # We're trying to figure out which heads are reachable
                # from roots.
                # Mark this head as having been reached
                heads[n] = True
            elif ancestors is None:
                # Otherwise, we're trying to discover the heads.
                # Assume this is a head because if it isn't, the next step
                # will eventually remove it.
                heads[n] = True
                # But, obviously its parents aren't.
                for p in self.parents(n):
                    heads.pop(p, None)
    # Keep only the heads that were actually reached from the roots.
    heads = [n for n, flag in heads.iteritems() if flag]
    roots = list(roots)
    assert orderedout
    assert roots
    assert heads
    return (orderedout, roots, heads)
621
621
def headrevs(self):
    """Return the revision numbers of all head revisions.

    A head is a revision that never appears as a parent of another
    revision.  An empty revlog yields [nullrev].
    """
    total = len(self)
    if not total:
        return [nullrev]
    # One flag per revision, plus a trailing slot that absorbs the
    # nullrev (-1) parent writes.  Assume everything is a head, then
    # clear the flag of every revision seen as a parent.
    isheadflags = [1] * (total + 1)
    idx = self.index
    for rev in xrange(total):
        entry = idx[rev]
        # entry[5] and entry[6] are the parent revisions
        isheadflags[entry[5]] = 0
        isheadflags[entry[6]] = 0
    return [rev for rev in xrange(total) if isheadflags[rev]]
632
632
def heads(self, start=None, stop=None):
    """return the list of all nodes that have no children

    if start is specified, only heads that are descendants of
    start will be returned
    if stop is specified, it will consider all the revs from stop
    as if they had no children
    """
    if start is None and stop is None:
        # fast path: unfiltered heads
        if not len(self):
            return [nullid]
        return [self.node(rev) for rev in self.headrevs()]

    if start is None:
        start = nullid
    if stop is None:
        stop = []
    stoprevs = set([self.rev(n) for n in stop])
    startrev = self.rev(start)
    reachable = set([startrev])
    headset = set([startrev])

    parentrevsof = self.parentrevs
    for rev in xrange(startrev + 1, len(self)):
        for parent in parentrevsof(rev):
            if parent not in reachable:
                continue
            # rev descends from start; record it unless it is past a
            # stop point
            if rev not in stoprevs:
                reachable.add(rev)
                headset.add(rev)
            # a reachable parent with a reachable child is no head
            if parent in headset and parent not in stoprevs:
                headset.remove(parent)

    return [self.node(rev) for rev in headset]
666
666
def children(self, node):
    """find the children of a given node"""
    kids = []
    rev = self.rev(node)
    # scan every later revision; children always have a higher rev
    # (note: this uses range where the rest of the file uses xrange)
    for candidate in range(rev + 1, len(self)):
        parents = [p for p in self.parentrevs(candidate) if p != nullrev]
        if not parents:
            # a parentless revision is a child of the null revision
            if rev == nullrev:
                kids.append(self.node(candidate))
        else:
            for p in parents:
                if p == rev:
                    kids.append(self.node(candidate))
    return kids
680
680
def descendant(self, start, end):
    """Tell whether rev 'end' descends from rev 'start'.

    Everything is a descendant of nullrev.  Scans self.descendants(),
    stopping early once the yielded revs pass 'end' (they ascend).
    """
    if start == nullrev:
        return True
    for rev in self.descendants(start):
        if rev == end:
            return True
        if rev > end:
            # revs come out in increasing order; 'end' cannot appear now
            break
    return False
690
690
def ancestor(self, a, b):
    """calculate the least common ancestor of nodes a and b"""
    reva, revb = self.rev(a), self.rev(b)

    # fast path: if one rev descends from the other, the lower rev is
    # the answer
    lo, hi = min(reva, revb), max(reva, revb)
    if self.descendant(lo, hi):
        return self.node(lo)

    def realparents(rev):
        # parents with nullrev filtered out, as the ancestor algorithm
        # expects
        return [p for p in self.parentrevs(rev) if p != nullrev]

    anc = ancestor.ancestor(reva, revb, realparents)
    if anc is None:
        return nullid

    return self.node(anc)
708
708
def _match(self, id):
    """Resolve 'id' to a binary node, trying in order: an integer
    revision, a 20-byte binary node, a str(rev), and a full 40-char
    hex nodeid.  Falls through (returning None) when nothing matches.
    """
    if isinstance(id, (long, int)):
        # rev
        return self.node(id)
    if len(id) == 20:
        # possibly a binary node
        # odds of a binary node being all hex in ASCII are 1 in 10**25
        try:
            self.rev(id) # quick search the index
            return id
        except LookupError:
            pass # may be partial hex id
    try:
        # str(rev)
        rev = int(id)
        if str(rev) != id:
            raise ValueError
        if rev < 0:
            rev += len(self)
        if not 0 <= rev < len(self):
            raise ValueError
        return self.node(rev)
    except (ValueError, OverflowError):
        pass
    if len(id) == 40:
        try:
            # a full hex nodeid?
            binnode = bin(id)
            self.rev(binnode)
            return binnode
        except (TypeError, LookupError):
            pass
742
742
743 def _partialmatch(self, id):
743 def _partialmatch(self, id):
744 if id in self._pcache:
744 if id in self._pcache:
745 return self._pcache[id]
745 return self._pcache[id]
746
746
747 if len(id) < 40:
747 if len(id) < 40:
748 try:
748 try:
749 # hex(node)[:...]
749 # hex(node)[:...]
750 l = len(id) // 2 # grab an even number of digits
750 l = len(id) // 2 # grab an even number of digits
751 prefix = bin(id[:l * 2])
751 prefix = bin(id[:l * 2])
752 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
752 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
753 nl = [n for n in nl if hex(n).startswith(id)]
753 nl = [n for n in nl if hex(n).startswith(id)]
754 if len(nl) > 0:
754 if len(nl) > 0:
755 if len(nl) == 1:
755 if len(nl) == 1:
756 self._pcache[id] = nl[0]
756 self._pcache[id] = nl[0]
757 return nl[0]
757 return nl[0]
758 raise LookupError(id, self.indexfile,
758 raise LookupError(id, self.indexfile,
759 _('ambiguous identifier'))
759 _('ambiguous identifier'))
760 return None
760 return None
761 except TypeError:
761 except TypeError:
762 pass
762 pass
763
763
def lookup(self, id):
    """locate a node based on:
        - revision number or str(revision number)
        - nodeid or subset of hex nodeid
    """
    # exact interpretations first (rev number, binary node, full hex)
    node = self._match(id)
    if node is not None:
        return node
    # then a unique hex prefix
    node = self._partialmatch(id)
    if node:
        return node

    raise LookupError(id, self.indexfile, _('no match found'))
777
777
def cmp(self, node, text):
    """compare text with a given file revision

    returns True if text is different than what is stored.
    """
    # recompute the content hash from text plus the stored parents and
    # compare it with the recorded node
    parent1, parent2 = self.parents(node)
    expected = hash(text, parent1, parent2)
    return expected != node
785
785
786 def _addchunk(self, offset, data):
786 def _addchunk(self, offset, data):
787 o, d = self._chunkcache
787 o, d = self._chunkcache
788 # try to add to existing cache
788 # try to add to existing cache
789 if o + len(d) == offset and len(d) + len(data) < _chunksize:
789 if o + len(d) == offset and len(d) + len(data) < _chunksize:
790 self._chunkcache = o, d + data
790 self._chunkcache = o, d + data
791 else:
791 else:
792 self._chunkcache = offset, data
792 self._chunkcache = offset, data
793
793
794 def _loadchunk(self, offset, length):
794 def _loadchunk(self, offset, length):
795 if self._inline:
795 if self._inline:
796 df = self.opener(self.indexfile)
796 df = self.opener(self.indexfile)
797 else:
797 else:
798 df = self.opener(self.datafile)
798 df = self.opener(self.datafile)
799
799
800 readahead = max(65536, length)
800 readahead = max(65536, length)
801 df.seek(offset)
801 df.seek(offset)
802 d = df.read(readahead)
802 d = df.read(readahead)
803 df.close()
803 self._addchunk(offset, d)
804 self._addchunk(offset, d)
804 if readahead > length:
805 if readahead > length:
805 return d[:length]
806 return d[:length]
806 return d
807 return d
807
808
808 def _getchunk(self, offset, length):
809 def _getchunk(self, offset, length):
809 o, d = self._chunkcache
810 o, d = self._chunkcache
810 l = len(d)
811 l = len(d)
811
812
812 # is it in the cache?
813 # is it in the cache?
813 cachestart = offset - o
814 cachestart = offset - o
814 cacheend = cachestart + length
815 cacheend = cachestart + length
815 if cachestart >= 0 and cacheend <= l:
816 if cachestart >= 0 and cacheend <= l:
816 if cachestart == 0 and cacheend == l:
817 if cachestart == 0 and cacheend == l:
817 return d # avoid a copy
818 return d # avoid a copy
818 return d[cachestart:cacheend]
819 return d[cachestart:cacheend]
819
820
820 return self._loadchunk(offset, length)
821 return self._loadchunk(offset, length)
821
822
822 def _chunkraw(self, startrev, endrev):
823 def _chunkraw(self, startrev, endrev):
823 start = self.start(startrev)
824 start = self.start(startrev)
824 length = self.end(endrev) - start
825 length = self.end(endrev) - start
825 if self._inline:
826 if self._inline:
826 start += (startrev + 1) * self._io.size
827 start += (startrev + 1) * self._io.size
827 return self._getchunk(start, length)
828 return self._getchunk(start, length)
828
829
def _chunk(self, rev):
    """Return the decompressed chunk data for a single revision."""
    raw = self._chunkraw(rev, rev)
    return decompress(raw)
831
832
def _chunkbase(self, rev):
    # Return the chunk text for the base of a delta chain.
    # NOTE(review): thin indirection over _chunk -- presumably a hook
    # point for variants elsewhere in the codebase; confirm overriders.
    return self._chunk(rev)
834
835
def _chunkclear(self):
    # Drop the cached read window: offset 0 with no data.
    self._chunkcache = (0, '')
837
838
def deltaparent(self, rev):
    """return deltaparent of the given revision"""
    base = self.index[rev][3]
    if base == rev:
        # a revision that is its own base is stored as a full snapshot
        return nullrev
    if self._generaldelta:
        # generaldelta revlogs record the real delta parent in the index
        return base
    # classic revlogs always delta against the preceding revision
    return rev - 1
847
848
def revdiff(self, rev1, rev2):
    """return or calculate a delta between two revisions"""
    # if rev2 is stored as a delta against rev1, the stored chunk
    # already is the delta we want
    if rev1 != nullrev and self.deltaparent(rev2) == rev1:
        return self._chunk(rev2)

    # otherwise compute one from the full texts
    text1 = self.revision(self.node(rev1))
    text2 = self.revision(self.node(rev2))
    return mdiff.textdiff(text1, text2)
855
856
def revision(self, node):
    """return an uncompressed revision of a given node

    Serves from the one-entry text cache when possible; otherwise
    walks the delta chain back to a full snapshot (or to the cached
    revision) and patches forward.
    """
    cachedrev = None
    if node == nullid:
        return ""
    if self._cache:
        # full cache hit: same node as last time
        if self._cache[0] == node:
            return self._cache[2]
        # remember the cached rev: the chain walk below can stop there
        cachedrev = self._cache[1]

    # look up what we need to read
    text = None
    rev = self.rev(node)

    # check rev flags
    if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
        raise RevlogError(_('incompatible revision flag %x') %
                          (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))

    # build delta chain
    chain = []
    index = self.index # for performance
    generaldelta = self._generaldelta
    iterrev = rev
    e = index[iterrev]
    # e[3] is the stored base: with generaldelta it is the actual delta
    # parent, otherwise deltas are against the previous revision; a rev
    # whose base is itself is a full snapshot (chain terminator)
    while iterrev != e[3] and iterrev != cachedrev:
        chain.append(iterrev)
        if generaldelta:
            iterrev = e[3]
        else:
            iterrev -= 1
        e = index[iterrev]
    chain.reverse()
    base = iterrev

    if iterrev == cachedrev:
        # cache hit: start patching from the cached text
        text = self._cache[2]

    # drop cache to save memory
    self._cache = None

    # one sequential read that populates the chunk cache for the
    # whole chain (return value intentionally unused)
    self._chunkraw(base, rev)
    if text is None:
        text = self._chunkbase(base)

    bins = [self._chunk(r) for r in chain]
    text = mdiff.patches(text, bins)

    # validates against the recorded hash and raises on corruption
    text = self._checkhash(text, node, rev)

    self._cache = (node, rev, text)
    return text
909
910
def _checkhash(self, text, node, rev):
    """Verify 'text' against the hash recorded in 'node'; return the
    text unchanged, or raise RevlogError on an integrity failure."""
    parent1, parent2 = self.parents(node)
    if node != hash(text, parent1, parent2):
        raise RevlogError(_("integrity check failed on %s:%d")
                          % (self.indexfile, rev))
    return text
916
917
def checkinlinesize(self, tr, fp=None):
    """Convert an inline revlog to separate index/data files once it
    grows past _maxinline, rewriting both files inside transaction
    'tr'.  'fp' is an optional open handle on the index to flush and
    close first.  No-op while the revlog is small or not inline.
    """
    if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
        return

    trinfo = tr.find(self.indexfile)
    if trinfo is None:
        raise RevlogError(_("%s not found in the transaction")
                          % self.indexfile)

    trindex = trinfo[2]
    dataoff = self.start(trindex)

    # register the new data file with the transaction for rollback
    tr.add(self.datafile, dataoff)

    if fp:
        fp.flush()
        fp.close()

    # copy every revision's raw chunk into the standalone data file
    df = self.opener(self.datafile, 'w')
    try:
        for r in self:
            df.write(self._chunkraw(r, r))
    finally:
        df.close()

    # rewrite the index without the inline flag or interleaved data
    fp = self.opener(self.indexfile, 'w', atomictemp=True)
    self.version &= ~(REVLOGNGINLINEDATA)
    self._inline = False
    for i in self:
        e = self._io.packentry(self.index[i], self.node, self.version, i)
        fp.write(e)

    # if we don't call close, the temp file will never replace the
    # real index
    fp.close()

    tr.replace(self.indexfile, trindex * self._io.size)
    # the cached window refers to the old inline layout; discard it
    self._chunkclear()
955
956
def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
    """add a revision to the log

    text - the revision data to add
    transaction - the transaction object used for rollback
    link - the linkrev data to add
    p1, p2 - the parent nodeids of the revision
    cachedelta - an optional precomputed delta
    """
    node = hash(text, p1, p2)
    # storing an already-known revision is a no-op
    if node in self.nodemap:
        return node

    datafh = None
    if not self._inline:
        datafh = self.opener(self.datafile, "a")
    indexfh = self.opener(self.indexfile, "a+")
    try:
        return self._addrevision(node, text, transaction, link, p1, p2,
                                 cachedelta, indexfh, datafh)
    finally:
        # release the handles even when _addrevision fails
        if datafh:
            datafh.close()
        indexfh.close()
980
981
981 def _addrevision(self, node, text, transaction, link, p1, p2,
982 def _addrevision(self, node, text, transaction, link, p1, p2,
982 cachedelta, ifh, dfh):
983 cachedelta, ifh, dfh):
983 """internal function to add revisions to the log
984 """internal function to add revisions to the log
984
985
985 see addrevision for argument descriptions.
986 see addrevision for argument descriptions.
986 invariants:
987 invariants:
987 - text is optional (can be None); if not set, cachedelta must be set.
988 - text is optional (can be None); if not set, cachedelta must be set.
988 if both are set, they must correspond to eachother.
989 if both are set, they must correspond to eachother.
989 """
990 """
990 btext = [text]
991 btext = [text]
991 def buildtext():
992 def buildtext():
992 if btext[0] is not None:
993 if btext[0] is not None:
993 return btext[0]
994 return btext[0]
994 # flush any pending writes here so we can read it in revision
995 # flush any pending writes here so we can read it in revision
995 if dfh:
996 if dfh:
996 dfh.flush()
997 dfh.flush()
997 ifh.flush()
998 ifh.flush()
998 basetext = self.revision(self.node(cachedelta[0]))
999 basetext = self.revision(self.node(cachedelta[0]))
999 btext[0] = mdiff.patch(basetext, cachedelta[1])
1000 btext[0] = mdiff.patch(basetext, cachedelta[1])
1000 chk = hash(btext[0], p1, p2)
1001 chk = hash(btext[0], p1, p2)
1001 if chk != node:
1002 if chk != node:
1002 raise RevlogError(_("consistency error in delta"))
1003 raise RevlogError(_("consistency error in delta"))
1003 return btext[0]
1004 return btext[0]
1004
1005
1005 def builddelta(rev):
1006 def builddelta(rev):
1006 # can we use the cached delta?
1007 # can we use the cached delta?
1007 if cachedelta and cachedelta[0] == rev:
1008 if cachedelta and cachedelta[0] == rev:
1008 delta = cachedelta[1]
1009 delta = cachedelta[1]
1009 else:
1010 else:
1010 t = buildtext()
1011 t = buildtext()
1011 ptext = self.revision(self.node(rev))
1012 ptext = self.revision(self.node(rev))
1012 delta = mdiff.textdiff(ptext, t)
1013 delta = mdiff.textdiff(ptext, t)
1013 data = compress(delta)
1014 data = compress(delta)
1014 l = len(data[1]) + len(data[0])
1015 l = len(data[1]) + len(data[0])
1015 if basecache[0] == rev:
1016 if basecache[0] == rev:
1016 chainbase = basecache[1]
1017 chainbase = basecache[1]
1017 else:
1018 else:
1018 chainbase = self.chainbase(rev)
1019 chainbase = self.chainbase(rev)
1019 dist = l + offset - self.start(chainbase)
1020 dist = l + offset - self.start(chainbase)
1020 if self._generaldelta:
1021 if self._generaldelta:
1021 base = rev
1022 base = rev
1022 else:
1023 else:
1023 base = chainbase
1024 base = chainbase
1024 return dist, l, data, base, chainbase
1025 return dist, l, data, base, chainbase
1025
1026
1026 curr = len(self)
1027 curr = len(self)
1027 prev = curr - 1
1028 prev = curr - 1
1028 base = chainbase = curr
1029 base = chainbase = curr
1029 offset = self.end(prev)
1030 offset = self.end(prev)
1030 flags = 0
1031 flags = 0
1031 d = None
1032 d = None
1032 basecache = self._basecache
1033 basecache = self._basecache
1033 p1r, p2r = self.rev(p1), self.rev(p2)
1034 p1r, p2r = self.rev(p1), self.rev(p2)
1034
1035
1035 # should we try to build a delta?
1036 # should we try to build a delta?
1036 if prev != nullrev:
1037 if prev != nullrev:
1037 if self._generaldelta:
1038 if self._generaldelta:
1038 if p1r >= basecache[1]:
1039 if p1r >= basecache[1]:
1039 d = builddelta(p1r)
1040 d = builddelta(p1r)
1040 elif p2r >= basecache[1]:
1041 elif p2r >= basecache[1]:
1041 d = builddelta(p2r)
1042 d = builddelta(p2r)
1042 else:
1043 else:
1043 d = builddelta(prev)
1044 d = builddelta(prev)
1044 else:
1045 else:
1045 d = builddelta(prev)
1046 d = builddelta(prev)
1046 dist, l, data, base, chainbase = d
1047 dist, l, data, base, chainbase = d
1047
1048
1048 # full versions are inserted when the needed deltas
1049 # full versions are inserted when the needed deltas
1049 # become comparable to the uncompressed text
1050 # become comparable to the uncompressed text
1050 if text is None:
1051 if text is None:
1051 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1052 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1052 cachedelta[1])
1053 cachedelta[1])
1053 else:
1054 else:
1054 textlen = len(text)
1055 textlen = len(text)
1055 if d is None or dist > textlen * 2:
1056 if d is None or dist > textlen * 2:
1056 text = buildtext()
1057 text = buildtext()
1057 data = compress(text)
1058 data = compress(text)
1058 l = len(data[1]) + len(data[0])
1059 l = len(data[1]) + len(data[0])
1059 base = chainbase = curr
1060 base = chainbase = curr
1060
1061
1061 e = (offset_type(offset, flags), l, textlen,
1062 e = (offset_type(offset, flags), l, textlen,
1062 base, link, p1r, p2r, node)
1063 base, link, p1r, p2r, node)
1063 self.index.insert(-1, e)
1064 self.index.insert(-1, e)
1064 self.nodemap[node] = curr
1065 self.nodemap[node] = curr
1065
1066
1066 entry = self._io.packentry(e, self.node, self.version, curr)
1067 entry = self._io.packentry(e, self.node, self.version, curr)
1067 if not self._inline:
1068 if not self._inline:
1068 transaction.add(self.datafile, offset)
1069 transaction.add(self.datafile, offset)
1069 transaction.add(self.indexfile, curr * len(entry))
1070 transaction.add(self.indexfile, curr * len(entry))
1070 if data[0]:
1071 if data[0]:
1071 dfh.write(data[0])
1072 dfh.write(data[0])
1072 dfh.write(data[1])
1073 dfh.write(data[1])
1073 dfh.flush()
1074 dfh.flush()
1074 ifh.write(entry)
1075 ifh.write(entry)
1075 else:
1076 else:
1076 offset += curr * self._io.size
1077 offset += curr * self._io.size
1077 transaction.add(self.indexfile, offset, curr)
1078 transaction.add(self.indexfile, offset, curr)
1078 ifh.write(entry)
1079 ifh.write(entry)
1079 ifh.write(data[0])
1080 ifh.write(data[0])
1080 ifh.write(data[1])
1081 ifh.write(data[1])
1081 self.checkinlinesize(transaction, ifh)
1082 self.checkinlinesize(transaction, ifh)
1082
1083
1083 if type(text) == str: # only accept immutable objects
1084 if type(text) == str: # only accept immutable objects
1084 self._cache = (node, curr, text)
1085 self._cache = (node, curr, text)
1085 self._basecache = (curr, chainbase)
1086 self._basecache = (curr, chainbase)
1086 return node
1087 return node
1087
1088
1088 def group(self, nodelist, bundler, reorder=None):
1089 def group(self, nodelist, bundler, reorder=None):
1089 """Calculate a delta group, yielding a sequence of changegroup chunks
1090 """Calculate a delta group, yielding a sequence of changegroup chunks
1090 (strings).
1091 (strings).
1091
1092
1092 Given a list of changeset revs, return a set of deltas and
1093 Given a list of changeset revs, return a set of deltas and
1093 metadata corresponding to nodes. The first delta is
1094 metadata corresponding to nodes. The first delta is
1094 first parent(nodelist[0]) -> nodelist[0], the receiver is
1095 first parent(nodelist[0]) -> nodelist[0], the receiver is
1095 guaranteed to have this parent as it has all history before
1096 guaranteed to have this parent as it has all history before
1096 these changesets. In the case firstparent is nullrev the
1097 these changesets. In the case firstparent is nullrev the
1097 changegroup starts with a full revision.
1098 changegroup starts with a full revision.
1098 """
1099 """
1099
1100
1100 # if we don't have any revisions touched by these changesets, bail
1101 # if we don't have any revisions touched by these changesets, bail
1101 if len(nodelist) == 0:
1102 if len(nodelist) == 0:
1102 yield bundler.close()
1103 yield bundler.close()
1103 return
1104 return
1104
1105
1105 # for generaldelta revlogs, we linearize the revs; this will both be
1106 # for generaldelta revlogs, we linearize the revs; this will both be
1106 # much quicker and generate a much smaller bundle
1107 # much quicker and generate a much smaller bundle
1107 if (self._generaldelta and reorder is not False) or reorder:
1108 if (self._generaldelta and reorder is not False) or reorder:
1108 dag = dagutil.revlogdag(self)
1109 dag = dagutil.revlogdag(self)
1109 revs = set(self.rev(n) for n in nodelist)
1110 revs = set(self.rev(n) for n in nodelist)
1110 revs = dag.linearize(revs)
1111 revs = dag.linearize(revs)
1111 else:
1112 else:
1112 revs = sorted([self.rev(n) for n in nodelist])
1113 revs = sorted([self.rev(n) for n in nodelist])
1113
1114
1114 # add the parent of the first rev
1115 # add the parent of the first rev
1115 p = self.parentrevs(revs[0])[0]
1116 p = self.parentrevs(revs[0])[0]
1116 revs.insert(0, p)
1117 revs.insert(0, p)
1117
1118
1118 # build deltas
1119 # build deltas
1119 for r in xrange(len(revs) - 1):
1120 for r in xrange(len(revs) - 1):
1120 prev, curr = revs[r], revs[r + 1]
1121 prev, curr = revs[r], revs[r + 1]
1121 for c in bundler.revchunk(self, curr, prev):
1122 for c in bundler.revchunk(self, curr, prev):
1122 yield c
1123 yield c
1123
1124
1124 yield bundler.close()
1125 yield bundler.close()
1125
1126
1126 def addgroup(self, bundle, linkmapper, transaction):
1127 def addgroup(self, bundle, linkmapper, transaction):
1127 """
1128 """
1128 add a delta group
1129 add a delta group
1129
1130
1130 given a set of deltas, add them to the revision log. the
1131 given a set of deltas, add them to the revision log. the
1131 first delta is against its parent, which should be in our
1132 first delta is against its parent, which should be in our
1132 log, the rest are against the previous delta.
1133 log, the rest are against the previous delta.
1133 """
1134 """
1134
1135
1135 # track the base of the current delta log
1136 # track the base of the current delta log
1136 node = None
1137 node = None
1137
1138
1138 r = len(self)
1139 r = len(self)
1139 end = 0
1140 end = 0
1140 if r:
1141 if r:
1141 end = self.end(r - 1)
1142 end = self.end(r - 1)
1142 ifh = self.opener(self.indexfile, "a+")
1143 ifh = self.opener(self.indexfile, "a+")
1143 isize = r * self._io.size
1144 isize = r * self._io.size
1144 if self._inline:
1145 if self._inline:
1145 transaction.add(self.indexfile, end + isize, r)
1146 transaction.add(self.indexfile, end + isize, r)
1146 dfh = None
1147 dfh = None
1147 else:
1148 else:
1148 transaction.add(self.indexfile, isize, r)
1149 transaction.add(self.indexfile, isize, r)
1149 transaction.add(self.datafile, end)
1150 transaction.add(self.datafile, end)
1150 dfh = self.opener(self.datafile, "a")
1151 dfh = self.opener(self.datafile, "a")
1151
1152
1152 try:
1153 try:
1153 # loop through our set of deltas
1154 # loop through our set of deltas
1154 chain = None
1155 chain = None
1155 while True:
1156 while True:
1156 chunkdata = bundle.deltachunk(chain)
1157 chunkdata = bundle.deltachunk(chain)
1157 if not chunkdata:
1158 if not chunkdata:
1158 break
1159 break
1159 node = chunkdata['node']
1160 node = chunkdata['node']
1160 p1 = chunkdata['p1']
1161 p1 = chunkdata['p1']
1161 p2 = chunkdata['p2']
1162 p2 = chunkdata['p2']
1162 cs = chunkdata['cs']
1163 cs = chunkdata['cs']
1163 deltabase = chunkdata['deltabase']
1164 deltabase = chunkdata['deltabase']
1164 delta = chunkdata['delta']
1165 delta = chunkdata['delta']
1165
1166
1166 link = linkmapper(cs)
1167 link = linkmapper(cs)
1167 if node in self.nodemap:
1168 if node in self.nodemap:
1168 # this can happen if two branches make the same change
1169 # this can happen if two branches make the same change
1169 chain = node
1170 chain = node
1170 continue
1171 continue
1171
1172
1172 for p in (p1, p2):
1173 for p in (p1, p2):
1173 if not p in self.nodemap:
1174 if not p in self.nodemap:
1174 raise LookupError(p, self.indexfile,
1175 raise LookupError(p, self.indexfile,
1175 _('unknown parent'))
1176 _('unknown parent'))
1176
1177
1177 if deltabase not in self.nodemap:
1178 if deltabase not in self.nodemap:
1178 raise LookupError(deltabase, self.indexfile,
1179 raise LookupError(deltabase, self.indexfile,
1179 _('unknown delta base'))
1180 _('unknown delta base'))
1180
1181
1181 baserev = self.rev(deltabase)
1182 baserev = self.rev(deltabase)
1182 chain = self._addrevision(node, None, transaction, link,
1183 chain = self._addrevision(node, None, transaction, link,
1183 p1, p2, (baserev, delta), ifh, dfh)
1184 p1, p2, (baserev, delta), ifh, dfh)
1184 if not dfh and not self._inline:
1185 if not dfh and not self._inline:
1185 # addrevision switched from inline to conventional
1186 # addrevision switched from inline to conventional
1186 # reopen the index
1187 # reopen the index
1187 ifh.close()
1188 ifh.close()
1188 dfh = self.opener(self.datafile, "a")
1189 dfh = self.opener(self.datafile, "a")
1189 ifh = self.opener(self.indexfile, "a")
1190 ifh = self.opener(self.indexfile, "a")
1190 finally:
1191 finally:
1191 if dfh:
1192 if dfh:
1192 dfh.close()
1193 dfh.close()
1193 ifh.close()
1194 ifh.close()
1194
1195
1195 return node
1196 return node
1196
1197
1197 def strip(self, minlink, transaction):
1198 def strip(self, minlink, transaction):
1198 """truncate the revlog on the first revision with a linkrev >= minlink
1199 """truncate the revlog on the first revision with a linkrev >= minlink
1199
1200
1200 This function is called when we're stripping revision minlink and
1201 This function is called when we're stripping revision minlink and
1201 its descendants from the repository.
1202 its descendants from the repository.
1202
1203
1203 We have to remove all revisions with linkrev >= minlink, because
1204 We have to remove all revisions with linkrev >= minlink, because
1204 the equivalent changelog revisions will be renumbered after the
1205 the equivalent changelog revisions will be renumbered after the
1205 strip.
1206 strip.
1206
1207
1207 So we truncate the revlog on the first of these revisions, and
1208 So we truncate the revlog on the first of these revisions, and
1208 trust that the caller has saved the revisions that shouldn't be
1209 trust that the caller has saved the revisions that shouldn't be
1209 removed and that it'll readd them after this truncation.
1210 removed and that it'll readd them after this truncation.
1210 """
1211 """
1211 if len(self) == 0:
1212 if len(self) == 0:
1212 return
1213 return
1213
1214
1214 for rev in self:
1215 for rev in self:
1215 if self.index[rev][4] >= minlink:
1216 if self.index[rev][4] >= minlink:
1216 break
1217 break
1217 else:
1218 else:
1218 return
1219 return
1219
1220
1220 # first truncate the files on disk
1221 # first truncate the files on disk
1221 end = self.start(rev)
1222 end = self.start(rev)
1222 if not self._inline:
1223 if not self._inline:
1223 transaction.add(self.datafile, end)
1224 transaction.add(self.datafile, end)
1224 end = rev * self._io.size
1225 end = rev * self._io.size
1225 else:
1226 else:
1226 end += rev * self._io.size
1227 end += rev * self._io.size
1227
1228
1228 transaction.add(self.indexfile, end)
1229 transaction.add(self.indexfile, end)
1229
1230
1230 # then reset internal state in memory to forget those revisions
1231 # then reset internal state in memory to forget those revisions
1231 self._cache = None
1232 self._cache = None
1232 self._chunkclear()
1233 self._chunkclear()
1233 for x in xrange(rev, len(self)):
1234 for x in xrange(rev, len(self)):
1234 del self.nodemap[self.node(x)]
1235 del self.nodemap[self.node(x)]
1235
1236
1236 del self.index[rev:-1]
1237 del self.index[rev:-1]
1237
1238
1238 def checksize(self):
1239 def checksize(self):
1239 expected = 0
1240 expected = 0
1240 if len(self):
1241 if len(self):
1241 expected = max(0, self.end(len(self) - 1))
1242 expected = max(0, self.end(len(self) - 1))
1242
1243
1243 try:
1244 try:
1244 f = self.opener(self.datafile)
1245 f = self.opener(self.datafile)
1245 f.seek(0, 2)
1246 f.seek(0, 2)
1246 actual = f.tell()
1247 actual = f.tell()
1247 f.close()
1248 f.close()
1248 dd = actual - expected
1249 dd = actual - expected
1249 except IOError, inst:
1250 except IOError, inst:
1250 if inst.errno != errno.ENOENT:
1251 if inst.errno != errno.ENOENT:
1251 raise
1252 raise
1252 dd = 0
1253 dd = 0
1253
1254
1254 try:
1255 try:
1255 f = self.opener(self.indexfile)
1256 f = self.opener(self.indexfile)
1256 f.seek(0, 2)
1257 f.seek(0, 2)
1257 actual = f.tell()
1258 actual = f.tell()
1258 f.close()
1259 f.close()
1259 s = self._io.size
1260 s = self._io.size
1260 i = max(0, actual // s)
1261 i = max(0, actual // s)
1261 di = actual - (i * s)
1262 di = actual - (i * s)
1262 if self._inline:
1263 if self._inline:
1263 databytes = 0
1264 databytes = 0
1264 for r in self:
1265 for r in self:
1265 databytes += max(0, self.length(r))
1266 databytes += max(0, self.length(r))
1266 dd = 0
1267 dd = 0
1267 di = actual - len(self) * s - databytes
1268 di = actual - len(self) * s - databytes
1268 except IOError, inst:
1269 except IOError, inst:
1269 if inst.errno != errno.ENOENT:
1270 if inst.errno != errno.ENOENT:
1270 raise
1271 raise
1271 di = 0
1272 di = 0
1272
1273
1273 return (dd, di)
1274 return (dd, di)
1274
1275
1275 def files(self):
1276 def files(self):
1276 res = [self.indexfile]
1277 res = [self.indexfile]
1277 if not self._inline:
1278 if not self._inline:
1278 res.append(self.datafile)
1279 res.append(self.datafile)
1279 return res
1280 return res
@@ -1,733 +1,734 b''
1 # ui.py - user interface bits for mercurial
1 # ui.py - user interface bits for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import errno, getpass, os, socket, sys, tempfile, traceback
9 import errno, getpass, os, socket, sys, tempfile, traceback
10 import config, scmutil, util, error
10 import config, scmutil, util, error
11
11
12 class ui(object):
12 class ui(object):
13 def __init__(self, src=None):
13 def __init__(self, src=None):
14 self._buffers = []
14 self._buffers = []
15 self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
15 self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
16 self._reportuntrusted = True
16 self._reportuntrusted = True
17 self._ocfg = config.config() # overlay
17 self._ocfg = config.config() # overlay
18 self._tcfg = config.config() # trusted
18 self._tcfg = config.config() # trusted
19 self._ucfg = config.config() # untrusted
19 self._ucfg = config.config() # untrusted
20 self._trustusers = set()
20 self._trustusers = set()
21 self._trustgroups = set()
21 self._trustgroups = set()
22
22
23 if src:
23 if src:
24 self.fout = src.fout
24 self.fout = src.fout
25 self.ferr = src.ferr
25 self.ferr = src.ferr
26 self.fin = src.fin
26 self.fin = src.fin
27
27
28 self._tcfg = src._tcfg.copy()
28 self._tcfg = src._tcfg.copy()
29 self._ucfg = src._ucfg.copy()
29 self._ucfg = src._ucfg.copy()
30 self._ocfg = src._ocfg.copy()
30 self._ocfg = src._ocfg.copy()
31 self._trustusers = src._trustusers.copy()
31 self._trustusers = src._trustusers.copy()
32 self._trustgroups = src._trustgroups.copy()
32 self._trustgroups = src._trustgroups.copy()
33 self.environ = src.environ
33 self.environ = src.environ
34 self.fixconfig()
34 self.fixconfig()
35 else:
35 else:
36 self.fout = sys.stdout
36 self.fout = sys.stdout
37 self.ferr = sys.stderr
37 self.ferr = sys.stderr
38 self.fin = sys.stdin
38 self.fin = sys.stdin
39
39
40 # shared read-only environment
40 # shared read-only environment
41 self.environ = os.environ
41 self.environ = os.environ
42 # we always trust global config files
42 # we always trust global config files
43 for f in scmutil.rcpath():
43 for f in scmutil.rcpath():
44 self.readconfig(f, trust=True)
44 self.readconfig(f, trust=True)
45
45
46 def copy(self):
46 def copy(self):
47 return self.__class__(self)
47 return self.__class__(self)
48
48
49 def _trusted(self, fp, f):
49 def _trusted(self, fp, f):
50 st = util.fstat(fp)
50 st = util.fstat(fp)
51 if util.isowner(st):
51 if util.isowner(st):
52 return True
52 return True
53
53
54 tusers, tgroups = self._trustusers, self._trustgroups
54 tusers, tgroups = self._trustusers, self._trustgroups
55 if '*' in tusers or '*' in tgroups:
55 if '*' in tusers or '*' in tgroups:
56 return True
56 return True
57
57
58 user = util.username(st.st_uid)
58 user = util.username(st.st_uid)
59 group = util.groupname(st.st_gid)
59 group = util.groupname(st.st_gid)
60 if user in tusers or group in tgroups or user == util.username():
60 if user in tusers or group in tgroups or user == util.username():
61 return True
61 return True
62
62
63 if self._reportuntrusted:
63 if self._reportuntrusted:
64 self.warn(_('Not trusting file %s from untrusted '
64 self.warn(_('Not trusting file %s from untrusted '
65 'user %s, group %s\n') % (f, user, group))
65 'user %s, group %s\n') % (f, user, group))
66 return False
66 return False
67
67
68 def readconfig(self, filename, root=None, trust=False,
68 def readconfig(self, filename, root=None, trust=False,
69 sections=None, remap=None):
69 sections=None, remap=None):
70 try:
70 try:
71 fp = open(filename)
71 fp = open(filename)
72 except IOError:
72 except IOError:
73 if not sections: # ignore unless we were looking for something
73 if not sections: # ignore unless we were looking for something
74 return
74 return
75 raise
75 raise
76
76
77 cfg = config.config()
77 cfg = config.config()
78 trusted = sections or trust or self._trusted(fp, filename)
78 trusted = sections or trust or self._trusted(fp, filename)
79
79
80 try:
80 try:
81 cfg.read(filename, fp, sections=sections, remap=remap)
81 cfg.read(filename, fp, sections=sections, remap=remap)
82 fp.close()
82 except error.ConfigError, inst:
83 except error.ConfigError, inst:
83 if trusted:
84 if trusted:
84 raise
85 raise
85 self.warn(_("Ignored: %s\n") % str(inst))
86 self.warn(_("Ignored: %s\n") % str(inst))
86
87
87 if self.plain():
88 if self.plain():
88 for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
89 for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
89 'logtemplate', 'style',
90 'logtemplate', 'style',
90 'traceback', 'verbose'):
91 'traceback', 'verbose'):
91 if k in cfg['ui']:
92 if k in cfg['ui']:
92 del cfg['ui'][k]
93 del cfg['ui'][k]
93 for k, v in cfg.items('defaults'):
94 for k, v in cfg.items('defaults'):
94 del cfg['defaults'][k]
95 del cfg['defaults'][k]
95 # Don't remove aliases from the configuration if in the exceptionlist
96 # Don't remove aliases from the configuration if in the exceptionlist
96 if self.plain('alias'):
97 if self.plain('alias'):
97 for k, v in cfg.items('alias'):
98 for k, v in cfg.items('alias'):
98 del cfg['alias'][k]
99 del cfg['alias'][k]
99
100
100 if trusted:
101 if trusted:
101 self._tcfg.update(cfg)
102 self._tcfg.update(cfg)
102 self._tcfg.update(self._ocfg)
103 self._tcfg.update(self._ocfg)
103 self._ucfg.update(cfg)
104 self._ucfg.update(cfg)
104 self._ucfg.update(self._ocfg)
105 self._ucfg.update(self._ocfg)
105
106
106 if root is None:
107 if root is None:
107 root = os.path.expanduser('~')
108 root = os.path.expanduser('~')
108 self.fixconfig(root=root)
109 self.fixconfig(root=root)
109
110
110 def fixconfig(self, root=None, section=None):
111 def fixconfig(self, root=None, section=None):
111 if section in (None, 'paths'):
112 if section in (None, 'paths'):
112 # expand vars and ~
113 # expand vars and ~
113 # translate paths relative to root (or home) into absolute paths
114 # translate paths relative to root (or home) into absolute paths
114 root = root or os.getcwd()
115 root = root or os.getcwd()
115 for c in self._tcfg, self._ucfg, self._ocfg:
116 for c in self._tcfg, self._ucfg, self._ocfg:
116 for n, p in c.items('paths'):
117 for n, p in c.items('paths'):
117 if not p:
118 if not p:
118 continue
119 continue
119 if '%%' in p:
120 if '%%' in p:
120 self.warn(_("(deprecated '%%' in path %s=%s from %s)\n")
121 self.warn(_("(deprecated '%%' in path %s=%s from %s)\n")
121 % (n, p, self.configsource('paths', n)))
122 % (n, p, self.configsource('paths', n)))
122 p = p.replace('%%', '%')
123 p = p.replace('%%', '%')
123 p = util.expandpath(p)
124 p = util.expandpath(p)
124 if not util.hasscheme(p) and not os.path.isabs(p):
125 if not util.hasscheme(p) and not os.path.isabs(p):
125 p = os.path.normpath(os.path.join(root, p))
126 p = os.path.normpath(os.path.join(root, p))
126 c.set("paths", n, p)
127 c.set("paths", n, p)
127
128
128 if section in (None, 'ui'):
129 if section in (None, 'ui'):
129 # update ui options
130 # update ui options
130 self.debugflag = self.configbool('ui', 'debug')
131 self.debugflag = self.configbool('ui', 'debug')
131 self.verbose = self.debugflag or self.configbool('ui', 'verbose')
132 self.verbose = self.debugflag or self.configbool('ui', 'verbose')
132 self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
133 self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
133 if self.verbose and self.quiet:
134 if self.verbose and self.quiet:
134 self.quiet = self.verbose = False
135 self.quiet = self.verbose = False
135 self._reportuntrusted = self.debugflag or self.configbool("ui",
136 self._reportuntrusted = self.debugflag or self.configbool("ui",
136 "report_untrusted", True)
137 "report_untrusted", True)
137 self.tracebackflag = self.configbool('ui', 'traceback', False)
138 self.tracebackflag = self.configbool('ui', 'traceback', False)
138
139
139 if section in (None, 'trusted'):
140 if section in (None, 'trusted'):
140 # update trust information
141 # update trust information
141 self._trustusers.update(self.configlist('trusted', 'users'))
142 self._trustusers.update(self.configlist('trusted', 'users'))
142 self._trustgroups.update(self.configlist('trusted', 'groups'))
143 self._trustgroups.update(self.configlist('trusted', 'groups'))
143
144
144 def setconfig(self, section, name, value, overlay=True):
145 def setconfig(self, section, name, value, overlay=True):
145 if overlay:
146 if overlay:
146 self._ocfg.set(section, name, value)
147 self._ocfg.set(section, name, value)
147 self._tcfg.set(section, name, value)
148 self._tcfg.set(section, name, value)
148 self._ucfg.set(section, name, value)
149 self._ucfg.set(section, name, value)
149 self.fixconfig(section=section)
150 self.fixconfig(section=section)
150
151
151 def _data(self, untrusted):
152 def _data(self, untrusted):
152 return untrusted and self._ucfg or self._tcfg
153 return untrusted and self._ucfg or self._tcfg
153
154
154 def configsource(self, section, name, untrusted=False):
155 def configsource(self, section, name, untrusted=False):
155 return self._data(untrusted).source(section, name) or 'none'
156 return self._data(untrusted).source(section, name) or 'none'
156
157
157 def config(self, section, name, default=None, untrusted=False):
158 def config(self, section, name, default=None, untrusted=False):
158 if isinstance(name, list):
159 if isinstance(name, list):
159 alternates = name
160 alternates = name
160 else:
161 else:
161 alternates = [name]
162 alternates = [name]
162
163
163 for n in alternates:
164 for n in alternates:
164 value = self._data(untrusted).get(section, name, None)
165 value = self._data(untrusted).get(section, name, None)
165 if value is not None:
166 if value is not None:
166 name = n
167 name = n
167 break
168 break
168 else:
169 else:
169 value = default
170 value = default
170
171
171 if self.debugflag and not untrusted and self._reportuntrusted:
172 if self.debugflag and not untrusted and self._reportuntrusted:
172 uvalue = self._ucfg.get(section, name)
173 uvalue = self._ucfg.get(section, name)
173 if uvalue is not None and uvalue != value:
174 if uvalue is not None and uvalue != value:
174 self.debug("ignoring untrusted configuration option "
175 self.debug("ignoring untrusted configuration option "
175 "%s.%s = %s\n" % (section, name, uvalue))
176 "%s.%s = %s\n" % (section, name, uvalue))
176 return value
177 return value
177
178
178 def configpath(self, section, name, default=None, untrusted=False):
179 def configpath(self, section, name, default=None, untrusted=False):
179 'get a path config item, expanded relative to repo root or config file'
180 'get a path config item, expanded relative to repo root or config file'
180 v = self.config(section, name, default, untrusted)
181 v = self.config(section, name, default, untrusted)
181 if v is None:
182 if v is None:
182 return None
183 return None
183 if not os.path.isabs(v) or "://" not in v:
184 if not os.path.isabs(v) or "://" not in v:
184 src = self.configsource(section, name, untrusted)
185 src = self.configsource(section, name, untrusted)
185 if ':' in src:
186 if ':' in src:
186 base = os.path.dirname(src.rsplit(':')[0])
187 base = os.path.dirname(src.rsplit(':')[0])
187 v = os.path.join(base, os.path.expanduser(v))
188 v = os.path.join(base, os.path.expanduser(v))
188 return v
189 return v
189
190
190 def configbool(self, section, name, default=False, untrusted=False):
191 def configbool(self, section, name, default=False, untrusted=False):
191 """parse a configuration element as a boolean
192 """parse a configuration element as a boolean
192
193
193 >>> u = ui(); s = 'foo'
194 >>> u = ui(); s = 'foo'
194 >>> u.setconfig(s, 'true', 'yes')
195 >>> u.setconfig(s, 'true', 'yes')
195 >>> u.configbool(s, 'true')
196 >>> u.configbool(s, 'true')
196 True
197 True
197 >>> u.setconfig(s, 'false', 'no')
198 >>> u.setconfig(s, 'false', 'no')
198 >>> u.configbool(s, 'false')
199 >>> u.configbool(s, 'false')
199 False
200 False
200 >>> u.configbool(s, 'unknown')
201 >>> u.configbool(s, 'unknown')
201 False
202 False
202 >>> u.configbool(s, 'unknown', True)
203 >>> u.configbool(s, 'unknown', True)
203 True
204 True
204 >>> u.setconfig(s, 'invalid', 'somevalue')
205 >>> u.setconfig(s, 'invalid', 'somevalue')
205 >>> u.configbool(s, 'invalid')
206 >>> u.configbool(s, 'invalid')
206 Traceback (most recent call last):
207 Traceback (most recent call last):
207 ...
208 ...
208 ConfigError: foo.invalid is not a boolean ('somevalue')
209 ConfigError: foo.invalid is not a boolean ('somevalue')
209 """
210 """
210
211
211 v = self.config(section, name, None, untrusted)
212 v = self.config(section, name, None, untrusted)
212 if v is None:
213 if v is None:
213 return default
214 return default
214 if isinstance(v, bool):
215 if isinstance(v, bool):
215 return v
216 return v
216 b = util.parsebool(v)
217 b = util.parsebool(v)
217 if b is None:
218 if b is None:
218 raise error.ConfigError(_("%s.%s is not a boolean ('%s')")
219 raise error.ConfigError(_("%s.%s is not a boolean ('%s')")
219 % (section, name, v))
220 % (section, name, v))
220 return b
221 return b
221
222
222 def configint(self, section, name, default=None, untrusted=False):
223 def configint(self, section, name, default=None, untrusted=False):
223 """parse a configuration element as an integer
224 """parse a configuration element as an integer
224
225
225 >>> u = ui(); s = 'foo'
226 >>> u = ui(); s = 'foo'
226 >>> u.setconfig(s, 'int1', '42')
227 >>> u.setconfig(s, 'int1', '42')
227 >>> u.configint(s, 'int1')
228 >>> u.configint(s, 'int1')
228 42
229 42
229 >>> u.setconfig(s, 'int2', '-42')
230 >>> u.setconfig(s, 'int2', '-42')
230 >>> u.configint(s, 'int2')
231 >>> u.configint(s, 'int2')
231 -42
232 -42
232 >>> u.configint(s, 'unknown', 7)
233 >>> u.configint(s, 'unknown', 7)
233 7
234 7
234 >>> u.setconfig(s, 'invalid', 'somevalue')
235 >>> u.setconfig(s, 'invalid', 'somevalue')
235 >>> u.configint(s, 'invalid')
236 >>> u.configint(s, 'invalid')
236 Traceback (most recent call last):
237 Traceback (most recent call last):
237 ...
238 ...
238 ConfigError: foo.invalid is not an integer ('somevalue')
239 ConfigError: foo.invalid is not an integer ('somevalue')
239 """
240 """
240
241
241 v = self.config(section, name, None, untrusted)
242 v = self.config(section, name, None, untrusted)
242 if v is None:
243 if v is None:
243 return default
244 return default
244 try:
245 try:
245 return int(v)
246 return int(v)
246 except ValueError:
247 except ValueError:
247 raise error.ConfigError(_("%s.%s is not an integer ('%s')")
248 raise error.ConfigError(_("%s.%s is not an integer ('%s')")
248 % (section, name, v))
249 % (section, name, v))
249
250
    def configlist(self, section, name, default=None, untrusted=False):
        """parse a configuration element as a list of comma/space separated
        strings

        Double-quoted items may contain separators; a backslash escapes
        an embedded double quote.

        >>> u = ui(); s = 'foo'
        >>> u.setconfig(s, 'list1', 'this,is "a small" ,test')
        >>> u.configlist(s, 'list1')
        ['this', 'is', 'a small', 'test']
        """

        # The splitter below is a small state machine: each state
        # function returns (nextstate, parts, offset), and parsing stops
        # when nextstate is None.

        def _parse_plain(parts, s, offset):
            # skip separators, remembering whether we crossed any so we
            # know when to start a new item
            whitespace = False
            while offset < len(s) and (s[offset].isspace() or s[offset] == ','):
                whitespace = True
                offset += 1
            if offset >= len(s):
                return None, parts, offset
            if whitespace:
                parts.append('')
            if s[offset] == '"' and not parts[-1]:
                # quote at the start of an item: switch to quoted mode
                return _parse_quote, parts, offset + 1
            elif s[offset] == '"' and parts[-1][-1] == '\\':
                # backslash-escaped quote inside a plain item
                parts[-1] = parts[-1][:-1] + s[offset]
                return _parse_plain, parts, offset + 1
            parts[-1] += s[offset]
            return _parse_plain, parts, offset + 1

        def _parse_quote(parts, s, offset):
            if offset < len(s) and s[offset] == '"': # ""
                parts.append('')
                offset += 1
                while offset < len(s) and (s[offset].isspace() or
                        s[offset] == ','):
                    offset += 1
                return _parse_plain, parts, offset

            # accumulate until the closing quote, honoring \" escapes
            while offset < len(s) and s[offset] != '"':
                if (s[offset] == '\\' and offset + 1 < len(s)
                        and s[offset + 1] == '"'):
                    offset += 1
                    parts[-1] += '"'
                else:
                    parts[-1] += s[offset]
                offset += 1

            if offset >= len(s):
                # unterminated quote: reparse the item as plain text with
                # the literal '"' put back in front
                real_parts = _configlist(parts[-1])
                if not real_parts:
                    parts[-1] = '"'
                else:
                    real_parts[0] = '"' + real_parts[0]
                    parts = parts[:-1]
                    parts.extend(real_parts)
                return None, parts, offset

            offset += 1
            while offset < len(s) and s[offset] in [' ', ',']:
                offset += 1

            if offset < len(s):
                if offset + 1 == len(s) and s[offset] == '"':
                    # trailing lone quote attaches to the current item
                    parts[-1] += '"'
                    offset += 1
                else:
                    parts.append('')
            else:
                return None, parts, offset

            return _parse_plain, parts, offset

        def _configlist(s):
            s = s.rstrip(' ,')
            if not s:
                return []
            parser, parts, offset = _parse_plain, [''], 0
            while parser:
                parser, parts, offset = parser(parts, s, offset)
            return parts

        result = self.config(section, name, untrusted=untrusted)
        if result is None:
            result = default or []
        if isinstance(result, basestring):
            result = _configlist(result.lstrip(' ,\n'))
            if result is None:
                # NOTE(review): _configlist() always returns a list, so
                # this branch looks unreachable -- kept for safety
                result = default or []
        return result
337
338
    def has_section(self, section, untrusted=False):
        '''tell whether section exists in config.'''
        # _data() selects the trusted or untrusted configuration set
        return section in self._data(untrusted)
341
342
    def configitems(self, section, untrusted=False):
        '''return the (name, value) pairs of a config section

        In debug mode, also report trusted options whose value differs
        from what untrusted configuration would have supplied.
        '''
        items = self._data(untrusted).items(section)
        if self.debugflag and not untrusted and self._reportuntrusted:
            for k, v in self._ucfg.items(section):
                if self._tcfg.get(section, k) != v:
                    self.debug("ignoring untrusted configuration option "
                               "%s.%s = %s\n" % (section, k, v))
        return items
350
351
    def walkconfig(self, untrusted=False):
        '''yield (section, name, value) for every configuration item'''
        cfg = self._data(untrusted)
        for section in cfg.sections():
            for name, value in self.configitems(section, untrusted):
                yield section, name, value
356
357
357 def plain(self, feature=None):
358 def plain(self, feature=None):
358 '''is plain mode active?
359 '''is plain mode active?
359
360
360 Plain mode means that all configuration variables which affect
361 Plain mode means that all configuration variables which affect
361 the behavior and output of Mercurial should be
362 the behavior and output of Mercurial should be
362 ignored. Additionally, the output should be stable,
363 ignored. Additionally, the output should be stable,
363 reproducible and suitable for use in scripts or applications.
364 reproducible and suitable for use in scripts or applications.
364
365
365 The only way to trigger plain mode is by setting either the
366 The only way to trigger plain mode is by setting either the
366 `HGPLAIN' or `HGPLAINEXCEPT' environment variables.
367 `HGPLAIN' or `HGPLAINEXCEPT' environment variables.
367
368
368 The return value can either be
369 The return value can either be
369 - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
370 - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
370 - True otherwise
371 - True otherwise
371 '''
372 '''
372 if 'HGPLAIN' not in os.environ and 'HGPLAINEXCEPT' not in os.environ:
373 if 'HGPLAIN' not in os.environ and 'HGPLAINEXCEPT' not in os.environ:
373 return False
374 return False
374 exceptions = os.environ.get('HGPLAINEXCEPT', '').strip().split(',')
375 exceptions = os.environ.get('HGPLAINEXCEPT', '').strip().split(',')
375 if feature and exceptions:
376 if feature and exceptions:
376 return feature not in exceptions
377 return feature not in exceptions
377 return True
378 return True
378
379
    def username(self):
        """Return default username to be used in commits.

        Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL
        and stop searching if one of these is set.
        If not found and ui.askusername is True, ask the user, else use
        ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".

        Raises util.Abort when no username can be determined or when the
        result contains a newline.
        """
        user = os.environ.get("HGUSER")
        if user is None:
            user = self.config("ui", "username")
            if user is not None:
                # allow environment-variable references in the hgrc value
                user = os.path.expandvars(user)
        if user is None:
            user = os.environ.get("EMAIL")
        if user is None and self.configbool("ui", "askusername"):
            user = self.prompt(_("enter a commit username:"), default=None)
        if user is None and not self.interactive():
            try:
                # last resort for scripts: synthesize user@host
                user = '%s@%s' % (util.getuser(), socket.getfqdn())
                self.warn(_("No username found, using '%s' instead\n") % user)
            except KeyError:
                # getuser() may fail to resolve the current account
                pass
        if not user:
            raise util.Abort(_('no username supplied (see "hg help config")'))
        if "\n" in user:
            raise util.Abort(_("username %s contains a newline\n") % repr(user))
        return user
407
408
    def shortuser(self, user):
        """Return a short representation of a user name or email address."""
        if not self.verbose:
            # only abbreviate when not in verbose mode
            user = util.shortuser(user)
        return user
413
414
    def expandpath(self, loc, default=None):
        """Return repository location relative to cwd or from [paths]"""
        if util.hasscheme(loc) or os.path.isdir(os.path.join(loc, '.hg')):
            # already a URL or a local repository path: use as-is
            return loc

        # otherwise look the name up in the [paths] config section,
        # falling back to the `default` alias when given
        path = self.config('paths', loc)
        if not path and default is not None:
            path = self.config('paths', default)
        return path or loc
423
424
    def pushbuffer(self):
        # start capturing output into a fresh buffer; retrieved later
        # with popbuffer()
        self._buffers.append([])
426
427
    def popbuffer(self, labeled=False):
        '''pop the last buffer and return the buffered output

        If labeled is True, any labels associated with buffered
        output will be handled. By default, this has no effect
        on the output returned, but extensions and GUI tools may
        handle this argument and returned styled output. If output
        is being buffered so it can be captured and parsed or
        processed, labeled should not be set to True.
        '''
        return "".join(self._buffers.pop())
438
439
439 def write(self, *args, **opts):
440 def write(self, *args, **opts):
440 '''write args to output
441 '''write args to output
441
442
442 By default, this method simply writes to the buffer or stdout,
443 By default, this method simply writes to the buffer or stdout,
443 but extensions or GUI tools may override this method,
444 but extensions or GUI tools may override this method,
444 write_err(), popbuffer(), and label() to style output from
445 write_err(), popbuffer(), and label() to style output from
445 various parts of hg.
446 various parts of hg.
446
447
447 An optional keyword argument, "label", can be passed in.
448 An optional keyword argument, "label", can be passed in.
448 This should be a string containing label names separated by
449 This should be a string containing label names separated by
449 space. Label names take the form of "topic.type". For example,
450 space. Label names take the form of "topic.type". For example,
450 ui.debug() issues a label of "ui.debug".
451 ui.debug() issues a label of "ui.debug".
451
452
452 When labeling output for a specific command, a label of
453 When labeling output for a specific command, a label of
453 "cmdname.type" is recommended. For example, status issues
454 "cmdname.type" is recommended. For example, status issues
454 a label of "status.modified" for modified files.
455 a label of "status.modified" for modified files.
455 '''
456 '''
456 if self._buffers:
457 if self._buffers:
457 self._buffers[-1].extend([str(a) for a in args])
458 self._buffers[-1].extend([str(a) for a in args])
458 else:
459 else:
459 for a in args:
460 for a in args:
460 self.fout.write(str(a))
461 self.fout.write(str(a))
461
462
    def write_err(self, *args, **opts):
        '''write args to the error stream (self.ferr)

        stdout is flushed first so error output appears in the right
        order relative to regular output.
        '''
        try:
            if not getattr(self.fout, 'closed', False):
                self.fout.flush()
            for a in args:
                self.ferr.write(str(a))
            # stderr may be buffered under win32 when redirected to files,
            # including stdout.
            if not getattr(self.ferr, 'closed', False):
                self.ferr.flush()
        except IOError, inst:
            # a broken pipe or hung-up terminal is not our problem
            if inst.errno not in (errno.EPIPE, errno.EIO):
                raise
475
476
476 def flush(self):
477 def flush(self):
477 try: self.fout.flush()
478 try: self.fout.flush()
478 except: pass
479 except: pass
479 try: self.ferr.flush()
480 try: self.ferr.flush()
480 except: pass
481 except: pass
481
482
    def interactive(self):
        '''is interactive input allowed?

        An interactive session is a session where input can be reasonably read
        from `sys.stdin'. If this function returns false, any attempt to read
        from stdin should fail with an error, unless a sensible default has been
        specified.

        Interactiveness is triggered by the value of the `ui.interactive'
        configuration variable or - if it is unset - when `sys.stdin' points
        to a terminal device.

        This function refers to input only; for output, see `ui.formatted()'.
        '''
        i = self.configbool("ui", "interactive", None)
        if i is None:
            # some environments replace stdin without implementing isatty
            # usually those are non-interactive
            return util.isatty(self.fin)

        return i
503
504
504 def termwidth(self):
505 def termwidth(self):
505 '''how wide is the terminal in columns?
506 '''how wide is the terminal in columns?
506 '''
507 '''
507 if 'COLUMNS' in os.environ:
508 if 'COLUMNS' in os.environ:
508 try:
509 try:
509 return int(os.environ['COLUMNS'])
510 return int(os.environ['COLUMNS'])
510 except ValueError:
511 except ValueError:
511 pass
512 pass
512 return util.termwidth()
513 return util.termwidth()
513
514
    def formatted(self):
        '''should formatted output be used?

        It is often desirable to format the output to suite the output medium.
        Examples of this are truncating long lines or colorizing messages.
        However, this is not often not desirable when piping output into other
        utilities, e.g. `grep'.

        Formatted output is triggered by the value of the `ui.formatted'
        configuration variable or - if it is unset - when `sys.stdout' points
        to a terminal device. Please note that `ui.formatted' should be
        considered an implementation detail; it is not intended for use outside
        Mercurial or its extensions.

        This function refers to output only; for input, see `ui.interactive()'.
        This function always returns false when in plain mode, see `ui.plain()'.
        '''
        if self.plain():
            return False

        i = self.configbool("ui", "formatted", None)
        if i is None:
            # some environments replace stdout without implementing isatty
            # usually those are non-interactive
            return util.isatty(self.fout)

        return i
541
542
    def _readline(self, prompt=''):
        # read one line of input from self.fin, echoing the prompt
        # through self.write() so subclasses can style it
        if util.isatty(self.fin):
            try:
                # magically add command line editing support, where
                # available
                import readline
                # force demandimport to really load the module
                readline.read_history_file
                # windows sometimes raises something other than ImportError
            except Exception:
                pass

        # call write() so output goes through subclassed implementation
        # e.g. color extension on Windows
        self.write(prompt)

        # instead of trying to emulate raw_input, swap (self.fin,
        # self.fout) with (sys.stdin, sys.stdout)
        oldin = sys.stdin
        oldout = sys.stdout
        sys.stdin = self.fin
        sys.stdout = self.fout
        line = raw_input(' ')
        sys.stdin = oldin
        sys.stdout = oldout

        # When stdin is in binary mode on Windows, it can cause
        # raw_input() to emit an extra trailing carriage return
        if os.linesep == '\r\n' and line and line[-1] == '\r':
            line = line[:-1]
        return line
573
574
    def prompt(self, msg, default="y"):
        """Prompt user with msg, read response.
        If ui is not interactive, the default is returned.

        An empty response also yields the default; EOF on input raises
        util.Abort.
        """
        if not self.interactive():
            # non-interactive: echo the prompt and the chosen default
            self.write(msg, ' ', default, "\n")
            return default
        try:
            r = self._readline(self.label(msg, 'ui.prompt'))
            if not r:
                return default
            return r
        except EOFError:
            raise util.Abort(_('response expected'))
588
589
    def promptchoice(self, msg, choices, default=0):
        """Prompt user with msg, read response, and ensure it matches
        one of the provided choices. The index of the choice is returned.
        choices is a sequence of acceptable responses with the format:
        ('&None', 'E&xec', 'Sym&link') Responses are case insensitive.
        If ui is not interactive, the default is returned.
        """
        # one-letter responses: the character following '&' in each choice
        resps = [s[s.index('&')+1].lower() for s in choices]
        while True:
            # loop until the user types one of the accepted letters
            r = self.prompt(msg, resps[default])
            if r.lower() in resps:
                return resps.index(r.lower())
            self.write(_("unrecognized response\n"))
602
603
    def getpass(self, prompt=None, default=None):
        '''prompt for a password without echo; returns `default` when
        the session is not interactive'''
        if not self.interactive():
            return default
        try:
            # 'getpass' here is the stdlib module, not this method
            return getpass.getpass(prompt or _('password: '))
        except EOFError:
            raise util.Abort(_('response expected'))
610 def status(self, *msg, **opts):
611 def status(self, *msg, **opts):
611 '''write status message to output (if ui.quiet is False)
612 '''write status message to output (if ui.quiet is False)
612
613
613 This adds an output label of "ui.status".
614 This adds an output label of "ui.status".
614 '''
615 '''
615 if not self.quiet:
616 if not self.quiet:
616 opts['label'] = opts.get('label', '') + ' ui.status'
617 opts['label'] = opts.get('label', '') + ' ui.status'
617 self.write(*msg, **opts)
618 self.write(*msg, **opts)
618 def warn(self, *msg, **opts):
619 def warn(self, *msg, **opts):
619 '''write warning message to output (stderr)
620 '''write warning message to output (stderr)
620
621
621 This adds an output label of "ui.warning".
622 This adds an output label of "ui.warning".
622 '''
623 '''
623 opts['label'] = opts.get('label', '') + ' ui.warning'
624 opts['label'] = opts.get('label', '') + ' ui.warning'
624 self.write_err(*msg, **opts)
625 self.write_err(*msg, **opts)
    def note(self, *msg, **opts):
        '''write note to output (if ui.verbose is True)

        This adds an output label of "ui.note".
        '''
        if self.verbose:
            opts['label'] = opts.get('label', '') + ' ui.note'
            self.write(*msg, **opts)
    def debug(self, *msg, **opts):
        '''write debug message to output (if ui.debugflag is True)

        This adds an output label of "ui.debug".
        '''
        if self.debugflag:
            opts['label'] = opts.get('label', '') + ' ui.debug'
            self.write(*msg, **opts)
    def edit(self, text, user):
        '''spawn the user's editor on `text` and return the edited result

        The text is written to a temporary file, the editor runs with
        HGUSER set to `user`, and the (possibly modified) file contents
        are read back.  The temporary file is always removed.
        '''
        (fd, name) = tempfile.mkstemp(prefix="hg-editor-", suffix=".txt",
                                      text=True)
        try:
            f = os.fdopen(fd, "w")
            f.write(text)
            f.close()

            editor = self.geteditor()

            # util.Abort is raised (with the "edit failed" prefix) when
            # the editor exits non-zero
            util.system("%s \"%s\"" % (editor, name),
                        environ={'HGUSER': user},
                        onerr=util.Abort, errprefix=_("edit failed"),
                        out=self.fout)

            f = open(name)
            t = f.read()
            f.close()
        finally:
            os.unlink(name)

        return t
663
664
    def traceback(self, exc=None):
        '''print exception traceback if traceback printing enabled.
        only to call in exception handler. returns true if traceback
        printed.'''
        # 'traceback' inside the body refers to the stdlib module, not
        # this method
        if self.tracebackflag:
            if exc:
                # exc is a (type, value, tb) triple as from sys.exc_info()
                traceback.print_exception(exc[0], exc[1], exc[2], file=self.ferr)
            else:
                traceback.print_exc(file=self.ferr)
        return self.tracebackflag
674
675
675 def geteditor(self):
676 def geteditor(self):
676 '''return editor to use'''
677 '''return editor to use'''
677 return (os.environ.get("HGEDITOR") or
678 return (os.environ.get("HGEDITOR") or
678 self.config("ui", "editor") or
679 self.config("ui", "editor") or
679 os.environ.get("VISUAL") or
680 os.environ.get("VISUAL") or
680 os.environ.get("EDITOR", "vi"))
681 os.environ.get("EDITOR", "vi"))
681
682
682 def progress(self, topic, pos, item="", unit="", total=None):
683 def progress(self, topic, pos, item="", unit="", total=None):
683 '''show a progress message
684 '''show a progress message
684
685
685 With stock hg, this is simply a debug message that is hidden
686 With stock hg, this is simply a debug message that is hidden
686 by default, but with extensions or GUI tools it may be
687 by default, but with extensions or GUI tools it may be
687 visible. 'topic' is the current operation, 'item' is a
688 visible. 'topic' is the current operation, 'item' is a
688 non-numeric marker of the current position (ie the currently
689 non-numeric marker of the current position (ie the currently
689 in-process file), 'pos' is the current numeric position (ie
690 in-process file), 'pos' is the current numeric position (ie
690 revision, bytes, etc.), unit is a corresponding unit label,
691 revision, bytes, etc.), unit is a corresponding unit label,
691 and total is the highest expected pos.
692 and total is the highest expected pos.
692
693
693 Multiple nested topics may be active at a time.
694 Multiple nested topics may be active at a time.
694
695
695 All topics should be marked closed by setting pos to None at
696 All topics should be marked closed by setting pos to None at
696 termination.
697 termination.
697 '''
698 '''
698
699
699 if pos is None or not self.debugflag:
700 if pos is None or not self.debugflag:
700 return
701 return
701
702
702 if unit:
703 if unit:
703 unit = ' ' + unit
704 unit = ' ' + unit
704 if item:
705 if item:
705 item = ' ' + item
706 item = ' ' + item
706
707
707 if total:
708 if total:
708 pct = 100.0 * pos / total
709 pct = 100.0 * pos / total
709 self.debug('%s:%s %s/%s%s (%4.2f%%)\n'
710 self.debug('%s:%s %s/%s%s (%4.2f%%)\n'
710 % (topic, item, pos, total, unit, pct))
711 % (topic, item, pos, total, unit, pct))
711 else:
712 else:
712 self.debug('%s:%s %s%s\n' % (topic, item, pos, unit))
713 self.debug('%s:%s %s%s\n' % (topic, item, pos, unit))
713
714
    def log(self, service, message):
        '''hook for logging facility extensions

        service should be a readily-identifiable subsystem, which will
        allow filtering.
        message should be a newline-terminated string to log.
        '''
        # the base implementation intentionally does nothing; extensions
        # override this to route messages somewhere useful
        pass
722
723
    def label(self, msg, label):
        '''style msg based on supplied label

        Like ui.write(), this just returns msg unchanged, but extensions
        and GUI tools can override it to allow styling output without
        writing it.

        ui.write(s, 'label') is equivalent to
        ui.write(ui.label(s, 'label')).
        '''
        # base implementation is the identity; subclasses add markup
        return msg
General Comments 0
You need to be logged in to leave comments. Login now