##// END OF EJS Templates
move filename encoding functions from util.py to new store.py
Adrian Buehlmann -
r6839:01db3e10 default
parent child Browse files
Show More
@@ -0,0 +1,39 b''
1 # store.py - repository store handling for Mercurial
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
7
def _buildencodefun():
    """Build the (encodefilename, decodefilename) function pair.

    The encoding makes repository store filenames safe for case-insensitive
    and Windows filesystems:

      - control chars (0-31), bytes 126-255 and the Windows-reserved
        characters ``\\ : * ? " < > |`` become ``~XX`` (two lowercase
        hex digits);
      - uppercase ``A``-``Z`` become ``_`` followed by the lowercase
        letter, and ``_`` itself is escaped as ``__``;
      - everything else is passed through unchanged.

    decode() raises KeyError on input that is not a valid encoding.
    """
    e = '_'
    win_reserved = [ord(x) for x in '\\:*?"<>|']
    # identity mapping for the plain ASCII range, then overridden below
    cmap = dict((chr(x), chr(x)) for x in range(127))
    for x in list(range(32)) + list(range(126, 256)) + win_reserved:
        cmap[chr(x)] = "~%02x" % x
    for x in list(range(ord("A"), ord("Z") + 1)) + [ord(e)]:
        cmap[chr(x)] = e + chr(x).lower()
    # invert the map: every encoded fragment back to its original char
    dmap = dict((v, k) for k, v in cmap.items())

    def decode(s):
        i = 0
        while i < len(s):
            # encoded fragments are 1, 2 or 3 characters long ('a', '_a', '~2f')
            for l in (1, 2, 3):
                fragment = s[i:i + l]
                if fragment in dmap:
                    yield dmap[fragment]
                    i += l
                    break
            else:
                # no fragment length matched: malformed encoded name
                raise KeyError
    return (lambda s: "".join([cmap[c] for c in s]),
            lambda s: "".join(list(decode(s))))

encodefilename, decodefilename = _buildencodefun()
35
def encodedopener(openerfn, fn):
    """Wrap opener *openerfn* so every path is transformed by *fn* first.

    Returns a callable with the same signature as an opener; positional
    and keyword arguments beyond the path are forwarded unchanged.
    """
    def o(path, *args, **kw):
        return openerfn(fn(path), *args, **kw)
    return o
@@ -1,2076 +1,2076 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui
12 import lock, transaction, stat, errno, ui, store
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14 import match as match_
14 import match as match_
15
15
16 class localrepository(repo.repository):
16 class localrepository(repo.repository):
17 capabilities = util.set(('lookup', 'changegroupsubset'))
17 capabilities = util.set(('lookup', 'changegroupsubset'))
18 supported = ('revlogv1', 'store')
18 supported = ('revlogv1', 'store')
19
19
20 def __init__(self, parentui, path=None, create=0):
20 def __init__(self, parentui, path=None, create=0):
21 repo.repository.__init__(self)
21 repo.repository.__init__(self)
22 self.root = os.path.realpath(path)
22 self.root = os.path.realpath(path)
23 self.path = os.path.join(self.root, ".hg")
23 self.path = os.path.join(self.root, ".hg")
24 self.origroot = path
24 self.origroot = path
25 self.opener = util.opener(self.path)
25 self.opener = util.opener(self.path)
26 self.wopener = util.opener(self.root)
26 self.wopener = util.opener(self.root)
27
27
28 if not os.path.isdir(self.path):
28 if not os.path.isdir(self.path):
29 if create:
29 if create:
30 if not os.path.exists(path):
30 if not os.path.exists(path):
31 os.mkdir(path)
31 os.mkdir(path)
32 os.mkdir(self.path)
32 os.mkdir(self.path)
33 requirements = ["revlogv1"]
33 requirements = ["revlogv1"]
34 if parentui.configbool('format', 'usestore', True):
34 if parentui.configbool('format', 'usestore', True):
35 os.mkdir(os.path.join(self.path, "store"))
35 os.mkdir(os.path.join(self.path, "store"))
36 requirements.append("store")
36 requirements.append("store")
37 # create an invalid changelog
37 # create an invalid changelog
38 self.opener("00changelog.i", "a").write(
38 self.opener("00changelog.i", "a").write(
39 '\0\0\0\2' # represents revlogv2
39 '\0\0\0\2' # represents revlogv2
40 ' dummy changelog to prevent using the old repo layout'
40 ' dummy changelog to prevent using the old repo layout'
41 )
41 )
42 reqfile = self.opener("requires", "w")
42 reqfile = self.opener("requires", "w")
43 for r in requirements:
43 for r in requirements:
44 reqfile.write("%s\n" % r)
44 reqfile.write("%s\n" % r)
45 reqfile.close()
45 reqfile.close()
46 else:
46 else:
47 raise repo.RepoError(_("repository %s not found") % path)
47 raise repo.RepoError(_("repository %s not found") % path)
48 elif create:
48 elif create:
49 raise repo.RepoError(_("repository %s already exists") % path)
49 raise repo.RepoError(_("repository %s already exists") % path)
50 else:
50 else:
51 # find requirements
51 # find requirements
52 try:
52 try:
53 requirements = self.opener("requires").read().splitlines()
53 requirements = self.opener("requires").read().splitlines()
54 except IOError, inst:
54 except IOError, inst:
55 if inst.errno != errno.ENOENT:
55 if inst.errno != errno.ENOENT:
56 raise
56 raise
57 requirements = []
57 requirements = []
58 # check them
58 # check them
59 for r in requirements:
59 for r in requirements:
60 if r not in self.supported:
60 if r not in self.supported:
61 raise repo.RepoError(_("requirement '%s' not supported") % r)
61 raise repo.RepoError(_("requirement '%s' not supported") % r)
62
62
63 # setup store
63 # setup store
64 if "store" in requirements:
64 if "store" in requirements:
65 self.encodefn = util.encodefilename
65 self.encodefn = store.encodefilename
66 self.decodefn = util.decodefilename
66 self.decodefn = store.decodefilename
67 self.spath = os.path.join(self.path, "store")
67 self.spath = os.path.join(self.path, "store")
68 else:
68 else:
69 self.encodefn = lambda x: x
69 self.encodefn = lambda x: x
70 self.decodefn = lambda x: x
70 self.decodefn = lambda x: x
71 self.spath = self.path
71 self.spath = self.path
72
72
73 try:
73 try:
74 # files in .hg/ will be created using this mode
74 # files in .hg/ will be created using this mode
75 mode = os.stat(self.spath).st_mode
75 mode = os.stat(self.spath).st_mode
76 # avoid some useless chmods
76 # avoid some useless chmods
77 if (0777 & ~util._umask) == (0777 & mode):
77 if (0777 & ~util._umask) == (0777 & mode):
78 mode = None
78 mode = None
79 except OSError:
79 except OSError:
80 mode = None
80 mode = None
81
81
82 self._createmode = mode
82 self._createmode = mode
83 self.opener.createmode = mode
83 self.opener.createmode = mode
84 sopener = util.opener(self.spath)
84 sopener = util.opener(self.spath)
85 sopener.createmode = mode
85 sopener.createmode = mode
86 self.sopener = util.encodedopener(sopener, self.encodefn)
86 self.sopener = store.encodedopener(sopener, self.encodefn)
87
87
88 self.ui = ui.ui(parentui=parentui)
88 self.ui = ui.ui(parentui=parentui)
89 try:
89 try:
90 self.ui.readconfig(self.join("hgrc"), self.root)
90 self.ui.readconfig(self.join("hgrc"), self.root)
91 extensions.loadall(self.ui)
91 extensions.loadall(self.ui)
92 except IOError:
92 except IOError:
93 pass
93 pass
94
94
95 self.tagscache = None
95 self.tagscache = None
96 self._tagstypecache = None
96 self._tagstypecache = None
97 self.branchcache = None
97 self.branchcache = None
98 self._ubranchcache = None # UTF-8 version of branchcache
98 self._ubranchcache = None # UTF-8 version of branchcache
99 self._branchcachetip = None
99 self._branchcachetip = None
100 self.nodetagscache = None
100 self.nodetagscache = None
101 self.filterpats = {}
101 self.filterpats = {}
102 self._datafilters = {}
102 self._datafilters = {}
103 self._transref = self._lockref = self._wlockref = None
103 self._transref = self._lockref = self._wlockref = None
104
104
105 def __getattr__(self, name):
105 def __getattr__(self, name):
106 if name == 'changelog':
106 if name == 'changelog':
107 self.changelog = changelog.changelog(self.sopener)
107 self.changelog = changelog.changelog(self.sopener)
108 self.sopener.defversion = self.changelog.version
108 self.sopener.defversion = self.changelog.version
109 return self.changelog
109 return self.changelog
110 if name == 'manifest':
110 if name == 'manifest':
111 self.changelog
111 self.changelog
112 self.manifest = manifest.manifest(self.sopener)
112 self.manifest = manifest.manifest(self.sopener)
113 return self.manifest
113 return self.manifest
114 if name == 'dirstate':
114 if name == 'dirstate':
115 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
115 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
116 return self.dirstate
116 return self.dirstate
117 else:
117 else:
118 raise AttributeError, name
118 raise AttributeError, name
119
119
120 def __getitem__(self, changeid):
120 def __getitem__(self, changeid):
121 if changeid == None:
121 if changeid == None:
122 return context.workingctx(self)
122 return context.workingctx(self)
123 return context.changectx(self, changeid)
123 return context.changectx(self, changeid)
124
124
125 def __nonzero__(self):
125 def __nonzero__(self):
126 return True
126 return True
127
127
128 def __len__(self):
128 def __len__(self):
129 return len(self.changelog)
129 return len(self.changelog)
130
130
131 def __iter__(self):
131 def __iter__(self):
132 for i in xrange(len(self)):
132 for i in xrange(len(self)):
133 yield i
133 yield i
134
134
135 def url(self):
135 def url(self):
136 return 'file:' + self.root
136 return 'file:' + self.root
137
137
138 def hook(self, name, throw=False, **args):
138 def hook(self, name, throw=False, **args):
139 return hook.hook(self.ui, self, name, throw, **args)
139 return hook.hook(self.ui, self, name, throw, **args)
140
140
141 tag_disallowed = ':\r\n'
141 tag_disallowed = ':\r\n'
142
142
143 def _tag(self, names, node, message, local, user, date, parent=None,
143 def _tag(self, names, node, message, local, user, date, parent=None,
144 extra={}):
144 extra={}):
145 use_dirstate = parent is None
145 use_dirstate = parent is None
146
146
147 if isinstance(names, str):
147 if isinstance(names, str):
148 allchars = names
148 allchars = names
149 names = (names,)
149 names = (names,)
150 else:
150 else:
151 allchars = ''.join(names)
151 allchars = ''.join(names)
152 for c in self.tag_disallowed:
152 for c in self.tag_disallowed:
153 if c in allchars:
153 if c in allchars:
154 raise util.Abort(_('%r cannot be used in a tag name') % c)
154 raise util.Abort(_('%r cannot be used in a tag name') % c)
155
155
156 for name in names:
156 for name in names:
157 self.hook('pretag', throw=True, node=hex(node), tag=name,
157 self.hook('pretag', throw=True, node=hex(node), tag=name,
158 local=local)
158 local=local)
159
159
160 def writetags(fp, names, munge, prevtags):
160 def writetags(fp, names, munge, prevtags):
161 fp.seek(0, 2)
161 fp.seek(0, 2)
162 if prevtags and prevtags[-1] != '\n':
162 if prevtags and prevtags[-1] != '\n':
163 fp.write('\n')
163 fp.write('\n')
164 for name in names:
164 for name in names:
165 m = munge and munge(name) or name
165 m = munge and munge(name) or name
166 if self._tagstypecache and name in self._tagstypecache:
166 if self._tagstypecache and name in self._tagstypecache:
167 old = self.tagscache.get(name, nullid)
167 old = self.tagscache.get(name, nullid)
168 fp.write('%s %s\n' % (hex(old), m))
168 fp.write('%s %s\n' % (hex(old), m))
169 fp.write('%s %s\n' % (hex(node), m))
169 fp.write('%s %s\n' % (hex(node), m))
170 fp.close()
170 fp.close()
171
171
172 prevtags = ''
172 prevtags = ''
173 if local:
173 if local:
174 try:
174 try:
175 fp = self.opener('localtags', 'r+')
175 fp = self.opener('localtags', 'r+')
176 except IOError, err:
176 except IOError, err:
177 fp = self.opener('localtags', 'a')
177 fp = self.opener('localtags', 'a')
178 else:
178 else:
179 prevtags = fp.read()
179 prevtags = fp.read()
180
180
181 # local tags are stored in the current charset
181 # local tags are stored in the current charset
182 writetags(fp, names, None, prevtags)
182 writetags(fp, names, None, prevtags)
183 for name in names:
183 for name in names:
184 self.hook('tag', node=hex(node), tag=name, local=local)
184 self.hook('tag', node=hex(node), tag=name, local=local)
185 return
185 return
186
186
187 if use_dirstate:
187 if use_dirstate:
188 try:
188 try:
189 fp = self.wfile('.hgtags', 'rb+')
189 fp = self.wfile('.hgtags', 'rb+')
190 except IOError, err:
190 except IOError, err:
191 fp = self.wfile('.hgtags', 'ab')
191 fp = self.wfile('.hgtags', 'ab')
192 else:
192 else:
193 prevtags = fp.read()
193 prevtags = fp.read()
194 else:
194 else:
195 try:
195 try:
196 prevtags = self.filectx('.hgtags', parent).data()
196 prevtags = self.filectx('.hgtags', parent).data()
197 except revlog.LookupError:
197 except revlog.LookupError:
198 pass
198 pass
199 fp = self.wfile('.hgtags', 'wb')
199 fp = self.wfile('.hgtags', 'wb')
200 if prevtags:
200 if prevtags:
201 fp.write(prevtags)
201 fp.write(prevtags)
202
202
203 # committed tags are stored in UTF-8
203 # committed tags are stored in UTF-8
204 writetags(fp, names, util.fromlocal, prevtags)
204 writetags(fp, names, util.fromlocal, prevtags)
205
205
206 if use_dirstate and '.hgtags' not in self.dirstate:
206 if use_dirstate and '.hgtags' not in self.dirstate:
207 self.add(['.hgtags'])
207 self.add(['.hgtags'])
208
208
209 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
209 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
210 extra=extra)
210 extra=extra)
211
211
212 for name in names:
212 for name in names:
213 self.hook('tag', node=hex(node), tag=name, local=local)
213 self.hook('tag', node=hex(node), tag=name, local=local)
214
214
215 return tagnode
215 return tagnode
216
216
217 def tag(self, names, node, message, local, user, date):
217 def tag(self, names, node, message, local, user, date):
218 '''tag a revision with one or more symbolic names.
218 '''tag a revision with one or more symbolic names.
219
219
220 names is a list of strings or, when adding a single tag, names may be a
220 names is a list of strings or, when adding a single tag, names may be a
221 string.
221 string.
222
222
223 if local is True, the tags are stored in a per-repository file.
223 if local is True, the tags are stored in a per-repository file.
224 otherwise, they are stored in the .hgtags file, and a new
224 otherwise, they are stored in the .hgtags file, and a new
225 changeset is committed with the change.
225 changeset is committed with the change.
226
226
227 keyword arguments:
227 keyword arguments:
228
228
229 local: whether to store tags in non-version-controlled file
229 local: whether to store tags in non-version-controlled file
230 (default False)
230 (default False)
231
231
232 message: commit message to use if committing
232 message: commit message to use if committing
233
233
234 user: name of user to use if committing
234 user: name of user to use if committing
235
235
236 date: date tuple to use if committing'''
236 date: date tuple to use if committing'''
237
237
238 for x in self.status()[:5]:
238 for x in self.status()[:5]:
239 if '.hgtags' in x:
239 if '.hgtags' in x:
240 raise util.Abort(_('working copy of .hgtags is changed '
240 raise util.Abort(_('working copy of .hgtags is changed '
241 '(please commit .hgtags manually)'))
241 '(please commit .hgtags manually)'))
242
242
243 self._tag(names, node, message, local, user, date)
243 self._tag(names, node, message, local, user, date)
244
244
245 def tags(self):
245 def tags(self):
246 '''return a mapping of tag to node'''
246 '''return a mapping of tag to node'''
247 if self.tagscache:
247 if self.tagscache:
248 return self.tagscache
248 return self.tagscache
249
249
250 globaltags = {}
250 globaltags = {}
251 tagtypes = {}
251 tagtypes = {}
252
252
253 def readtags(lines, fn, tagtype):
253 def readtags(lines, fn, tagtype):
254 filetags = {}
254 filetags = {}
255 count = 0
255 count = 0
256
256
257 def warn(msg):
257 def warn(msg):
258 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
258 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
259
259
260 for l in lines:
260 for l in lines:
261 count += 1
261 count += 1
262 if not l:
262 if not l:
263 continue
263 continue
264 s = l.split(" ", 1)
264 s = l.split(" ", 1)
265 if len(s) != 2:
265 if len(s) != 2:
266 warn(_("cannot parse entry"))
266 warn(_("cannot parse entry"))
267 continue
267 continue
268 node, key = s
268 node, key = s
269 key = util.tolocal(key.strip()) # stored in UTF-8
269 key = util.tolocal(key.strip()) # stored in UTF-8
270 try:
270 try:
271 bin_n = bin(node)
271 bin_n = bin(node)
272 except TypeError:
272 except TypeError:
273 warn(_("node '%s' is not well formed") % node)
273 warn(_("node '%s' is not well formed") % node)
274 continue
274 continue
275 if bin_n not in self.changelog.nodemap:
275 if bin_n not in self.changelog.nodemap:
276 warn(_("tag '%s' refers to unknown node") % key)
276 warn(_("tag '%s' refers to unknown node") % key)
277 continue
277 continue
278
278
279 h = []
279 h = []
280 if key in filetags:
280 if key in filetags:
281 n, h = filetags[key]
281 n, h = filetags[key]
282 h.append(n)
282 h.append(n)
283 filetags[key] = (bin_n, h)
283 filetags[key] = (bin_n, h)
284
284
285 for k, nh in filetags.items():
285 for k, nh in filetags.items():
286 if k not in globaltags:
286 if k not in globaltags:
287 globaltags[k] = nh
287 globaltags[k] = nh
288 tagtypes[k] = tagtype
288 tagtypes[k] = tagtype
289 continue
289 continue
290
290
291 # we prefer the global tag if:
291 # we prefer the global tag if:
292 # it supercedes us OR
292 # it supercedes us OR
293 # mutual supercedes and it has a higher rank
293 # mutual supercedes and it has a higher rank
294 # otherwise we win because we're tip-most
294 # otherwise we win because we're tip-most
295 an, ah = nh
295 an, ah = nh
296 bn, bh = globaltags[k]
296 bn, bh = globaltags[k]
297 if (bn != an and an in bh and
297 if (bn != an and an in bh and
298 (bn not in ah or len(bh) > len(ah))):
298 (bn not in ah or len(bh) > len(ah))):
299 an = bn
299 an = bn
300 ah.extend([n for n in bh if n not in ah])
300 ah.extend([n for n in bh if n not in ah])
301 globaltags[k] = an, ah
301 globaltags[k] = an, ah
302 tagtypes[k] = tagtype
302 tagtypes[k] = tagtype
303
303
304 # read the tags file from each head, ending with the tip
304 # read the tags file from each head, ending with the tip
305 f = None
305 f = None
306 for rev, node, fnode in self._hgtagsnodes():
306 for rev, node, fnode in self._hgtagsnodes():
307 f = (f and f.filectx(fnode) or
307 f = (f and f.filectx(fnode) or
308 self.filectx('.hgtags', fileid=fnode))
308 self.filectx('.hgtags', fileid=fnode))
309 readtags(f.data().splitlines(), f, "global")
309 readtags(f.data().splitlines(), f, "global")
310
310
311 try:
311 try:
312 data = util.fromlocal(self.opener("localtags").read())
312 data = util.fromlocal(self.opener("localtags").read())
313 # localtags are stored in the local character set
313 # localtags are stored in the local character set
314 # while the internal tag table is stored in UTF-8
314 # while the internal tag table is stored in UTF-8
315 readtags(data.splitlines(), "localtags", "local")
315 readtags(data.splitlines(), "localtags", "local")
316 except IOError:
316 except IOError:
317 pass
317 pass
318
318
319 self.tagscache = {}
319 self.tagscache = {}
320 self._tagstypecache = {}
320 self._tagstypecache = {}
321 for k,nh in globaltags.items():
321 for k,nh in globaltags.items():
322 n = nh[0]
322 n = nh[0]
323 if n != nullid:
323 if n != nullid:
324 self.tagscache[k] = n
324 self.tagscache[k] = n
325 self._tagstypecache[k] = tagtypes[k]
325 self._tagstypecache[k] = tagtypes[k]
326 self.tagscache['tip'] = self.changelog.tip()
326 self.tagscache['tip'] = self.changelog.tip()
327 return self.tagscache
327 return self.tagscache
328
328
329 def tagtype(self, tagname):
329 def tagtype(self, tagname):
330 '''
330 '''
331 return the type of the given tag. result can be:
331 return the type of the given tag. result can be:
332
332
333 'local' : a local tag
333 'local' : a local tag
334 'global' : a global tag
334 'global' : a global tag
335 None : tag does not exist
335 None : tag does not exist
336 '''
336 '''
337
337
338 self.tags()
338 self.tags()
339
339
340 return self._tagstypecache.get(tagname)
340 return self._tagstypecache.get(tagname)
341
341
342 def _hgtagsnodes(self):
342 def _hgtagsnodes(self):
343 heads = self.heads()
343 heads = self.heads()
344 heads.reverse()
344 heads.reverse()
345 last = {}
345 last = {}
346 ret = []
346 ret = []
347 for node in heads:
347 for node in heads:
348 c = self[node]
348 c = self[node]
349 rev = c.rev()
349 rev = c.rev()
350 try:
350 try:
351 fnode = c.filenode('.hgtags')
351 fnode = c.filenode('.hgtags')
352 except revlog.LookupError:
352 except revlog.LookupError:
353 continue
353 continue
354 ret.append((rev, node, fnode))
354 ret.append((rev, node, fnode))
355 if fnode in last:
355 if fnode in last:
356 ret[last[fnode]] = None
356 ret[last[fnode]] = None
357 last[fnode] = len(ret) - 1
357 last[fnode] = len(ret) - 1
358 return [item for item in ret if item]
358 return [item for item in ret if item]
359
359
360 def tagslist(self):
360 def tagslist(self):
361 '''return a list of tags ordered by revision'''
361 '''return a list of tags ordered by revision'''
362 l = []
362 l = []
363 for t, n in self.tags().items():
363 for t, n in self.tags().items():
364 try:
364 try:
365 r = self.changelog.rev(n)
365 r = self.changelog.rev(n)
366 except:
366 except:
367 r = -2 # sort to the beginning of the list if unknown
367 r = -2 # sort to the beginning of the list if unknown
368 l.append((r, t, n))
368 l.append((r, t, n))
369 return [(t, n) for r, t, n in util.sort(l)]
369 return [(t, n) for r, t, n in util.sort(l)]
370
370
371 def nodetags(self, node):
371 def nodetags(self, node):
372 '''return the tags associated with a node'''
372 '''return the tags associated with a node'''
373 if not self.nodetagscache:
373 if not self.nodetagscache:
374 self.nodetagscache = {}
374 self.nodetagscache = {}
375 for t, n in self.tags().items():
375 for t, n in self.tags().items():
376 self.nodetagscache.setdefault(n, []).append(t)
376 self.nodetagscache.setdefault(n, []).append(t)
377 return self.nodetagscache.get(node, [])
377 return self.nodetagscache.get(node, [])
378
378
379 def _branchtags(self, partial, lrev):
379 def _branchtags(self, partial, lrev):
380 tiprev = len(self) - 1
380 tiprev = len(self) - 1
381 if lrev != tiprev:
381 if lrev != tiprev:
382 self._updatebranchcache(partial, lrev+1, tiprev+1)
382 self._updatebranchcache(partial, lrev+1, tiprev+1)
383 self._writebranchcache(partial, self.changelog.tip(), tiprev)
383 self._writebranchcache(partial, self.changelog.tip(), tiprev)
384
384
385 return partial
385 return partial
386
386
387 def branchtags(self):
387 def branchtags(self):
388 tip = self.changelog.tip()
388 tip = self.changelog.tip()
389 if self.branchcache is not None and self._branchcachetip == tip:
389 if self.branchcache is not None and self._branchcachetip == tip:
390 return self.branchcache
390 return self.branchcache
391
391
392 oldtip = self._branchcachetip
392 oldtip = self._branchcachetip
393 self._branchcachetip = tip
393 self._branchcachetip = tip
394 if self.branchcache is None:
394 if self.branchcache is None:
395 self.branchcache = {} # avoid recursion in changectx
395 self.branchcache = {} # avoid recursion in changectx
396 else:
396 else:
397 self.branchcache.clear() # keep using the same dict
397 self.branchcache.clear() # keep using the same dict
398 if oldtip is None or oldtip not in self.changelog.nodemap:
398 if oldtip is None or oldtip not in self.changelog.nodemap:
399 partial, last, lrev = self._readbranchcache()
399 partial, last, lrev = self._readbranchcache()
400 else:
400 else:
401 lrev = self.changelog.rev(oldtip)
401 lrev = self.changelog.rev(oldtip)
402 partial = self._ubranchcache
402 partial = self._ubranchcache
403
403
404 self._branchtags(partial, lrev)
404 self._branchtags(partial, lrev)
405
405
406 # the branch cache is stored on disk as UTF-8, but in the local
406 # the branch cache is stored on disk as UTF-8, but in the local
407 # charset internally
407 # charset internally
408 for k, v in partial.items():
408 for k, v in partial.items():
409 self.branchcache[util.tolocal(k)] = v
409 self.branchcache[util.tolocal(k)] = v
410 self._ubranchcache = partial
410 self._ubranchcache = partial
411 return self.branchcache
411 return self.branchcache
412
412
413 def _readbranchcache(self):
413 def _readbranchcache(self):
414 partial = {}
414 partial = {}
415 try:
415 try:
416 f = self.opener("branch.cache")
416 f = self.opener("branch.cache")
417 lines = f.read().split('\n')
417 lines = f.read().split('\n')
418 f.close()
418 f.close()
419 except (IOError, OSError):
419 except (IOError, OSError):
420 return {}, nullid, nullrev
420 return {}, nullid, nullrev
421
421
422 try:
422 try:
423 last, lrev = lines.pop(0).split(" ", 1)
423 last, lrev = lines.pop(0).split(" ", 1)
424 last, lrev = bin(last), int(lrev)
424 last, lrev = bin(last), int(lrev)
425 if lrev >= len(self) or self[lrev].node() != last:
425 if lrev >= len(self) or self[lrev].node() != last:
426 # invalidate the cache
426 # invalidate the cache
427 raise ValueError('invalidating branch cache (tip differs)')
427 raise ValueError('invalidating branch cache (tip differs)')
428 for l in lines:
428 for l in lines:
429 if not l: continue
429 if not l: continue
430 node, label = l.split(" ", 1)
430 node, label = l.split(" ", 1)
431 partial[label.strip()] = bin(node)
431 partial[label.strip()] = bin(node)
432 except (KeyboardInterrupt, util.SignalInterrupt):
432 except (KeyboardInterrupt, util.SignalInterrupt):
433 raise
433 raise
434 except Exception, inst:
434 except Exception, inst:
435 if self.ui.debugflag:
435 if self.ui.debugflag:
436 self.ui.warn(str(inst), '\n')
436 self.ui.warn(str(inst), '\n')
437 partial, last, lrev = {}, nullid, nullrev
437 partial, last, lrev = {}, nullid, nullrev
438 return partial, last, lrev
438 return partial, last, lrev
439
439
440 def _writebranchcache(self, branches, tip, tiprev):
440 def _writebranchcache(self, branches, tip, tiprev):
441 try:
441 try:
442 f = self.opener("branch.cache", "w", atomictemp=True)
442 f = self.opener("branch.cache", "w", atomictemp=True)
443 f.write("%s %s\n" % (hex(tip), tiprev))
443 f.write("%s %s\n" % (hex(tip), tiprev))
444 for label, node in branches.iteritems():
444 for label, node in branches.iteritems():
445 f.write("%s %s\n" % (hex(node), label))
445 f.write("%s %s\n" % (hex(node), label))
446 f.rename()
446 f.rename()
447 except (IOError, OSError):
447 except (IOError, OSError):
448 pass
448 pass
449
449
450 def _updatebranchcache(self, partial, start, end):
450 def _updatebranchcache(self, partial, start, end):
451 for r in xrange(start, end):
451 for r in xrange(start, end):
452 c = self[r]
452 c = self[r]
453 b = c.branch()
453 b = c.branch()
454 partial[b] = c.node()
454 partial[b] = c.node()
455
455
456 def lookup(self, key):
456 def lookup(self, key):
457 if key == '.':
457 if key == '.':
458 return self.dirstate.parents()[0]
458 return self.dirstate.parents()[0]
459 elif key == 'null':
459 elif key == 'null':
460 return nullid
460 return nullid
461 n = self.changelog._match(key)
461 n = self.changelog._match(key)
462 if n:
462 if n:
463 return n
463 return n
464 if key in self.tags():
464 if key in self.tags():
465 return self.tags()[key]
465 return self.tags()[key]
466 if key in self.branchtags():
466 if key in self.branchtags():
467 return self.branchtags()[key]
467 return self.branchtags()[key]
468 n = self.changelog._partialmatch(key)
468 n = self.changelog._partialmatch(key)
469 if n:
469 if n:
470 return n
470 return n
471 try:
471 try:
472 if len(key) == 20:
472 if len(key) == 20:
473 key = hex(key)
473 key = hex(key)
474 except:
474 except:
475 pass
475 pass
476 raise repo.RepoError(_("unknown revision '%s'") % key)
476 raise repo.RepoError(_("unknown revision '%s'") % key)
477
477
478 def local(self):
478 def local(self):
479 return True
479 return True
480
480
481 def join(self, f):
481 def join(self, f):
482 return os.path.join(self.path, f)
482 return os.path.join(self.path, f)
483
483
484 def sjoin(self, f):
484 def sjoin(self, f):
485 f = self.encodefn(f)
485 f = self.encodefn(f)
486 return os.path.join(self.spath, f)
486 return os.path.join(self.spath, f)
487
487
488 def wjoin(self, f):
488 def wjoin(self, f):
489 return os.path.join(self.root, f)
489 return os.path.join(self.root, f)
490
490
491 def rjoin(self, f):
491 def rjoin(self, f):
492 return os.path.join(self.root, util.pconvert(f))
492 return os.path.join(self.root, util.pconvert(f))
493
493
494 def file(self, f):
494 def file(self, f):
495 if f[0] == '/':
495 if f[0] == '/':
496 f = f[1:]
496 f = f[1:]
497 return filelog.filelog(self.sopener, f)
497 return filelog.filelog(self.sopener, f)
498
498
499 def changectx(self, changeid):
499 def changectx(self, changeid):
500 return self[changeid]
500 return self[changeid]
501
501
502 def parents(self, changeid=None):
502 def parents(self, changeid=None):
503 '''get list of changectxs for parents of changeid'''
503 '''get list of changectxs for parents of changeid'''
504 return self[changeid].parents()
504 return self[changeid].parents()
505
505
506 def filectx(self, path, changeid=None, fileid=None):
506 def filectx(self, path, changeid=None, fileid=None):
507 """changeid can be a changeset revision, node, or tag.
507 """changeid can be a changeset revision, node, or tag.
508 fileid can be a file revision or node."""
508 fileid can be a file revision or node."""
509 return context.filectx(self, path, changeid, fileid)
509 return context.filectx(self, path, changeid, fileid)
510
510
511 def getcwd(self):
511 def getcwd(self):
512 return self.dirstate.getcwd()
512 return self.dirstate.getcwd()
513
513
514 def pathto(self, f, cwd=None):
514 def pathto(self, f, cwd=None):
515 return self.dirstate.pathto(f, cwd)
515 return self.dirstate.pathto(f, cwd)
516
516
517 def wfile(self, f, mode='r'):
517 def wfile(self, f, mode='r'):
518 return self.wopener(f, mode)
518 return self.wopener(f, mode)
519
519
520 def _link(self, f):
520 def _link(self, f):
521 return os.path.islink(self.wjoin(f))
521 return os.path.islink(self.wjoin(f))
522
522
523 def _filter(self, filter, filename, data):
523 def _filter(self, filter, filename, data):
524 if filter not in self.filterpats:
524 if filter not in self.filterpats:
525 l = []
525 l = []
526 for pat, cmd in self.ui.configitems(filter):
526 for pat, cmd in self.ui.configitems(filter):
527 mf = util.matcher(self.root, "", [pat], [], [])[1]
527 mf = util.matcher(self.root, "", [pat], [], [])[1]
528 fn = None
528 fn = None
529 params = cmd
529 params = cmd
530 for name, filterfn in self._datafilters.iteritems():
530 for name, filterfn in self._datafilters.iteritems():
531 if cmd.startswith(name):
531 if cmd.startswith(name):
532 fn = filterfn
532 fn = filterfn
533 params = cmd[len(name):].lstrip()
533 params = cmd[len(name):].lstrip()
534 break
534 break
535 if not fn:
535 if not fn:
536 fn = lambda s, c, **kwargs: util.filter(s, c)
536 fn = lambda s, c, **kwargs: util.filter(s, c)
537 # Wrap old filters not supporting keyword arguments
537 # Wrap old filters not supporting keyword arguments
538 if not inspect.getargspec(fn)[2]:
538 if not inspect.getargspec(fn)[2]:
539 oldfn = fn
539 oldfn = fn
540 fn = lambda s, c, **kwargs: oldfn(s, c)
540 fn = lambda s, c, **kwargs: oldfn(s, c)
541 l.append((mf, fn, params))
541 l.append((mf, fn, params))
542 self.filterpats[filter] = l
542 self.filterpats[filter] = l
543
543
544 for mf, fn, cmd in self.filterpats[filter]:
544 for mf, fn, cmd in self.filterpats[filter]:
545 if mf(filename):
545 if mf(filename):
546 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
546 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
547 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
547 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
548 break
548 break
549
549
550 return data
550 return data
551
551
552 def adddatafilter(self, name, filter):
552 def adddatafilter(self, name, filter):
553 self._datafilters[name] = filter
553 self._datafilters[name] = filter
554
554
555 def wread(self, filename):
555 def wread(self, filename):
556 if self._link(filename):
556 if self._link(filename):
557 data = os.readlink(self.wjoin(filename))
557 data = os.readlink(self.wjoin(filename))
558 else:
558 else:
559 data = self.wopener(filename, 'r').read()
559 data = self.wopener(filename, 'r').read()
560 return self._filter("encode", filename, data)
560 return self._filter("encode", filename, data)
561
561
562 def wwrite(self, filename, data, flags):
562 def wwrite(self, filename, data, flags):
563 data = self._filter("decode", filename, data)
563 data = self._filter("decode", filename, data)
564 try:
564 try:
565 os.unlink(self.wjoin(filename))
565 os.unlink(self.wjoin(filename))
566 except OSError:
566 except OSError:
567 pass
567 pass
568 self.wopener(filename, 'w').write(data)
568 self.wopener(filename, 'w').write(data)
569 util.set_flags(self.wjoin(filename), flags)
569 util.set_flags(self.wjoin(filename), flags)
570
570
571 def wwritedata(self, filename, data):
571 def wwritedata(self, filename, data):
572 return self._filter("decode", filename, data)
572 return self._filter("decode", filename, data)
573
573
574 def transaction(self):
574 def transaction(self):
575 if self._transref and self._transref():
575 if self._transref and self._transref():
576 return self._transref().nest()
576 return self._transref().nest()
577
577
578 # abort here if the journal already exists
578 # abort here if the journal already exists
579 if os.path.exists(self.sjoin("journal")):
579 if os.path.exists(self.sjoin("journal")):
580 raise repo.RepoError(_("journal already exists - run hg recover"))
580 raise repo.RepoError(_("journal already exists - run hg recover"))
581
581
582 # save dirstate for rollback
582 # save dirstate for rollback
583 try:
583 try:
584 ds = self.opener("dirstate").read()
584 ds = self.opener("dirstate").read()
585 except IOError:
585 except IOError:
586 ds = ""
586 ds = ""
587 self.opener("journal.dirstate", "w").write(ds)
587 self.opener("journal.dirstate", "w").write(ds)
588 self.opener("journal.branch", "w").write(self.dirstate.branch())
588 self.opener("journal.branch", "w").write(self.dirstate.branch())
589
589
590 renames = [(self.sjoin("journal"), self.sjoin("undo")),
590 renames = [(self.sjoin("journal"), self.sjoin("undo")),
591 (self.join("journal.dirstate"), self.join("undo.dirstate")),
591 (self.join("journal.dirstate"), self.join("undo.dirstate")),
592 (self.join("journal.branch"), self.join("undo.branch"))]
592 (self.join("journal.branch"), self.join("undo.branch"))]
593 tr = transaction.transaction(self.ui.warn, self.sopener,
593 tr = transaction.transaction(self.ui.warn, self.sopener,
594 self.sjoin("journal"),
594 self.sjoin("journal"),
595 aftertrans(renames),
595 aftertrans(renames),
596 self._createmode)
596 self._createmode)
597 self._transref = weakref.ref(tr)
597 self._transref = weakref.ref(tr)
598 return tr
598 return tr
599
599
600 def recover(self):
600 def recover(self):
601 l = self.lock()
601 l = self.lock()
602 try:
602 try:
603 if os.path.exists(self.sjoin("journal")):
603 if os.path.exists(self.sjoin("journal")):
604 self.ui.status(_("rolling back interrupted transaction\n"))
604 self.ui.status(_("rolling back interrupted transaction\n"))
605 transaction.rollback(self.sopener, self.sjoin("journal"))
605 transaction.rollback(self.sopener, self.sjoin("journal"))
606 self.invalidate()
606 self.invalidate()
607 return True
607 return True
608 else:
608 else:
609 self.ui.warn(_("no interrupted transaction available\n"))
609 self.ui.warn(_("no interrupted transaction available\n"))
610 return False
610 return False
611 finally:
611 finally:
612 del l
612 del l
613
613
614 def rollback(self):
614 def rollback(self):
615 wlock = lock = None
615 wlock = lock = None
616 try:
616 try:
617 wlock = self.wlock()
617 wlock = self.wlock()
618 lock = self.lock()
618 lock = self.lock()
619 if os.path.exists(self.sjoin("undo")):
619 if os.path.exists(self.sjoin("undo")):
620 self.ui.status(_("rolling back last transaction\n"))
620 self.ui.status(_("rolling back last transaction\n"))
621 transaction.rollback(self.sopener, self.sjoin("undo"))
621 transaction.rollback(self.sopener, self.sjoin("undo"))
622 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
622 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
623 try:
623 try:
624 branch = self.opener("undo.branch").read()
624 branch = self.opener("undo.branch").read()
625 self.dirstate.setbranch(branch)
625 self.dirstate.setbranch(branch)
626 except IOError:
626 except IOError:
627 self.ui.warn(_("Named branch could not be reset, "
627 self.ui.warn(_("Named branch could not be reset, "
628 "current branch still is: %s\n")
628 "current branch still is: %s\n")
629 % util.tolocal(self.dirstate.branch()))
629 % util.tolocal(self.dirstate.branch()))
630 self.invalidate()
630 self.invalidate()
631 self.dirstate.invalidate()
631 self.dirstate.invalidate()
632 else:
632 else:
633 self.ui.warn(_("no rollback information available\n"))
633 self.ui.warn(_("no rollback information available\n"))
634 finally:
634 finally:
635 del lock, wlock
635 del lock, wlock
636
636
637 def invalidate(self):
637 def invalidate(self):
638 for a in "changelog manifest".split():
638 for a in "changelog manifest".split():
639 if a in self.__dict__:
639 if a in self.__dict__:
640 delattr(self, a)
640 delattr(self, a)
641 self.tagscache = None
641 self.tagscache = None
642 self._tagstypecache = None
642 self._tagstypecache = None
643 self.nodetagscache = None
643 self.nodetagscache = None
644 self.branchcache = None
644 self.branchcache = None
645 self._ubranchcache = None
645 self._ubranchcache = None
646 self._branchcachetip = None
646 self._branchcachetip = None
647
647
648 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
648 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
649 try:
649 try:
650 l = lock.lock(lockname, 0, releasefn, desc=desc)
650 l = lock.lock(lockname, 0, releasefn, desc=desc)
651 except lock.LockHeld, inst:
651 except lock.LockHeld, inst:
652 if not wait:
652 if not wait:
653 raise
653 raise
654 self.ui.warn(_("waiting for lock on %s held by %r\n") %
654 self.ui.warn(_("waiting for lock on %s held by %r\n") %
655 (desc, inst.locker))
655 (desc, inst.locker))
656 # default to 600 seconds timeout
656 # default to 600 seconds timeout
657 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
657 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
658 releasefn, desc=desc)
658 releasefn, desc=desc)
659 if acquirefn:
659 if acquirefn:
660 acquirefn()
660 acquirefn()
661 return l
661 return l
662
662
663 def lock(self, wait=True):
663 def lock(self, wait=True):
664 if self._lockref and self._lockref():
664 if self._lockref and self._lockref():
665 return self._lockref()
665 return self._lockref()
666
666
667 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
667 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
668 _('repository %s') % self.origroot)
668 _('repository %s') % self.origroot)
669 self._lockref = weakref.ref(l)
669 self._lockref = weakref.ref(l)
670 return l
670 return l
671
671
672 def wlock(self, wait=True):
672 def wlock(self, wait=True):
673 if self._wlockref and self._wlockref():
673 if self._wlockref and self._wlockref():
674 return self._wlockref()
674 return self._wlockref()
675
675
676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
677 self.dirstate.invalidate, _('working directory of %s') %
677 self.dirstate.invalidate, _('working directory of %s') %
678 self.origroot)
678 self.origroot)
679 self._wlockref = weakref.ref(l)
679 self._wlockref = weakref.ref(l)
680 return l
680 return l
681
681
682 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
682 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
683 """
683 """
684 commit an individual file as part of a larger transaction
684 commit an individual file as part of a larger transaction
685 """
685 """
686
686
687 fn = fctx.path()
687 fn = fctx.path()
688 t = fctx.data()
688 t = fctx.data()
689 fl = self.file(fn)
689 fl = self.file(fn)
690 fp1 = manifest1.get(fn, nullid)
690 fp1 = manifest1.get(fn, nullid)
691 fp2 = manifest2.get(fn, nullid)
691 fp2 = manifest2.get(fn, nullid)
692
692
693 meta = {}
693 meta = {}
694 cp = fctx.renamed()
694 cp = fctx.renamed()
695 if cp and cp[0] != fn:
695 if cp and cp[0] != fn:
696 cp = cp[0]
696 cp = cp[0]
697 # Mark the new revision of this file as a copy of another
697 # Mark the new revision of this file as a copy of another
698 # file. This copy data will effectively act as a parent
698 # file. This copy data will effectively act as a parent
699 # of this new revision. If this is a merge, the first
699 # of this new revision. If this is a merge, the first
700 # parent will be the nullid (meaning "look up the copy data")
700 # parent will be the nullid (meaning "look up the copy data")
701 # and the second one will be the other parent. For example:
701 # and the second one will be the other parent. For example:
702 #
702 #
703 # 0 --- 1 --- 3 rev1 changes file foo
703 # 0 --- 1 --- 3 rev1 changes file foo
704 # \ / rev2 renames foo to bar and changes it
704 # \ / rev2 renames foo to bar and changes it
705 # \- 2 -/ rev3 should have bar with all changes and
705 # \- 2 -/ rev3 should have bar with all changes and
706 # should record that bar descends from
706 # should record that bar descends from
707 # bar in rev2 and foo in rev1
707 # bar in rev2 and foo in rev1
708 #
708 #
709 # this allows this merge to succeed:
709 # this allows this merge to succeed:
710 #
710 #
711 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
711 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
712 # \ / merging rev3 and rev4 should use bar@rev2
712 # \ / merging rev3 and rev4 should use bar@rev2
713 # \- 2 --- 4 as the merge base
713 # \- 2 --- 4 as the merge base
714 #
714 #
715 meta["copy"] = cp
715 meta["copy"] = cp
716 if not manifest2: # not a branch merge
716 if not manifest2: # not a branch merge
717 meta["copyrev"] = hex(manifest1[cp])
717 meta["copyrev"] = hex(manifest1[cp])
718 fp2 = nullid
718 fp2 = nullid
719 elif fp2 != nullid: # copied on remote side
719 elif fp2 != nullid: # copied on remote side
720 meta["copyrev"] = hex(manifest1[cp])
720 meta["copyrev"] = hex(manifest1[cp])
721 elif fp1 != nullid: # copied on local side, reversed
721 elif fp1 != nullid: # copied on local side, reversed
722 meta["copyrev"] = hex(manifest2[cp])
722 meta["copyrev"] = hex(manifest2[cp])
723 fp2 = fp1
723 fp2 = fp1
724 elif cp in manifest2: # directory rename on local side
724 elif cp in manifest2: # directory rename on local side
725 meta["copyrev"] = hex(manifest2[cp])
725 meta["copyrev"] = hex(manifest2[cp])
726 else: # directory rename on remote side
726 else: # directory rename on remote side
727 meta["copyrev"] = hex(manifest1[cp])
727 meta["copyrev"] = hex(manifest1[cp])
728 self.ui.debug(_(" %s: copy %s:%s\n") %
728 self.ui.debug(_(" %s: copy %s:%s\n") %
729 (fn, cp, meta["copyrev"]))
729 (fn, cp, meta["copyrev"]))
730 fp1 = nullid
730 fp1 = nullid
731 elif fp2 != nullid:
731 elif fp2 != nullid:
732 # is one parent an ancestor of the other?
732 # is one parent an ancestor of the other?
733 fpa = fl.ancestor(fp1, fp2)
733 fpa = fl.ancestor(fp1, fp2)
734 if fpa == fp1:
734 if fpa == fp1:
735 fp1, fp2 = fp2, nullid
735 fp1, fp2 = fp2, nullid
736 elif fpa == fp2:
736 elif fpa == fp2:
737 fp2 = nullid
737 fp2 = nullid
738
738
739 # is the file unmodified from the parent? report existing entry
739 # is the file unmodified from the parent? report existing entry
740 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
740 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
741 return fp1
741 return fp1
742
742
743 changelist.append(fn)
743 changelist.append(fn)
744 return fl.add(t, meta, tr, linkrev, fp1, fp2)
744 return fl.add(t, meta, tr, linkrev, fp1, fp2)
745
745
746 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
746 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
747 if p1 is None:
747 if p1 is None:
748 p1, p2 = self.dirstate.parents()
748 p1, p2 = self.dirstate.parents()
749 return self.commit(files=files, text=text, user=user, date=date,
749 return self.commit(files=files, text=text, user=user, date=date,
750 p1=p1, p2=p2, extra=extra, empty_ok=True)
750 p1=p1, p2=p2, extra=extra, empty_ok=True)
751
751
752 def commit(self, files=None, text="", user=None, date=None,
752 def commit(self, files=None, text="", user=None, date=None,
753 match=None, force=False, force_editor=False,
753 match=None, force=False, force_editor=False,
754 p1=None, p2=None, extra={}, empty_ok=False):
754 p1=None, p2=None, extra={}, empty_ok=False):
755 wlock = lock = None
755 wlock = lock = None
756 if files:
756 if files:
757 files = util.unique(files)
757 files = util.unique(files)
758 try:
758 try:
759 wlock = self.wlock()
759 wlock = self.wlock()
760 lock = self.lock()
760 lock = self.lock()
761 use_dirstate = (p1 is None) # not rawcommit
761 use_dirstate = (p1 is None) # not rawcommit
762
762
763 if use_dirstate:
763 if use_dirstate:
764 p1, p2 = self.dirstate.parents()
764 p1, p2 = self.dirstate.parents()
765 update_dirstate = True
765 update_dirstate = True
766
766
767 if (not force and p2 != nullid and
767 if (not force and p2 != nullid and
768 (match and (match.files() or match.anypats()))):
768 (match and (match.files() or match.anypats()))):
769 raise util.Abort(_('cannot partially commit a merge '
769 raise util.Abort(_('cannot partially commit a merge '
770 '(do not specify files or patterns)'))
770 '(do not specify files or patterns)'))
771
771
772 if files:
772 if files:
773 modified, removed = [], []
773 modified, removed = [], []
774 for f in files:
774 for f in files:
775 s = self.dirstate[f]
775 s = self.dirstate[f]
776 if s in 'nma':
776 if s in 'nma':
777 modified.append(f)
777 modified.append(f)
778 elif s == 'r':
778 elif s == 'r':
779 removed.append(f)
779 removed.append(f)
780 else:
780 else:
781 self.ui.warn(_("%s not tracked!\n") % f)
781 self.ui.warn(_("%s not tracked!\n") % f)
782 changes = [modified, [], removed, [], []]
782 changes = [modified, [], removed, [], []]
783 else:
783 else:
784 changes = self.status(match=match)
784 changes = self.status(match=match)
785 else:
785 else:
786 p1, p2 = p1, p2 or nullid
786 p1, p2 = p1, p2 or nullid
787 update_dirstate = (self.dirstate.parents()[0] == p1)
787 update_dirstate = (self.dirstate.parents()[0] == p1)
788 changes = [files, [], [], [], []]
788 changes = [files, [], [], [], []]
789
789
790 wctx = context.workingctx(self, (p1, p2), text, user, date,
790 wctx = context.workingctx(self, (p1, p2), text, user, date,
791 extra, changes)
791 extra, changes)
792 return self._commitctx(wctx, force, force_editor, empty_ok,
792 return self._commitctx(wctx, force, force_editor, empty_ok,
793 use_dirstate, update_dirstate)
793 use_dirstate, update_dirstate)
794 finally:
794 finally:
795 del lock, wlock
795 del lock, wlock
796
796
797 def commitctx(self, ctx):
797 def commitctx(self, ctx):
798 wlock = lock = None
798 wlock = lock = None
799 try:
799 try:
800 wlock = self.wlock()
800 wlock = self.wlock()
801 lock = self.lock()
801 lock = self.lock()
802 return self._commitctx(ctx, force=True, force_editor=False,
802 return self._commitctx(ctx, force=True, force_editor=False,
803 empty_ok=True, use_dirstate=False,
803 empty_ok=True, use_dirstate=False,
804 update_dirstate=False)
804 update_dirstate=False)
805 finally:
805 finally:
806 del lock, wlock
806 del lock, wlock
807
807
808 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
808 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
809 use_dirstate=True, update_dirstate=True):
809 use_dirstate=True, update_dirstate=True):
810 tr = None
810 tr = None
811 valid = 0 # don't save the dirstate if this isn't set
811 valid = 0 # don't save the dirstate if this isn't set
812 try:
812 try:
813 commit = util.sort(wctx.modified() + wctx.added())
813 commit = util.sort(wctx.modified() + wctx.added())
814 remove = wctx.removed()
814 remove = wctx.removed()
815 extra = wctx.extra().copy()
815 extra = wctx.extra().copy()
816 branchname = extra['branch']
816 branchname = extra['branch']
817 user = wctx.user()
817 user = wctx.user()
818 text = wctx.description()
818 text = wctx.description()
819
819
820 p1, p2 = [p.node() for p in wctx.parents()]
820 p1, p2 = [p.node() for p in wctx.parents()]
821 c1 = self.changelog.read(p1)
821 c1 = self.changelog.read(p1)
822 c2 = self.changelog.read(p2)
822 c2 = self.changelog.read(p2)
823 m1 = self.manifest.read(c1[0]).copy()
823 m1 = self.manifest.read(c1[0]).copy()
824 m2 = self.manifest.read(c2[0])
824 m2 = self.manifest.read(c2[0])
825
825
826 if use_dirstate:
826 if use_dirstate:
827 oldname = c1[5].get("branch") # stored in UTF-8
827 oldname = c1[5].get("branch") # stored in UTF-8
828 if (not commit and not remove and not force and p2 == nullid
828 if (not commit and not remove and not force and p2 == nullid
829 and branchname == oldname):
829 and branchname == oldname):
830 self.ui.status(_("nothing changed\n"))
830 self.ui.status(_("nothing changed\n"))
831 return None
831 return None
832
832
833 xp1 = hex(p1)
833 xp1 = hex(p1)
834 if p2 == nullid: xp2 = ''
834 if p2 == nullid: xp2 = ''
835 else: xp2 = hex(p2)
835 else: xp2 = hex(p2)
836
836
837 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
837 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
838
838
839 tr = self.transaction()
839 tr = self.transaction()
840 trp = weakref.proxy(tr)
840 trp = weakref.proxy(tr)
841
841
842 # check in files
842 # check in files
843 new = {}
843 new = {}
844 changed = []
844 changed = []
845 linkrev = len(self)
845 linkrev = len(self)
846 for f in commit:
846 for f in commit:
847 self.ui.note(f + "\n")
847 self.ui.note(f + "\n")
848 try:
848 try:
849 fctx = wctx.filectx(f)
849 fctx = wctx.filectx(f)
850 newflags = fctx.flags()
850 newflags = fctx.flags()
851 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
851 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
852 if ((not changed or changed[-1] != f) and
852 if ((not changed or changed[-1] != f) and
853 m2.get(f) != new[f]):
853 m2.get(f) != new[f]):
854 # mention the file in the changelog if some
854 # mention the file in the changelog if some
855 # flag changed, even if there was no content
855 # flag changed, even if there was no content
856 # change.
856 # change.
857 if m1.flags(f) != newflags:
857 if m1.flags(f) != newflags:
858 changed.append(f)
858 changed.append(f)
859 m1.set(f, newflags)
859 m1.set(f, newflags)
860 if use_dirstate:
860 if use_dirstate:
861 self.dirstate.normal(f)
861 self.dirstate.normal(f)
862
862
863 except (OSError, IOError):
863 except (OSError, IOError):
864 if use_dirstate:
864 if use_dirstate:
865 self.ui.warn(_("trouble committing %s!\n") % f)
865 self.ui.warn(_("trouble committing %s!\n") % f)
866 raise
866 raise
867 else:
867 else:
868 remove.append(f)
868 remove.append(f)
869
869
870 # update manifest
870 # update manifest
871 m1.update(new)
871 m1.update(new)
872 removed = []
872 removed = []
873
873
874 for f in util.sort(remove):
874 for f in util.sort(remove):
875 if f in m1:
875 if f in m1:
876 del m1[f]
876 del m1[f]
877 removed.append(f)
877 removed.append(f)
878 elif f in m2:
878 elif f in m2:
879 removed.append(f)
879 removed.append(f)
880 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
880 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
881 (new, removed))
881 (new, removed))
882
882
883 # add changeset
883 # add changeset
884 if (not empty_ok and not text) or force_editor:
884 if (not empty_ok and not text) or force_editor:
885 edittext = []
885 edittext = []
886 if text:
886 if text:
887 edittext.append(text)
887 edittext.append(text)
888 edittext.append("")
888 edittext.append("")
889 edittext.append(_("HG: Enter commit message."
889 edittext.append(_("HG: Enter commit message."
890 " Lines beginning with 'HG:' are removed."))
890 " Lines beginning with 'HG:' are removed."))
891 edittext.append("HG: --")
891 edittext.append("HG: --")
892 edittext.append("HG: user: %s" % user)
892 edittext.append("HG: user: %s" % user)
893 if p2 != nullid:
893 if p2 != nullid:
894 edittext.append("HG: branch merge")
894 edittext.append("HG: branch merge")
895 if branchname:
895 if branchname:
896 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
896 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
897 edittext.extend(["HG: changed %s" % f for f in changed])
897 edittext.extend(["HG: changed %s" % f for f in changed])
898 edittext.extend(["HG: removed %s" % f for f in removed])
898 edittext.extend(["HG: removed %s" % f for f in removed])
899 if not changed and not remove:
899 if not changed and not remove:
900 edittext.append("HG: no files changed")
900 edittext.append("HG: no files changed")
901 edittext.append("")
901 edittext.append("")
902 # run editor in the repository root
902 # run editor in the repository root
903 olddir = os.getcwd()
903 olddir = os.getcwd()
904 os.chdir(self.root)
904 os.chdir(self.root)
905 text = self.ui.edit("\n".join(edittext), user)
905 text = self.ui.edit("\n".join(edittext), user)
906 os.chdir(olddir)
906 os.chdir(olddir)
907
907
908 lines = [line.rstrip() for line in text.rstrip().splitlines()]
908 lines = [line.rstrip() for line in text.rstrip().splitlines()]
909 while lines and not lines[0]:
909 while lines and not lines[0]:
910 del lines[0]
910 del lines[0]
911 if not lines and use_dirstate:
911 if not lines and use_dirstate:
912 raise util.Abort(_("empty commit message"))
912 raise util.Abort(_("empty commit message"))
913 text = '\n'.join(lines)
913 text = '\n'.join(lines)
914
914
915 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
915 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
916 user, wctx.date(), extra)
916 user, wctx.date(), extra)
917 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
917 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
918 parent2=xp2)
918 parent2=xp2)
919 tr.close()
919 tr.close()
920
920
921 if self.branchcache:
921 if self.branchcache:
922 self.branchtags()
922 self.branchtags()
923
923
924 if use_dirstate or update_dirstate:
924 if use_dirstate or update_dirstate:
925 self.dirstate.setparents(n)
925 self.dirstate.setparents(n)
926 if use_dirstate:
926 if use_dirstate:
927 for f in removed:
927 for f in removed:
928 self.dirstate.forget(f)
928 self.dirstate.forget(f)
929 valid = 1 # our dirstate updates are complete
929 valid = 1 # our dirstate updates are complete
930
930
931 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
931 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
932 return n
932 return n
933 finally:
933 finally:
934 if not valid: # don't save our updated dirstate
934 if not valid: # don't save our updated dirstate
935 self.dirstate.invalidate()
935 self.dirstate.invalidate()
936 del tr
936 del tr
937
937
938 def walk(self, match, node=None):
938 def walk(self, match, node=None):
939 '''
939 '''
940 walk recursively through the directory tree or a given
940 walk recursively through the directory tree or a given
941 changeset, finding all files matched by the match
941 changeset, finding all files matched by the match
942 function
942 function
943 '''
943 '''
944 return self[node].walk(match)
944 return self[node].walk(match)
945
945
946 def status(self, node1='.', node2=None, match=None,
946 def status(self, node1='.', node2=None, match=None,
947 ignored=False, clean=False, unknown=False):
947 ignored=False, clean=False, unknown=False):
948 """return status of files between two nodes or node and working directory
948 """return status of files between two nodes or node and working directory
949
949
950 If node1 is None, use the first dirstate parent instead.
950 If node1 is None, use the first dirstate parent instead.
951 If node2 is None, compare node1 with working directory.
951 If node2 is None, compare node1 with working directory.
952 """
952 """
953
953
954 def mfmatches(ctx):
954 def mfmatches(ctx):
955 mf = ctx.manifest().copy()
955 mf = ctx.manifest().copy()
956 for fn in mf.keys():
956 for fn in mf.keys():
957 if not match(fn):
957 if not match(fn):
958 del mf[fn]
958 del mf[fn]
959 return mf
959 return mf
960
960
961 ctx1 = self[node1]
961 ctx1 = self[node1]
962 ctx2 = self[node2]
962 ctx2 = self[node2]
963 working = ctx2 == self[None]
963 working = ctx2 == self[None]
964 parentworking = working and ctx1 == self['.']
964 parentworking = working and ctx1 == self['.']
965 match = match or match_.always(self.root, self.getcwd())
965 match = match or match_.always(self.root, self.getcwd())
966 listignored, listclean, listunknown = ignored, clean, unknown
966 listignored, listclean, listunknown = ignored, clean, unknown
967
967
968 if working: # we need to scan the working dir
968 if working: # we need to scan the working dir
969 s = self.dirstate.status(match, listignored, listclean, listunknown)
969 s = self.dirstate.status(match, listignored, listclean, listunknown)
970 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
970 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
971
971
972 # check for any possibly clean files
972 # check for any possibly clean files
973 if parentworking and cmp:
973 if parentworking and cmp:
974 fixup = []
974 fixup = []
975 # do a full compare of any files that might have changed
975 # do a full compare of any files that might have changed
976 for f in cmp:
976 for f in cmp:
977 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
977 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
978 or ctx1[f].cmp(ctx2[f].data())):
978 or ctx1[f].cmp(ctx2[f].data())):
979 modified.append(f)
979 modified.append(f)
980 else:
980 else:
981 fixup.append(f)
981 fixup.append(f)
982
982
983 if listclean:
983 if listclean:
984 clean += fixup
984 clean += fixup
985
985
986 # update dirstate for files that are actually clean
986 # update dirstate for files that are actually clean
987 if fixup:
987 if fixup:
988 wlock = None
988 wlock = None
989 try:
989 try:
990 try:
990 try:
991 wlock = self.wlock(False)
991 wlock = self.wlock(False)
992 for f in fixup:
992 for f in fixup:
993 self.dirstate.normal(f)
993 self.dirstate.normal(f)
994 except lock.LockException:
994 except lock.LockException:
995 pass
995 pass
996 finally:
996 finally:
997 del wlock
997 del wlock
998
998
999 if not parentworking:
999 if not parentworking:
1000 mf1 = mfmatches(ctx1)
1000 mf1 = mfmatches(ctx1)
1001 if working:
1001 if working:
1002 # we are comparing working dir against non-parent
1002 # we are comparing working dir against non-parent
1003 # generate a pseudo-manifest for the working dir
1003 # generate a pseudo-manifest for the working dir
1004 mf2 = mfmatches(self['.'])
1004 mf2 = mfmatches(self['.'])
1005 for f in cmp + modified + added:
1005 for f in cmp + modified + added:
1006 mf2[f] = None
1006 mf2[f] = None
1007 mf2.set(f, ctx2.flags(f))
1007 mf2.set(f, ctx2.flags(f))
1008 for f in removed:
1008 for f in removed:
1009 if f in mf2:
1009 if f in mf2:
1010 del mf2[f]
1010 del mf2[f]
1011 else:
1011 else:
1012 # we are comparing two revisions
1012 # we are comparing two revisions
1013 deleted, unknown, ignored = [], [], []
1013 deleted, unknown, ignored = [], [], []
1014 mf2 = mfmatches(ctx2)
1014 mf2 = mfmatches(ctx2)
1015
1015
1016 modified, added, clean = [], [], []
1016 modified, added, clean = [], [], []
1017 for fn in mf2:
1017 for fn in mf2:
1018 if fn in mf1:
1018 if fn in mf1:
1019 if (mf1.flags(fn) != mf2.flags(fn) or
1019 if (mf1.flags(fn) != mf2.flags(fn) or
1020 (mf1[fn] != mf2[fn] and
1020 (mf1[fn] != mf2[fn] and
1021 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1021 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1022 modified.append(fn)
1022 modified.append(fn)
1023 elif listclean:
1023 elif listclean:
1024 clean.append(fn)
1024 clean.append(fn)
1025 del mf1[fn]
1025 del mf1[fn]
1026 else:
1026 else:
1027 added.append(fn)
1027 added.append(fn)
1028 removed = mf1.keys()
1028 removed = mf1.keys()
1029
1029
1030 r = modified, added, removed, deleted, unknown, ignored, clean
1030 r = modified, added, removed, deleted, unknown, ignored, clean
1031 [l.sort() for l in r]
1031 [l.sort() for l in r]
1032 return r
1032 return r
1033
1033
1034 def add(self, list):
1034 def add(self, list):
1035 wlock = self.wlock()
1035 wlock = self.wlock()
1036 try:
1036 try:
1037 rejected = []
1037 rejected = []
1038 for f in list:
1038 for f in list:
1039 p = self.wjoin(f)
1039 p = self.wjoin(f)
1040 try:
1040 try:
1041 st = os.lstat(p)
1041 st = os.lstat(p)
1042 except:
1042 except:
1043 self.ui.warn(_("%s does not exist!\n") % f)
1043 self.ui.warn(_("%s does not exist!\n") % f)
1044 rejected.append(f)
1044 rejected.append(f)
1045 continue
1045 continue
1046 if st.st_size > 10000000:
1046 if st.st_size > 10000000:
1047 self.ui.warn(_("%s: files over 10MB may cause memory and"
1047 self.ui.warn(_("%s: files over 10MB may cause memory and"
1048 " performance problems\n"
1048 " performance problems\n"
1049 "(use 'hg revert %s' to unadd the file)\n")
1049 "(use 'hg revert %s' to unadd the file)\n")
1050 % (f, f))
1050 % (f, f))
1051 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1051 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1052 self.ui.warn(_("%s not added: only files and symlinks "
1052 self.ui.warn(_("%s not added: only files and symlinks "
1053 "supported currently\n") % f)
1053 "supported currently\n") % f)
1054 rejected.append(p)
1054 rejected.append(p)
1055 elif self.dirstate[f] in 'amn':
1055 elif self.dirstate[f] in 'amn':
1056 self.ui.warn(_("%s already tracked!\n") % f)
1056 self.ui.warn(_("%s already tracked!\n") % f)
1057 elif self.dirstate[f] == 'r':
1057 elif self.dirstate[f] == 'r':
1058 self.dirstate.normallookup(f)
1058 self.dirstate.normallookup(f)
1059 else:
1059 else:
1060 self.dirstate.add(f)
1060 self.dirstate.add(f)
1061 return rejected
1061 return rejected
1062 finally:
1062 finally:
1063 del wlock
1063 del wlock
1064
1064
1065 def forget(self, list):
1065 def forget(self, list):
1066 wlock = self.wlock()
1066 wlock = self.wlock()
1067 try:
1067 try:
1068 for f in list:
1068 for f in list:
1069 if self.dirstate[f] != 'a':
1069 if self.dirstate[f] != 'a':
1070 self.ui.warn(_("%s not added!\n") % f)
1070 self.ui.warn(_("%s not added!\n") % f)
1071 else:
1071 else:
1072 self.dirstate.forget(f)
1072 self.dirstate.forget(f)
1073 finally:
1073 finally:
1074 del wlock
1074 del wlock
1075
1075
1076 def remove(self, list, unlink=False):
1076 def remove(self, list, unlink=False):
1077 wlock = None
1077 wlock = None
1078 try:
1078 try:
1079 if unlink:
1079 if unlink:
1080 for f in list:
1080 for f in list:
1081 try:
1081 try:
1082 util.unlink(self.wjoin(f))
1082 util.unlink(self.wjoin(f))
1083 except OSError, inst:
1083 except OSError, inst:
1084 if inst.errno != errno.ENOENT:
1084 if inst.errno != errno.ENOENT:
1085 raise
1085 raise
1086 wlock = self.wlock()
1086 wlock = self.wlock()
1087 for f in list:
1087 for f in list:
1088 if unlink and os.path.exists(self.wjoin(f)):
1088 if unlink and os.path.exists(self.wjoin(f)):
1089 self.ui.warn(_("%s still exists!\n") % f)
1089 self.ui.warn(_("%s still exists!\n") % f)
1090 elif self.dirstate[f] == 'a':
1090 elif self.dirstate[f] == 'a':
1091 self.dirstate.forget(f)
1091 self.dirstate.forget(f)
1092 elif f not in self.dirstate:
1092 elif f not in self.dirstate:
1093 self.ui.warn(_("%s not tracked!\n") % f)
1093 self.ui.warn(_("%s not tracked!\n") % f)
1094 else:
1094 else:
1095 self.dirstate.remove(f)
1095 self.dirstate.remove(f)
1096 finally:
1096 finally:
1097 del wlock
1097 del wlock
1098
1098
1099 def undelete(self, list):
1099 def undelete(self, list):
1100 wlock = None
1100 wlock = None
1101 try:
1101 try:
1102 manifests = [self.manifest.read(self.changelog.read(p)[0])
1102 manifests = [self.manifest.read(self.changelog.read(p)[0])
1103 for p in self.dirstate.parents() if p != nullid]
1103 for p in self.dirstate.parents() if p != nullid]
1104 wlock = self.wlock()
1104 wlock = self.wlock()
1105 for f in list:
1105 for f in list:
1106 if self.dirstate[f] != 'r':
1106 if self.dirstate[f] != 'r':
1107 self.ui.warn("%s not removed!\n" % f)
1107 self.ui.warn("%s not removed!\n" % f)
1108 else:
1108 else:
1109 m = f in manifests[0] and manifests[0] or manifests[1]
1109 m = f in manifests[0] and manifests[0] or manifests[1]
1110 t = self.file(f).read(m[f])
1110 t = self.file(f).read(m[f])
1111 self.wwrite(f, t, m.flags(f))
1111 self.wwrite(f, t, m.flags(f))
1112 self.dirstate.normal(f)
1112 self.dirstate.normal(f)
1113 finally:
1113 finally:
1114 del wlock
1114 del wlock
1115
1115
1116 def copy(self, source, dest):
1116 def copy(self, source, dest):
1117 wlock = None
1117 wlock = None
1118 try:
1118 try:
1119 p = self.wjoin(dest)
1119 p = self.wjoin(dest)
1120 if not (os.path.exists(p) or os.path.islink(p)):
1120 if not (os.path.exists(p) or os.path.islink(p)):
1121 self.ui.warn(_("%s does not exist!\n") % dest)
1121 self.ui.warn(_("%s does not exist!\n") % dest)
1122 elif not (os.path.isfile(p) or os.path.islink(p)):
1122 elif not (os.path.isfile(p) or os.path.islink(p)):
1123 self.ui.warn(_("copy failed: %s is not a file or a "
1123 self.ui.warn(_("copy failed: %s is not a file or a "
1124 "symbolic link\n") % dest)
1124 "symbolic link\n") % dest)
1125 else:
1125 else:
1126 wlock = self.wlock()
1126 wlock = self.wlock()
1127 if dest not in self.dirstate:
1127 if dest not in self.dirstate:
1128 self.dirstate.add(dest)
1128 self.dirstate.add(dest)
1129 self.dirstate.copy(source, dest)
1129 self.dirstate.copy(source, dest)
1130 finally:
1130 finally:
1131 del wlock
1131 del wlock
1132
1132
1133 def heads(self, start=None):
1133 def heads(self, start=None):
1134 heads = self.changelog.heads(start)
1134 heads = self.changelog.heads(start)
1135 # sort the output in rev descending order
1135 # sort the output in rev descending order
1136 heads = [(-self.changelog.rev(h), h) for h in heads]
1136 heads = [(-self.changelog.rev(h), h) for h in heads]
1137 return [n for (r, n) in util.sort(heads)]
1137 return [n for (r, n) in util.sort(heads)]
1138
1138
1139 def branchheads(self, branch=None, start=None):
1139 def branchheads(self, branch=None, start=None):
1140 if branch is None:
1140 if branch is None:
1141 branch = self[None].branch()
1141 branch = self[None].branch()
1142 branches = self.branchtags()
1142 branches = self.branchtags()
1143 if branch not in branches:
1143 if branch not in branches:
1144 return []
1144 return []
1145 # The basic algorithm is this:
1145 # The basic algorithm is this:
1146 #
1146 #
1147 # Start from the branch tip since there are no later revisions that can
1147 # Start from the branch tip since there are no later revisions that can
1148 # possibly be in this branch, and the tip is a guaranteed head.
1148 # possibly be in this branch, and the tip is a guaranteed head.
1149 #
1149 #
1150 # Remember the tip's parents as the first ancestors, since these by
1150 # Remember the tip's parents as the first ancestors, since these by
1151 # definition are not heads.
1151 # definition are not heads.
1152 #
1152 #
1153 # Step backwards from the brach tip through all the revisions. We are
1153 # Step backwards from the brach tip through all the revisions. We are
1154 # guaranteed by the rules of Mercurial that we will now be visiting the
1154 # guaranteed by the rules of Mercurial that we will now be visiting the
1155 # nodes in reverse topological order (children before parents).
1155 # nodes in reverse topological order (children before parents).
1156 #
1156 #
1157 # If a revision is one of the ancestors of a head then we can toss it
1157 # If a revision is one of the ancestors of a head then we can toss it
1158 # out of the ancestors set (we've already found it and won't be
1158 # out of the ancestors set (we've already found it and won't be
1159 # visiting it again) and put its parents in the ancestors set.
1159 # visiting it again) and put its parents in the ancestors set.
1160 #
1160 #
1161 # Otherwise, if a revision is in the branch it's another head, since it
1161 # Otherwise, if a revision is in the branch it's another head, since it
1162 # wasn't in the ancestor list of an existing head. So add it to the
1162 # wasn't in the ancestor list of an existing head. So add it to the
1163 # head list, and add its parents to the ancestor list.
1163 # head list, and add its parents to the ancestor list.
1164 #
1164 #
1165 # If it is not in the branch ignore it.
1165 # If it is not in the branch ignore it.
1166 #
1166 #
1167 # Once we have a list of heads, use nodesbetween to filter out all the
1167 # Once we have a list of heads, use nodesbetween to filter out all the
1168 # heads that cannot be reached from startrev. There may be a more
1168 # heads that cannot be reached from startrev. There may be a more
1169 # efficient way to do this as part of the previous algorithm.
1169 # efficient way to do this as part of the previous algorithm.
1170
1170
1171 set = util.set
1171 set = util.set
1172 heads = [self.changelog.rev(branches[branch])]
1172 heads = [self.changelog.rev(branches[branch])]
1173 # Don't care if ancestors contains nullrev or not.
1173 # Don't care if ancestors contains nullrev or not.
1174 ancestors = set(self.changelog.parentrevs(heads[0]))
1174 ancestors = set(self.changelog.parentrevs(heads[0]))
1175 for rev in xrange(heads[0] - 1, nullrev, -1):
1175 for rev in xrange(heads[0] - 1, nullrev, -1):
1176 if rev in ancestors:
1176 if rev in ancestors:
1177 ancestors.update(self.changelog.parentrevs(rev))
1177 ancestors.update(self.changelog.parentrevs(rev))
1178 ancestors.remove(rev)
1178 ancestors.remove(rev)
1179 elif self[rev].branch() == branch:
1179 elif self[rev].branch() == branch:
1180 heads.append(rev)
1180 heads.append(rev)
1181 ancestors.update(self.changelog.parentrevs(rev))
1181 ancestors.update(self.changelog.parentrevs(rev))
1182 heads = [self.changelog.node(rev) for rev in heads]
1182 heads = [self.changelog.node(rev) for rev in heads]
1183 if start is not None:
1183 if start is not None:
1184 heads = self.changelog.nodesbetween([start], heads)[2]
1184 heads = self.changelog.nodesbetween([start], heads)[2]
1185 return heads
1185 return heads
1186
1186
1187 def branches(self, nodes):
1187 def branches(self, nodes):
1188 if not nodes:
1188 if not nodes:
1189 nodes = [self.changelog.tip()]
1189 nodes = [self.changelog.tip()]
1190 b = []
1190 b = []
1191 for n in nodes:
1191 for n in nodes:
1192 t = n
1192 t = n
1193 while 1:
1193 while 1:
1194 p = self.changelog.parents(n)
1194 p = self.changelog.parents(n)
1195 if p[1] != nullid or p[0] == nullid:
1195 if p[1] != nullid or p[0] == nullid:
1196 b.append((t, n, p[0], p[1]))
1196 b.append((t, n, p[0], p[1]))
1197 break
1197 break
1198 n = p[0]
1198 n = p[0]
1199 return b
1199 return b
1200
1200
1201 def between(self, pairs):
1201 def between(self, pairs):
1202 r = []
1202 r = []
1203
1203
1204 for top, bottom in pairs:
1204 for top, bottom in pairs:
1205 n, l, i = top, [], 0
1205 n, l, i = top, [], 0
1206 f = 1
1206 f = 1
1207
1207
1208 while n != bottom:
1208 while n != bottom:
1209 p = self.changelog.parents(n)[0]
1209 p = self.changelog.parents(n)[0]
1210 if i == f:
1210 if i == f:
1211 l.append(n)
1211 l.append(n)
1212 f = f * 2
1212 f = f * 2
1213 n = p
1213 n = p
1214 i += 1
1214 i += 1
1215
1215
1216 r.append(l)
1216 r.append(l)
1217
1217
1218 return r
1218 return r
1219
1219
1220 def findincoming(self, remote, base=None, heads=None, force=False):
1220 def findincoming(self, remote, base=None, heads=None, force=False):
1221 """Return list of roots of the subsets of missing nodes from remote
1221 """Return list of roots of the subsets of missing nodes from remote
1222
1222
1223 If base dict is specified, assume that these nodes and their parents
1223 If base dict is specified, assume that these nodes and their parents
1224 exist on the remote side and that no child of a node of base exists
1224 exist on the remote side and that no child of a node of base exists
1225 in both remote and self.
1225 in both remote and self.
1226 Furthermore base will be updated to include the nodes that exists
1226 Furthermore base will be updated to include the nodes that exists
1227 in self and remote but no children exists in self and remote.
1227 in self and remote but no children exists in self and remote.
1228 If a list of heads is specified, return only nodes which are heads
1228 If a list of heads is specified, return only nodes which are heads
1229 or ancestors of these heads.
1229 or ancestors of these heads.
1230
1230
1231 All the ancestors of base are in self and in remote.
1231 All the ancestors of base are in self and in remote.
1232 All the descendants of the list returned are missing in self.
1232 All the descendants of the list returned are missing in self.
1233 (and so we know that the rest of the nodes are missing in remote, see
1233 (and so we know that the rest of the nodes are missing in remote, see
1234 outgoing)
1234 outgoing)
1235 """
1235 """
1236 m = self.changelog.nodemap
1236 m = self.changelog.nodemap
1237 search = []
1237 search = []
1238 fetch = {}
1238 fetch = {}
1239 seen = {}
1239 seen = {}
1240 seenbranch = {}
1240 seenbranch = {}
1241 if base == None:
1241 if base == None:
1242 base = {}
1242 base = {}
1243
1243
1244 if not heads:
1244 if not heads:
1245 heads = remote.heads()
1245 heads = remote.heads()
1246
1246
1247 if self.changelog.tip() == nullid:
1247 if self.changelog.tip() == nullid:
1248 base[nullid] = 1
1248 base[nullid] = 1
1249 if heads != [nullid]:
1249 if heads != [nullid]:
1250 return [nullid]
1250 return [nullid]
1251 return []
1251 return []
1252
1252
1253 # assume we're closer to the tip than the root
1253 # assume we're closer to the tip than the root
1254 # and start by examining the heads
1254 # and start by examining the heads
1255 self.ui.status(_("searching for changes\n"))
1255 self.ui.status(_("searching for changes\n"))
1256
1256
1257 unknown = []
1257 unknown = []
1258 for h in heads:
1258 for h in heads:
1259 if h not in m:
1259 if h not in m:
1260 unknown.append(h)
1260 unknown.append(h)
1261 else:
1261 else:
1262 base[h] = 1
1262 base[h] = 1
1263
1263
1264 if not unknown:
1264 if not unknown:
1265 return []
1265 return []
1266
1266
1267 req = dict.fromkeys(unknown)
1267 req = dict.fromkeys(unknown)
1268 reqcnt = 0
1268 reqcnt = 0
1269
1269
1270 # search through remote branches
1270 # search through remote branches
1271 # a 'branch' here is a linear segment of history, with four parts:
1271 # a 'branch' here is a linear segment of history, with four parts:
1272 # head, root, first parent, second parent
1272 # head, root, first parent, second parent
1273 # (a branch always has two parents (or none) by definition)
1273 # (a branch always has two parents (or none) by definition)
1274 unknown = remote.branches(unknown)
1274 unknown = remote.branches(unknown)
1275 while unknown:
1275 while unknown:
1276 r = []
1276 r = []
1277 while unknown:
1277 while unknown:
1278 n = unknown.pop(0)
1278 n = unknown.pop(0)
1279 if n[0] in seen:
1279 if n[0] in seen:
1280 continue
1280 continue
1281
1281
1282 self.ui.debug(_("examining %s:%s\n")
1282 self.ui.debug(_("examining %s:%s\n")
1283 % (short(n[0]), short(n[1])))
1283 % (short(n[0]), short(n[1])))
1284 if n[0] == nullid: # found the end of the branch
1284 if n[0] == nullid: # found the end of the branch
1285 pass
1285 pass
1286 elif n in seenbranch:
1286 elif n in seenbranch:
1287 self.ui.debug(_("branch already found\n"))
1287 self.ui.debug(_("branch already found\n"))
1288 continue
1288 continue
1289 elif n[1] and n[1] in m: # do we know the base?
1289 elif n[1] and n[1] in m: # do we know the base?
1290 self.ui.debug(_("found incomplete branch %s:%s\n")
1290 self.ui.debug(_("found incomplete branch %s:%s\n")
1291 % (short(n[0]), short(n[1])))
1291 % (short(n[0]), short(n[1])))
1292 search.append(n) # schedule branch range for scanning
1292 search.append(n) # schedule branch range for scanning
1293 seenbranch[n] = 1
1293 seenbranch[n] = 1
1294 else:
1294 else:
1295 if n[1] not in seen and n[1] not in fetch:
1295 if n[1] not in seen and n[1] not in fetch:
1296 if n[2] in m and n[3] in m:
1296 if n[2] in m and n[3] in m:
1297 self.ui.debug(_("found new changeset %s\n") %
1297 self.ui.debug(_("found new changeset %s\n") %
1298 short(n[1]))
1298 short(n[1]))
1299 fetch[n[1]] = 1 # earliest unknown
1299 fetch[n[1]] = 1 # earliest unknown
1300 for p in n[2:4]:
1300 for p in n[2:4]:
1301 if p in m:
1301 if p in m:
1302 base[p] = 1 # latest known
1302 base[p] = 1 # latest known
1303
1303
1304 for p in n[2:4]:
1304 for p in n[2:4]:
1305 if p not in req and p not in m:
1305 if p not in req and p not in m:
1306 r.append(p)
1306 r.append(p)
1307 req[p] = 1
1307 req[p] = 1
1308 seen[n[0]] = 1
1308 seen[n[0]] = 1
1309
1309
1310 if r:
1310 if r:
1311 reqcnt += 1
1311 reqcnt += 1
1312 self.ui.debug(_("request %d: %s\n") %
1312 self.ui.debug(_("request %d: %s\n") %
1313 (reqcnt, " ".join(map(short, r))))
1313 (reqcnt, " ".join(map(short, r))))
1314 for p in xrange(0, len(r), 10):
1314 for p in xrange(0, len(r), 10):
1315 for b in remote.branches(r[p:p+10]):
1315 for b in remote.branches(r[p:p+10]):
1316 self.ui.debug(_("received %s:%s\n") %
1316 self.ui.debug(_("received %s:%s\n") %
1317 (short(b[0]), short(b[1])))
1317 (short(b[0]), short(b[1])))
1318 unknown.append(b)
1318 unknown.append(b)
1319
1319
1320 # do binary search on the branches we found
1320 # do binary search on the branches we found
1321 while search:
1321 while search:
1322 n = search.pop(0)
1322 n = search.pop(0)
1323 reqcnt += 1
1323 reqcnt += 1
1324 l = remote.between([(n[0], n[1])])[0]
1324 l = remote.between([(n[0], n[1])])[0]
1325 l.append(n[1])
1325 l.append(n[1])
1326 p = n[0]
1326 p = n[0]
1327 f = 1
1327 f = 1
1328 for i in l:
1328 for i in l:
1329 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1329 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1330 if i in m:
1330 if i in m:
1331 if f <= 2:
1331 if f <= 2:
1332 self.ui.debug(_("found new branch changeset %s\n") %
1332 self.ui.debug(_("found new branch changeset %s\n") %
1333 short(p))
1333 short(p))
1334 fetch[p] = 1
1334 fetch[p] = 1
1335 base[i] = 1
1335 base[i] = 1
1336 else:
1336 else:
1337 self.ui.debug(_("narrowed branch search to %s:%s\n")
1337 self.ui.debug(_("narrowed branch search to %s:%s\n")
1338 % (short(p), short(i)))
1338 % (short(p), short(i)))
1339 search.append((p, i))
1339 search.append((p, i))
1340 break
1340 break
1341 p, f = i, f * 2
1341 p, f = i, f * 2
1342
1342
1343 # sanity check our fetch list
1343 # sanity check our fetch list
1344 for f in fetch.keys():
1344 for f in fetch.keys():
1345 if f in m:
1345 if f in m:
1346 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1346 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1347
1347
1348 if base.keys() == [nullid]:
1348 if base.keys() == [nullid]:
1349 if force:
1349 if force:
1350 self.ui.warn(_("warning: repository is unrelated\n"))
1350 self.ui.warn(_("warning: repository is unrelated\n"))
1351 else:
1351 else:
1352 raise util.Abort(_("repository is unrelated"))
1352 raise util.Abort(_("repository is unrelated"))
1353
1353
1354 self.ui.debug(_("found new changesets starting at ") +
1354 self.ui.debug(_("found new changesets starting at ") +
1355 " ".join([short(f) for f in fetch]) + "\n")
1355 " ".join([short(f) for f in fetch]) + "\n")
1356
1356
1357 self.ui.debug(_("%d total queries\n") % reqcnt)
1357 self.ui.debug(_("%d total queries\n") % reqcnt)
1358
1358
1359 return fetch.keys()
1359 return fetch.keys()
1360
1360
1361 def findoutgoing(self, remote, base=None, heads=None, force=False):
1361 def findoutgoing(self, remote, base=None, heads=None, force=False):
1362 """Return list of nodes that are roots of subsets not in remote
1362 """Return list of nodes that are roots of subsets not in remote
1363
1363
1364 If base dict is specified, assume that these nodes and their parents
1364 If base dict is specified, assume that these nodes and their parents
1365 exist on the remote side.
1365 exist on the remote side.
1366 If a list of heads is specified, return only nodes which are heads
1366 If a list of heads is specified, return only nodes which are heads
1367 or ancestors of these heads, and return a second element which
1367 or ancestors of these heads, and return a second element which
1368 contains all remote heads which get new children.
1368 contains all remote heads which get new children.
1369 """
1369 """
1370 if base == None:
1370 if base == None:
1371 base = {}
1371 base = {}
1372 self.findincoming(remote, base, heads, force=force)
1372 self.findincoming(remote, base, heads, force=force)
1373
1373
1374 self.ui.debug(_("common changesets up to ")
1374 self.ui.debug(_("common changesets up to ")
1375 + " ".join(map(short, base.keys())) + "\n")
1375 + " ".join(map(short, base.keys())) + "\n")
1376
1376
1377 remain = dict.fromkeys(self.changelog.nodemap)
1377 remain = dict.fromkeys(self.changelog.nodemap)
1378
1378
1379 # prune everything remote has from the tree
1379 # prune everything remote has from the tree
1380 del remain[nullid]
1380 del remain[nullid]
1381 remove = base.keys()
1381 remove = base.keys()
1382 while remove:
1382 while remove:
1383 n = remove.pop(0)
1383 n = remove.pop(0)
1384 if n in remain:
1384 if n in remain:
1385 del remain[n]
1385 del remain[n]
1386 for p in self.changelog.parents(n):
1386 for p in self.changelog.parents(n):
1387 remove.append(p)
1387 remove.append(p)
1388
1388
1389 # find every node whose parents have been pruned
1389 # find every node whose parents have been pruned
1390 subset = []
1390 subset = []
1391 # find every remote head that will get new children
1391 # find every remote head that will get new children
1392 updated_heads = {}
1392 updated_heads = {}
1393 for n in remain:
1393 for n in remain:
1394 p1, p2 = self.changelog.parents(n)
1394 p1, p2 = self.changelog.parents(n)
1395 if p1 not in remain and p2 not in remain:
1395 if p1 not in remain and p2 not in remain:
1396 subset.append(n)
1396 subset.append(n)
1397 if heads:
1397 if heads:
1398 if p1 in heads:
1398 if p1 in heads:
1399 updated_heads[p1] = True
1399 updated_heads[p1] = True
1400 if p2 in heads:
1400 if p2 in heads:
1401 updated_heads[p2] = True
1401 updated_heads[p2] = True
1402
1402
1403 # this is the set of all roots we have to push
1403 # this is the set of all roots we have to push
1404 if heads:
1404 if heads:
1405 return subset, updated_heads.keys()
1405 return subset, updated_heads.keys()
1406 else:
1406 else:
1407 return subset
1407 return subset
1408
1408
1409 def pull(self, remote, heads=None, force=False):
1409 def pull(self, remote, heads=None, force=False):
1410 lock = self.lock()
1410 lock = self.lock()
1411 try:
1411 try:
1412 fetch = self.findincoming(remote, heads=heads, force=force)
1412 fetch = self.findincoming(remote, heads=heads, force=force)
1413 if fetch == [nullid]:
1413 if fetch == [nullid]:
1414 self.ui.status(_("requesting all changes\n"))
1414 self.ui.status(_("requesting all changes\n"))
1415
1415
1416 if not fetch:
1416 if not fetch:
1417 self.ui.status(_("no changes found\n"))
1417 self.ui.status(_("no changes found\n"))
1418 return 0
1418 return 0
1419
1419
1420 if heads is None:
1420 if heads is None:
1421 cg = remote.changegroup(fetch, 'pull')
1421 cg = remote.changegroup(fetch, 'pull')
1422 else:
1422 else:
1423 if 'changegroupsubset' not in remote.capabilities:
1423 if 'changegroupsubset' not in remote.capabilities:
1424 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1424 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1425 cg = remote.changegroupsubset(fetch, heads, 'pull')
1425 cg = remote.changegroupsubset(fetch, heads, 'pull')
1426 return self.addchangegroup(cg, 'pull', remote.url())
1426 return self.addchangegroup(cg, 'pull', remote.url())
1427 finally:
1427 finally:
1428 del lock
1428 del lock
1429
1429
1430 def push(self, remote, force=False, revs=None):
1430 def push(self, remote, force=False, revs=None):
1431 # there are two ways to push to remote repo:
1431 # there are two ways to push to remote repo:
1432 #
1432 #
1433 # addchangegroup assumes local user can lock remote
1433 # addchangegroup assumes local user can lock remote
1434 # repo (local filesystem, old ssh servers).
1434 # repo (local filesystem, old ssh servers).
1435 #
1435 #
1436 # unbundle assumes local user cannot lock remote repo (new ssh
1436 # unbundle assumes local user cannot lock remote repo (new ssh
1437 # servers, http servers).
1437 # servers, http servers).
1438
1438
1439 if remote.capable('unbundle'):
1439 if remote.capable('unbundle'):
1440 return self.push_unbundle(remote, force, revs)
1440 return self.push_unbundle(remote, force, revs)
1441 return self.push_addchangegroup(remote, force, revs)
1441 return self.push_addchangegroup(remote, force, revs)
1442
1442
1443 def prepush(self, remote, force, revs):
1443 def prepush(self, remote, force, revs):
1444 base = {}
1444 base = {}
1445 remote_heads = remote.heads()
1445 remote_heads = remote.heads()
1446 inc = self.findincoming(remote, base, remote_heads, force=force)
1446 inc = self.findincoming(remote, base, remote_heads, force=force)
1447
1447
1448 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1448 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1449 if revs is not None:
1449 if revs is not None:
1450 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1450 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1451 else:
1451 else:
1452 bases, heads = update, self.changelog.heads()
1452 bases, heads = update, self.changelog.heads()
1453
1453
1454 if not bases:
1454 if not bases:
1455 self.ui.status(_("no changes found\n"))
1455 self.ui.status(_("no changes found\n"))
1456 return None, 1
1456 return None, 1
1457 elif not force:
1457 elif not force:
1458 # check if we're creating new remote heads
1458 # check if we're creating new remote heads
1459 # to be a remote head after push, node must be either
1459 # to be a remote head after push, node must be either
1460 # - unknown locally
1460 # - unknown locally
1461 # - a local outgoing head descended from update
1461 # - a local outgoing head descended from update
1462 # - a remote head that's known locally and not
1462 # - a remote head that's known locally and not
1463 # ancestral to an outgoing head
1463 # ancestral to an outgoing head
1464
1464
1465 warn = 0
1465 warn = 0
1466
1466
1467 if remote_heads == [nullid]:
1467 if remote_heads == [nullid]:
1468 warn = 0
1468 warn = 0
1469 elif not revs and len(heads) > len(remote_heads):
1469 elif not revs and len(heads) > len(remote_heads):
1470 warn = 1
1470 warn = 1
1471 else:
1471 else:
1472 newheads = list(heads)
1472 newheads = list(heads)
1473 for r in remote_heads:
1473 for r in remote_heads:
1474 if r in self.changelog.nodemap:
1474 if r in self.changelog.nodemap:
1475 desc = self.changelog.heads(r, heads)
1475 desc = self.changelog.heads(r, heads)
1476 l = [h for h in heads if h in desc]
1476 l = [h for h in heads if h in desc]
1477 if not l:
1477 if not l:
1478 newheads.append(r)
1478 newheads.append(r)
1479 else:
1479 else:
1480 newheads.append(r)
1480 newheads.append(r)
1481 if len(newheads) > len(remote_heads):
1481 if len(newheads) > len(remote_heads):
1482 warn = 1
1482 warn = 1
1483
1483
1484 if warn:
1484 if warn:
1485 self.ui.warn(_("abort: push creates new remote heads!\n"))
1485 self.ui.warn(_("abort: push creates new remote heads!\n"))
1486 self.ui.status(_("(did you forget to merge?"
1486 self.ui.status(_("(did you forget to merge?"
1487 " use push -f to force)\n"))
1487 " use push -f to force)\n"))
1488 return None, 0
1488 return None, 0
1489 elif inc:
1489 elif inc:
1490 self.ui.warn(_("note: unsynced remote changes!\n"))
1490 self.ui.warn(_("note: unsynced remote changes!\n"))
1491
1491
1492
1492
1493 if revs is None:
1493 if revs is None:
1494 cg = self.changegroup(update, 'push')
1494 cg = self.changegroup(update, 'push')
1495 else:
1495 else:
1496 cg = self.changegroupsubset(update, revs, 'push')
1496 cg = self.changegroupsubset(update, revs, 'push')
1497 return cg, remote_heads
1497 return cg, remote_heads
1498
1498
1499 def push_addchangegroup(self, remote, force, revs):
1499 def push_addchangegroup(self, remote, force, revs):
1500 lock = remote.lock()
1500 lock = remote.lock()
1501 try:
1501 try:
1502 ret = self.prepush(remote, force, revs)
1502 ret = self.prepush(remote, force, revs)
1503 if ret[0] is not None:
1503 if ret[0] is not None:
1504 cg, remote_heads = ret
1504 cg, remote_heads = ret
1505 return remote.addchangegroup(cg, 'push', self.url())
1505 return remote.addchangegroup(cg, 'push', self.url())
1506 return ret[1]
1506 return ret[1]
1507 finally:
1507 finally:
1508 del lock
1508 del lock
1509
1509
1510 def push_unbundle(self, remote, force, revs):
1510 def push_unbundle(self, remote, force, revs):
1511 # local repo finds heads on server, finds out what revs it
1511 # local repo finds heads on server, finds out what revs it
1512 # must push. once revs transferred, if server finds it has
1512 # must push. once revs transferred, if server finds it has
1513 # different heads (someone else won commit/push race), server
1513 # different heads (someone else won commit/push race), server
1514 # aborts.
1514 # aborts.
1515
1515
1516 ret = self.prepush(remote, force, revs)
1516 ret = self.prepush(remote, force, revs)
1517 if ret[0] is not None:
1517 if ret[0] is not None:
1518 cg, remote_heads = ret
1518 cg, remote_heads = ret
1519 if force: remote_heads = ['force']
1519 if force: remote_heads = ['force']
1520 return remote.unbundle(cg, remote_heads, 'push')
1520 return remote.unbundle(cg, remote_heads, 'push')
1521 return ret[1]
1521 return ret[1]
1522
1522
1523 def changegroupinfo(self, nodes, source):
1523 def changegroupinfo(self, nodes, source):
1524 if self.ui.verbose or source == 'bundle':
1524 if self.ui.verbose or source == 'bundle':
1525 self.ui.status(_("%d changesets found\n") % len(nodes))
1525 self.ui.status(_("%d changesets found\n") % len(nodes))
1526 if self.ui.debugflag:
1526 if self.ui.debugflag:
1527 self.ui.debug(_("List of changesets:\n"))
1527 self.ui.debug(_("List of changesets:\n"))
1528 for node in nodes:
1528 for node in nodes:
1529 self.ui.debug("%s\n" % hex(node))
1529 self.ui.debug("%s\n" % hex(node))
1530
1530
1531 def changegroupsubset(self, bases, heads, source, extranodes=None):
1531 def changegroupsubset(self, bases, heads, source, extranodes=None):
1532 """This function generates a changegroup consisting of all the nodes
1532 """This function generates a changegroup consisting of all the nodes
1533 that are descendents of any of the bases, and ancestors of any of
1533 that are descendents of any of the bases, and ancestors of any of
1534 the heads.
1534 the heads.
1535
1535
1536 It is fairly complex as determining which filenodes and which
1536 It is fairly complex as determining which filenodes and which
1537 manifest nodes need to be included for the changeset to be complete
1537 manifest nodes need to be included for the changeset to be complete
1538 is non-trivial.
1538 is non-trivial.
1539
1539
1540 Another wrinkle is doing the reverse, figuring out which changeset in
1540 Another wrinkle is doing the reverse, figuring out which changeset in
1541 the changegroup a particular filenode or manifestnode belongs to.
1541 the changegroup a particular filenode or manifestnode belongs to.
1542
1542
1543 The caller can specify some nodes that must be included in the
1543 The caller can specify some nodes that must be included in the
1544 changegroup using the extranodes argument. It should be a dict
1544 changegroup using the extranodes argument. It should be a dict
1545 where the keys are the filenames (or 1 for the manifest), and the
1545 where the keys are the filenames (or 1 for the manifest), and the
1546 values are lists of (node, linknode) tuples, where node is a wanted
1546 values are lists of (node, linknode) tuples, where node is a wanted
1547 node and linknode is the changelog node that should be transmitted as
1547 node and linknode is the changelog node that should be transmitted as
1548 the linkrev.
1548 the linkrev.
1549 """
1549 """
1550
1550
1551 self.hook('preoutgoing', throw=True, source=source)
1551 self.hook('preoutgoing', throw=True, source=source)
1552
1552
1553 # Set up some initial variables
1553 # Set up some initial variables
1554 # Make it easy to refer to self.changelog
1554 # Make it easy to refer to self.changelog
1555 cl = self.changelog
1555 cl = self.changelog
1556 # msng is short for missing - compute the list of changesets in this
1556 # msng is short for missing - compute the list of changesets in this
1557 # changegroup.
1557 # changegroup.
1558 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1558 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1559 self.changegroupinfo(msng_cl_lst, source)
1559 self.changegroupinfo(msng_cl_lst, source)
1560 # Some bases may turn out to be superfluous, and some heads may be
1560 # Some bases may turn out to be superfluous, and some heads may be
1561 # too. nodesbetween will return the minimal set of bases and heads
1561 # too. nodesbetween will return the minimal set of bases and heads
1562 # necessary to re-create the changegroup.
1562 # necessary to re-create the changegroup.
1563
1563
1564 # Known heads are the list of heads that it is assumed the recipient
1564 # Known heads are the list of heads that it is assumed the recipient
1565 # of this changegroup will know about.
1565 # of this changegroup will know about.
1566 knownheads = {}
1566 knownheads = {}
1567 # We assume that all parents of bases are known heads.
1567 # We assume that all parents of bases are known heads.
1568 for n in bases:
1568 for n in bases:
1569 for p in cl.parents(n):
1569 for p in cl.parents(n):
1570 if p != nullid:
1570 if p != nullid:
1571 knownheads[p] = 1
1571 knownheads[p] = 1
1572 knownheads = knownheads.keys()
1572 knownheads = knownheads.keys()
1573 if knownheads:
1573 if knownheads:
1574 # Now that we know what heads are known, we can compute which
1574 # Now that we know what heads are known, we can compute which
1575 # changesets are known. The recipient must know about all
1575 # changesets are known. The recipient must know about all
1576 # changesets required to reach the known heads from the null
1576 # changesets required to reach the known heads from the null
1577 # changeset.
1577 # changeset.
1578 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1578 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1579 junk = None
1579 junk = None
1580 # Transform the list into an ersatz set.
1580 # Transform the list into an ersatz set.
1581 has_cl_set = dict.fromkeys(has_cl_set)
1581 has_cl_set = dict.fromkeys(has_cl_set)
1582 else:
1582 else:
1583 # If there were no known heads, the recipient cannot be assumed to
1583 # If there were no known heads, the recipient cannot be assumed to
1584 # know about any changesets.
1584 # know about any changesets.
1585 has_cl_set = {}
1585 has_cl_set = {}
1586
1586
1587 # Make it easy to refer to self.manifest
1587 # Make it easy to refer to self.manifest
1588 mnfst = self.manifest
1588 mnfst = self.manifest
1589 # We don't know which manifests are missing yet
1589 # We don't know which manifests are missing yet
1590 msng_mnfst_set = {}
1590 msng_mnfst_set = {}
1591 # Nor do we know which filenodes are missing.
1591 # Nor do we know which filenodes are missing.
1592 msng_filenode_set = {}
1592 msng_filenode_set = {}
1593
1593
1594 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1594 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1595 junk = None
1595 junk = None
1596
1596
1597 # A changeset always belongs to itself, so the changenode lookup
1597 # A changeset always belongs to itself, so the changenode lookup
1598 # function for a changenode is identity.
1598 # function for a changenode is identity.
1599 def identity(x):
1599 def identity(x):
1600 return x
1600 return x
1601
1601
1602 # A function generating function. Sets up an environment for the
1602 # A function generating function. Sets up an environment for the
1603 # inner function.
1603 # inner function.
1604 def cmp_by_rev_func(revlog):
1604 def cmp_by_rev_func(revlog):
1605 # Compare two nodes by their revision number in the environment's
1605 # Compare two nodes by their revision number in the environment's
1606 # revision history. Since the revision number both represents the
1606 # revision history. Since the revision number both represents the
1607 # most efficient order to read the nodes in, and represents a
1607 # most efficient order to read the nodes in, and represents a
1608 # topological sorting of the nodes, this function is often useful.
1608 # topological sorting of the nodes, this function is often useful.
1609 def cmp_by_rev(a, b):
1609 def cmp_by_rev(a, b):
1610 return cmp(revlog.rev(a), revlog.rev(b))
1610 return cmp(revlog.rev(a), revlog.rev(b))
1611 return cmp_by_rev
1611 return cmp_by_rev
1612
1612
1613 # If we determine that a particular file or manifest node must be a
1613 # If we determine that a particular file or manifest node must be a
1614 # node that the recipient of the changegroup will already have, we can
1614 # node that the recipient of the changegroup will already have, we can
1615 # also assume the recipient will have all the parents. This function
1615 # also assume the recipient will have all the parents. This function
1616 # prunes them from the set of missing nodes.
1616 # prunes them from the set of missing nodes.
1617 def prune_parents(revlog, hasset, msngset):
1617 def prune_parents(revlog, hasset, msngset):
1618 haslst = hasset.keys()
1618 haslst = hasset.keys()
1619 haslst.sort(cmp_by_rev_func(revlog))
1619 haslst.sort(cmp_by_rev_func(revlog))
1620 for node in haslst:
1620 for node in haslst:
1621 parentlst = [p for p in revlog.parents(node) if p != nullid]
1621 parentlst = [p for p in revlog.parents(node) if p != nullid]
1622 while parentlst:
1622 while parentlst:
1623 n = parentlst.pop()
1623 n = parentlst.pop()
1624 if n not in hasset:
1624 if n not in hasset:
1625 hasset[n] = 1
1625 hasset[n] = 1
1626 p = [p for p in revlog.parents(n) if p != nullid]
1626 p = [p for p in revlog.parents(n) if p != nullid]
1627 parentlst.extend(p)
1627 parentlst.extend(p)
1628 for n in hasset:
1628 for n in hasset:
1629 msngset.pop(n, None)
1629 msngset.pop(n, None)
1630
1630
1631 # This is a function generating function used to set up an environment
1631 # This is a function generating function used to set up an environment
1632 # for the inner function to execute in.
1632 # for the inner function to execute in.
1633 def manifest_and_file_collector(changedfileset):
1633 def manifest_and_file_collector(changedfileset):
1634 # This is an information gathering function that gathers
1634 # This is an information gathering function that gathers
1635 # information from each changeset node that goes out as part of
1635 # information from each changeset node that goes out as part of
1636 # the changegroup. The information gathered is a list of which
1636 # the changegroup. The information gathered is a list of which
1637 # manifest nodes are potentially required (the recipient may
1637 # manifest nodes are potentially required (the recipient may
1638 # already have them) and total list of all files which were
1638 # already have them) and total list of all files which were
1639 # changed in any changeset in the changegroup.
1639 # changed in any changeset in the changegroup.
1640 #
1640 #
1641 # We also remember the first changenode we saw any manifest
1641 # We also remember the first changenode we saw any manifest
1642 # referenced by so we can later determine which changenode 'owns'
1642 # referenced by so we can later determine which changenode 'owns'
1643 # the manifest.
1643 # the manifest.
1644 def collect_manifests_and_files(clnode):
1644 def collect_manifests_and_files(clnode):
1645 c = cl.read(clnode)
1645 c = cl.read(clnode)
1646 for f in c[3]:
1646 for f in c[3]:
1647 # This is to make sure we only have one instance of each
1647 # This is to make sure we only have one instance of each
1648 # filename string for each filename.
1648 # filename string for each filename.
1649 changedfileset.setdefault(f, f)
1649 changedfileset.setdefault(f, f)
1650 msng_mnfst_set.setdefault(c[0], clnode)
1650 msng_mnfst_set.setdefault(c[0], clnode)
1651 return collect_manifests_and_files
1651 return collect_manifests_and_files
1652
1652
1653 # Figure out which manifest nodes (of the ones we think might be part
1653 # Figure out which manifest nodes (of the ones we think might be part
1654 # of the changegroup) the recipient must know about and remove them
1654 # of the changegroup) the recipient must know about and remove them
1655 # from the changegroup.
1655 # from the changegroup.
1656 def prune_manifests():
1656 def prune_manifests():
1657 has_mnfst_set = {}
1657 has_mnfst_set = {}
1658 for n in msng_mnfst_set:
1658 for n in msng_mnfst_set:
1659 # If a 'missing' manifest thinks it belongs to a changenode
1659 # If a 'missing' manifest thinks it belongs to a changenode
1660 # the recipient is assumed to have, obviously the recipient
1660 # the recipient is assumed to have, obviously the recipient
1661 # must have that manifest.
1661 # must have that manifest.
1662 linknode = cl.node(mnfst.linkrev(n))
1662 linknode = cl.node(mnfst.linkrev(n))
1663 if linknode in has_cl_set:
1663 if linknode in has_cl_set:
1664 has_mnfst_set[n] = 1
1664 has_mnfst_set[n] = 1
1665 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1665 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1666
1666
1667 # Use the information collected in collect_manifests_and_files to say
1667 # Use the information collected in collect_manifests_and_files to say
1668 # which changenode any manifestnode belongs to.
1668 # which changenode any manifestnode belongs to.
1669 def lookup_manifest_link(mnfstnode):
1669 def lookup_manifest_link(mnfstnode):
1670 return msng_mnfst_set[mnfstnode]
1670 return msng_mnfst_set[mnfstnode]
1671
1671
1672 # A function generating function that sets up the initial environment
1672 # A function generating function that sets up the initial environment
1673 # the inner function.
1673 # the inner function.
1674 def filenode_collector(changedfiles):
1674 def filenode_collector(changedfiles):
1675 next_rev = [0]
1675 next_rev = [0]
1676 # This gathers information from each manifestnode included in the
1676 # This gathers information from each manifestnode included in the
1677 # changegroup about which filenodes the manifest node references
1677 # changegroup about which filenodes the manifest node references
1678 # so we can include those in the changegroup too.
1678 # so we can include those in the changegroup too.
1679 #
1679 #
1680 # It also remembers which changenode each filenode belongs to. It
1680 # It also remembers which changenode each filenode belongs to. It
1681 # does this by assuming the a filenode belongs to the changenode
1681 # does this by assuming the a filenode belongs to the changenode
1682 # the first manifest that references it belongs to.
1682 # the first manifest that references it belongs to.
1683 def collect_msng_filenodes(mnfstnode):
1683 def collect_msng_filenodes(mnfstnode):
1684 r = mnfst.rev(mnfstnode)
1684 r = mnfst.rev(mnfstnode)
1685 if r == next_rev[0]:
1685 if r == next_rev[0]:
1686 # If the last rev we looked at was the one just previous,
1686 # If the last rev we looked at was the one just previous,
1687 # we only need to see a diff.
1687 # we only need to see a diff.
1688 deltamf = mnfst.readdelta(mnfstnode)
1688 deltamf = mnfst.readdelta(mnfstnode)
1689 # For each line in the delta
1689 # For each line in the delta
1690 for f, fnode in deltamf.items():
1690 for f, fnode in deltamf.items():
1691 f = changedfiles.get(f, None)
1691 f = changedfiles.get(f, None)
1692 # And if the file is in the list of files we care
1692 # And if the file is in the list of files we care
1693 # about.
1693 # about.
1694 if f is not None:
1694 if f is not None:
1695 # Get the changenode this manifest belongs to
1695 # Get the changenode this manifest belongs to
1696 clnode = msng_mnfst_set[mnfstnode]
1696 clnode = msng_mnfst_set[mnfstnode]
1697 # Create the set of filenodes for the file if
1697 # Create the set of filenodes for the file if
1698 # there isn't one already.
1698 # there isn't one already.
1699 ndset = msng_filenode_set.setdefault(f, {})
1699 ndset = msng_filenode_set.setdefault(f, {})
1700 # And set the filenode's changelog node to the
1700 # And set the filenode's changelog node to the
1701 # manifest's if it hasn't been set already.
1701 # manifest's if it hasn't been set already.
1702 ndset.setdefault(fnode, clnode)
1702 ndset.setdefault(fnode, clnode)
1703 else:
1703 else:
1704 # Otherwise we need a full manifest.
1704 # Otherwise we need a full manifest.
1705 m = mnfst.read(mnfstnode)
1705 m = mnfst.read(mnfstnode)
1706 # For every file in we care about.
1706 # For every file in we care about.
1707 for f in changedfiles:
1707 for f in changedfiles:
1708 fnode = m.get(f, None)
1708 fnode = m.get(f, None)
1709 # If it's in the manifest
1709 # If it's in the manifest
1710 if fnode is not None:
1710 if fnode is not None:
1711 # See comments above.
1711 # See comments above.
1712 clnode = msng_mnfst_set[mnfstnode]
1712 clnode = msng_mnfst_set[mnfstnode]
1713 ndset = msng_filenode_set.setdefault(f, {})
1713 ndset = msng_filenode_set.setdefault(f, {})
1714 ndset.setdefault(fnode, clnode)
1714 ndset.setdefault(fnode, clnode)
1715 # Remember the revision we hope to see next.
1715 # Remember the revision we hope to see next.
1716 next_rev[0] = r + 1
1716 next_rev[0] = r + 1
1717 return collect_msng_filenodes
1717 return collect_msng_filenodes
1718
1718
1719 # We have a list of filenodes we think we need for a file, lets remove
1719 # We have a list of filenodes we think we need for a file, lets remove
1720 # all those we now the recipient must have.
1720 # all those we now the recipient must have.
1721 def prune_filenodes(f, filerevlog):
1721 def prune_filenodes(f, filerevlog):
1722 msngset = msng_filenode_set[f]
1722 msngset = msng_filenode_set[f]
1723 hasset = {}
1723 hasset = {}
1724 # If a 'missing' filenode thinks it belongs to a changenode we
1724 # If a 'missing' filenode thinks it belongs to a changenode we
1725 # assume the recipient must have, then the recipient must have
1725 # assume the recipient must have, then the recipient must have
1726 # that filenode.
1726 # that filenode.
1727 for n in msngset:
1727 for n in msngset:
1728 clnode = cl.node(filerevlog.linkrev(n))
1728 clnode = cl.node(filerevlog.linkrev(n))
1729 if clnode in has_cl_set:
1729 if clnode in has_cl_set:
1730 hasset[n] = 1
1730 hasset[n] = 1
1731 prune_parents(filerevlog, hasset, msngset)
1731 prune_parents(filerevlog, hasset, msngset)
1732
1732
1733 # A function generator function that sets up the a context for the
1733 # A function generator function that sets up the a context for the
1734 # inner function.
1734 # inner function.
1735 def lookup_filenode_link_func(fname):
1735 def lookup_filenode_link_func(fname):
1736 msngset = msng_filenode_set[fname]
1736 msngset = msng_filenode_set[fname]
1737 # Lookup the changenode the filenode belongs to.
1737 # Lookup the changenode the filenode belongs to.
1738 def lookup_filenode_link(fnode):
1738 def lookup_filenode_link(fnode):
1739 return msngset[fnode]
1739 return msngset[fnode]
1740 return lookup_filenode_link
1740 return lookup_filenode_link
1741
1741
1742 # Add the nodes that were explicitly requested.
1742 # Add the nodes that were explicitly requested.
1743 def add_extra_nodes(name, nodes):
1743 def add_extra_nodes(name, nodes):
1744 if not extranodes or name not in extranodes:
1744 if not extranodes or name not in extranodes:
1745 return
1745 return
1746
1746
1747 for node, linknode in extranodes[name]:
1747 for node, linknode in extranodes[name]:
1748 if node not in nodes:
1748 if node not in nodes:
1749 nodes[node] = linknode
1749 nodes[node] = linknode
1750
1750
1751 # Now that we have all theses utility functions to help out and
1751 # Now that we have all theses utility functions to help out and
1752 # logically divide up the task, generate the group.
1752 # logically divide up the task, generate the group.
1753 def gengroup():
1753 def gengroup():
1754 # The set of changed files starts empty.
1754 # The set of changed files starts empty.
1755 changedfiles = {}
1755 changedfiles = {}
1756 # Create a changenode group generator that will call our functions
1756 # Create a changenode group generator that will call our functions
1757 # back to lookup the owning changenode and collect information.
1757 # back to lookup the owning changenode and collect information.
1758 group = cl.group(msng_cl_lst, identity,
1758 group = cl.group(msng_cl_lst, identity,
1759 manifest_and_file_collector(changedfiles))
1759 manifest_and_file_collector(changedfiles))
1760 for chnk in group:
1760 for chnk in group:
1761 yield chnk
1761 yield chnk
1762
1762
1763 # The list of manifests has been collected by the generator
1763 # The list of manifests has been collected by the generator
1764 # calling our functions back.
1764 # calling our functions back.
1765 prune_manifests()
1765 prune_manifests()
1766 add_extra_nodes(1, msng_mnfst_set)
1766 add_extra_nodes(1, msng_mnfst_set)
1767 msng_mnfst_lst = msng_mnfst_set.keys()
1767 msng_mnfst_lst = msng_mnfst_set.keys()
1768 # Sort the manifestnodes by revision number.
1768 # Sort the manifestnodes by revision number.
1769 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1769 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1770 # Create a generator for the manifestnodes that calls our lookup
1770 # Create a generator for the manifestnodes that calls our lookup
1771 # and data collection functions back.
1771 # and data collection functions back.
1772 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1772 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1773 filenode_collector(changedfiles))
1773 filenode_collector(changedfiles))
1774 for chnk in group:
1774 for chnk in group:
1775 yield chnk
1775 yield chnk
1776
1776
1777 # These are no longer needed, dereference and toss the memory for
1777 # These are no longer needed, dereference and toss the memory for
1778 # them.
1778 # them.
1779 msng_mnfst_lst = None
1779 msng_mnfst_lst = None
1780 msng_mnfst_set.clear()
1780 msng_mnfst_set.clear()
1781
1781
1782 if extranodes:
1782 if extranodes:
1783 for fname in extranodes:
1783 for fname in extranodes:
1784 if isinstance(fname, int):
1784 if isinstance(fname, int):
1785 continue
1785 continue
1786 add_extra_nodes(fname,
1786 add_extra_nodes(fname,
1787 msng_filenode_set.setdefault(fname, {}))
1787 msng_filenode_set.setdefault(fname, {}))
1788 changedfiles[fname] = 1
1788 changedfiles[fname] = 1
1789 # Go through all our files in order sorted by name.
1789 # Go through all our files in order sorted by name.
1790 for fname in util.sort(changedfiles):
1790 for fname in util.sort(changedfiles):
1791 filerevlog = self.file(fname)
1791 filerevlog = self.file(fname)
1792 if not len(filerevlog):
1792 if not len(filerevlog):
1793 raise util.Abort(_("empty or missing revlog for %s") % fname)
1793 raise util.Abort(_("empty or missing revlog for %s") % fname)
1794 # Toss out the filenodes that the recipient isn't really
1794 # Toss out the filenodes that the recipient isn't really
1795 # missing.
1795 # missing.
1796 if fname in msng_filenode_set:
1796 if fname in msng_filenode_set:
1797 prune_filenodes(fname, filerevlog)
1797 prune_filenodes(fname, filerevlog)
1798 msng_filenode_lst = msng_filenode_set[fname].keys()
1798 msng_filenode_lst = msng_filenode_set[fname].keys()
1799 else:
1799 else:
1800 msng_filenode_lst = []
1800 msng_filenode_lst = []
1801 # If any filenodes are left, generate the group for them,
1801 # If any filenodes are left, generate the group for them,
1802 # otherwise don't bother.
1802 # otherwise don't bother.
1803 if len(msng_filenode_lst) > 0:
1803 if len(msng_filenode_lst) > 0:
1804 yield changegroup.chunkheader(len(fname))
1804 yield changegroup.chunkheader(len(fname))
1805 yield fname
1805 yield fname
1806 # Sort the filenodes by their revision #
1806 # Sort the filenodes by their revision #
1807 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1807 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1808 # Create a group generator and only pass in a changenode
1808 # Create a group generator and only pass in a changenode
1809 # lookup function as we need to collect no information
1809 # lookup function as we need to collect no information
1810 # from filenodes.
1810 # from filenodes.
1811 group = filerevlog.group(msng_filenode_lst,
1811 group = filerevlog.group(msng_filenode_lst,
1812 lookup_filenode_link_func(fname))
1812 lookup_filenode_link_func(fname))
1813 for chnk in group:
1813 for chnk in group:
1814 yield chnk
1814 yield chnk
1815 if fname in msng_filenode_set:
1815 if fname in msng_filenode_set:
1816 # Don't need this anymore, toss it to free memory.
1816 # Don't need this anymore, toss it to free memory.
1817 del msng_filenode_set[fname]
1817 del msng_filenode_set[fname]
1818 # Signal that no more groups are left.
1818 # Signal that no more groups are left.
1819 yield changegroup.closechunk()
1819 yield changegroup.closechunk()
1820
1820
1821 if msng_cl_lst:
1821 if msng_cl_lst:
1822 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1822 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1823
1823
1824 return util.chunkbuffer(gengroup())
1824 return util.chunkbuffer(gengroup())
1825
1825
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than changegroupsubset as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes: list of changelog nodes the recipient already has; every
        descendant of these is included in the group.
        source: opaque tag passed to the 'preoutgoing'/'outgoing' hooks.
        Returns a util.chunkbuffer wrapping the generated chunks.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # set of changelog revision numbers going out, used to filter
        # manifest/file revisions down to the ones linked to outgoing csets
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # changelog nodes own themselves, so their lookup is the identity
        def identity(x):
            return x

        # yield the nodes of a revlog whose linked changeset is outgoing
        def gennodelst(log):
            for r in log:
                n = log.node(r)
                if log.linkrev(n) in revset:
                    yield n

        # callback factory: records every file touched by an outgoing
        # changeset into changedfileset (used as a set; value is a dummy 1)
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # lookup factory: maps a node of the given revlog to the changelog
        # node it is linked to (the recipient needs this association)
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # 1) changelog group; side effect: fills changedfiles
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            # 2) manifest group, restricted to outgoing linkrevs
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # 3) one group per changed file, sorted by name for determinism
            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                # materialize so we can skip the header for empty groups
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # trailing empty chunk signals the end of the changegroup
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1893
1893
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        source: file-like object yielding changegroup chunks.
        srctype: origin tag ('push', 'pull', ...) passed to hooks.
        url: origin URL passed to hooks.
        emptyok: if True, an empty changelog group is not an error.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # per-changeset callback: logs the node, returns the linkrev
        # that will be assigned to the next added revision
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        # map a changelog node to its revision number (linkrev for
        # manifest/file revisions)
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: changelog tip revision before/after the group
            cor = len(cl) - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files: one (filename, group) pair per changed
            # file, terminated by an empty chunk
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # pretxnchangegroup may veto (throw) and roll everything back
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            # drop the reference so the transaction destructor runs (aborts
            # if tr.close() was not reached)
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1996
1996
1997
1997
1998 def stream_in(self, remote):
1998 def stream_in(self, remote):
1999 fp = remote.stream_out()
1999 fp = remote.stream_out()
2000 l = fp.readline()
2000 l = fp.readline()
2001 try:
2001 try:
2002 resp = int(l)
2002 resp = int(l)
2003 except ValueError:
2003 except ValueError:
2004 raise util.UnexpectedOutput(
2004 raise util.UnexpectedOutput(
2005 _('Unexpected response from remote server:'), l)
2005 _('Unexpected response from remote server:'), l)
2006 if resp == 1:
2006 if resp == 1:
2007 raise util.Abort(_('operation forbidden by server'))
2007 raise util.Abort(_('operation forbidden by server'))
2008 elif resp == 2:
2008 elif resp == 2:
2009 raise util.Abort(_('locking the remote repository failed'))
2009 raise util.Abort(_('locking the remote repository failed'))
2010 elif resp != 0:
2010 elif resp != 0:
2011 raise util.Abort(_('the server sent an unknown error code'))
2011 raise util.Abort(_('the server sent an unknown error code'))
2012 self.ui.status(_('streaming all changes\n'))
2012 self.ui.status(_('streaming all changes\n'))
2013 l = fp.readline()
2013 l = fp.readline()
2014 try:
2014 try:
2015 total_files, total_bytes = map(int, l.split(' ', 1))
2015 total_files, total_bytes = map(int, l.split(' ', 1))
2016 except (ValueError, TypeError):
2016 except (ValueError, TypeError):
2017 raise util.UnexpectedOutput(
2017 raise util.UnexpectedOutput(
2018 _('Unexpected response from remote server:'), l)
2018 _('Unexpected response from remote server:'), l)
2019 self.ui.status(_('%d files to transfer, %s of data\n') %
2019 self.ui.status(_('%d files to transfer, %s of data\n') %
2020 (total_files, util.bytecount(total_bytes)))
2020 (total_files, util.bytecount(total_bytes)))
2021 start = time.time()
2021 start = time.time()
2022 for i in xrange(total_files):
2022 for i in xrange(total_files):
2023 # XXX doesn't support '\n' or '\r' in filenames
2023 # XXX doesn't support '\n' or '\r' in filenames
2024 l = fp.readline()
2024 l = fp.readline()
2025 try:
2025 try:
2026 name, size = l.split('\0', 1)
2026 name, size = l.split('\0', 1)
2027 size = int(size)
2027 size = int(size)
2028 except ValueError, TypeError:
2028 except ValueError, TypeError:
2029 raise util.UnexpectedOutput(
2029 raise util.UnexpectedOutput(
2030 _('Unexpected response from remote server:'), l)
2030 _('Unexpected response from remote server:'), l)
2031 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2031 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2032 ofp = self.sopener(name, 'w')
2032 ofp = self.sopener(name, 'w')
2033 for chunk in util.filechunkiter(fp, limit=size):
2033 for chunk in util.filechunkiter(fp, limit=size):
2034 ofp.write(chunk)
2034 ofp.write(chunk)
2035 ofp.close()
2035 ofp.close()
2036 elapsed = time.time() - start
2036 elapsed = time.time() - start
2037 if elapsed <= 0:
2037 if elapsed <= 0:
2038 elapsed = 0.001
2038 elapsed = 0.001
2039 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2039 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2040 (util.bytecount(total_bytes), elapsed,
2040 (util.bytecount(total_bytes), elapsed,
2041 util.bytecount(total_bytes / elapsed)))
2041 util.bytecount(total_bytes / elapsed)))
2042 self.invalidate()
2042 self.invalidate()
2043 return len(self.heads()) + 1
2043 return len(self.heads()) + 1
2044
2044
2045 def clone(self, remote, heads=[], stream=False):
2045 def clone(self, remote, heads=[], stream=False):
2046 '''clone remote repository.
2046 '''clone remote repository.
2047
2047
2048 keyword arguments:
2048 keyword arguments:
2049 heads: list of revs to clone (forces use of pull)
2049 heads: list of revs to clone (forces use of pull)
2050 stream: use streaming clone if possible'''
2050 stream: use streaming clone if possible'''
2051
2051
2052 # now, all clients that can request uncompressed clones can
2052 # now, all clients that can request uncompressed clones can
2053 # read repo formats supported by all servers that can serve
2053 # read repo formats supported by all servers that can serve
2054 # them.
2054 # them.
2055
2055
2056 # if revlog format changes, client will have to check version
2056 # if revlog format changes, client will have to check version
2057 # and format flags on "stream" capability, and use
2057 # and format flags on "stream" capability, and use
2058 # uncompressed only if compatible.
2058 # uncompressed only if compatible.
2059
2059
2060 if stream and not heads and remote.capable('stream'):
2060 if stream and not heads and remote.capable('stream'):
2061 return self.stream_in(remote)
2061 return self.stream_in(remote)
2062 return self.pull(remote, heads)
2062 return self.pull(remote, heads)
2063
2063
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    The pairs are snapshotted as plain tuples so the callback holds no
    reference back to the transaction object, letting destructors run.
    """
    pending = [tuple(entry) for entry in files]
    def run_renames():
        for src_path, dest_path in pending:
            util.rename(src_path, dest_path)
    return run_renames
2071
2071
def instance(ui, path, create):
    """Open (or create) a localrepository at a local path.

    Strips an optional 'file' scheme prefix from path before opening.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2074
2074
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,83 +1,83 b''
1 # statichttprepo.py - simple http repository class for mercurial
1 # statichttprepo.py - simple http repository class for mercurial
2 #
2 #
3 # This provides read-only repo access to repositories exported via static http
3 # This provides read-only repo access to repositories exported via static http
4 #
4 #
5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms
7 # This software may be used and distributed according to the terms
8 # of the GNU General Public License, incorporated herein by reference.
8 # of the GNU General Public License, incorporated herein by reference.
9
9
10 from i18n import _
10 from i18n import _
11 import changelog, httprangereader
11 import changelog, httprangereader
12 import repo, localrepo, manifest, util
12 import repo, localrepo, manifest, util, store
13 import urllib, urllib2, errno
13 import urllib, urllib2, errno
14
14
15 class rangereader(httprangereader.httprangereader):
15 class rangereader(httprangereader.httprangereader):
16 def read(self, size=None):
16 def read(self, size=None):
17 try:
17 try:
18 return httprangereader.httprangereader.read(self, size)
18 return httprangereader.httprangereader.read(self, size)
19 except urllib2.HTTPError, inst:
19 except urllib2.HTTPError, inst:
20 num = inst.code == 404 and errno.ENOENT or None
20 num = inst.code == 404 and errno.ENOENT or None
21 raise IOError(num, inst)
21 raise IOError(num, inst)
22 except urllib2.URLError, inst:
22 except urllib2.URLError, inst:
23 raise IOError(None, inst.reason[1])
23 raise IOError(None, inst.reason[1])
24
24
def opener(base):
    """return a function that opens files over http"""
    prefix = base
    def o(path, mode="r"):
        # quote the relative path so it is safe inside a URL
        url = "/".join((prefix, urllib.quote(path)))
        return rangereader(url)
    return o
32
32
33 class statichttprepository(localrepo.localrepository):
33 class statichttprepository(localrepo.localrepository):
34 def __init__(self, ui, path):
34 def __init__(self, ui, path):
35 self._url = path
35 self._url = path
36 self.ui = ui
36 self.ui = ui
37
37
38 self.path = path.rstrip('/') + "/.hg"
38 self.path = path.rstrip('/') + "/.hg"
39 self.opener = opener(self.path)
39 self.opener = opener(self.path)
40
40
41 # find requirements
41 # find requirements
42 try:
42 try:
43 requirements = self.opener("requires").read().splitlines()
43 requirements = self.opener("requires").read().splitlines()
44 except IOError, inst:
44 except IOError, inst:
45 if inst.errno == errno.ENOENT:
45 if inst.errno == errno.ENOENT:
46 msg = _("'%s' does not appear to be an hg repository") % path
46 msg = _("'%s' does not appear to be an hg repository") % path
47 raise repo.RepoError(msg)
47 raise repo.RepoError(msg)
48 else:
48 else:
49 requirements = []
49 requirements = []
50
50
51 # check them
51 # check them
52 for r in requirements:
52 for r in requirements:
53 if r not in self.supported:
53 if r not in self.supported:
54 raise repo.RepoError(_("requirement '%s' not supported") % r)
54 raise repo.RepoError(_("requirement '%s' not supported") % r)
55
55
56 # setup store
56 # setup store
57 if "store" in requirements:
57 if "store" in requirements:
58 self.encodefn = util.encodefilename
58 self.encodefn = store.encodefilename
59 self.decodefn = util.decodefilename
59 self.decodefn = store.decodefilename
60 self.spath = self.path + "/store"
60 self.spath = self.path + "/store"
61 else:
61 else:
62 self.encodefn = lambda x: x
62 self.encodefn = lambda x: x
63 self.decodefn = lambda x: x
63 self.decodefn = lambda x: x
64 self.spath = self.path
64 self.spath = self.path
65 self.sopener = util.encodedopener(opener(self.spath), self.encodefn)
65 self.sopener = store.encodedopener(opener(self.spath), self.encodefn)
66
66
67 self.manifest = manifest.manifest(self.sopener)
67 self.manifest = manifest.manifest(self.sopener)
68 self.changelog = changelog.changelog(self.sopener)
68 self.changelog = changelog.changelog(self.sopener)
69 self.tagscache = None
69 self.tagscache = None
70 self.nodetagscache = None
70 self.nodetagscache = None
71 self.encodepats = None
71 self.encodepats = None
72 self.decodepats = None
72 self.decodepats = None
73
73
74 def url(self):
74 def url(self):
75 return 'static-' + self._url
75 return 'static-' + self._url
76
76
77 def local(self):
77 def local(self):
78 return False
78 return False
79
79
def instance(ui, path, create):
    """Open a static-http repository; creating one is not supported."""
    if create:
        raise util.Abort(_('cannot create new static-http repository'))
    # strip the leading 'static-' scheme prefix (7 characters)
    return statichttprepository(ui, path[7:])
@@ -1,1897 +1,1864 b''
1 """
1 """
2 util.py - Mercurial utility functions and platform specfic implementations
2 util.py - Mercurial utility functions and platform specfic implementations
3
3
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
7
7
8 This software may be used and distributed according to the terms
8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference.
9 of the GNU General Public License, incorporated herein by reference.
10
10
11 This contains helper routines that are independent of the SCM core and hide
11 This contains helper routines that are independent of the SCM core and hide
12 platform-specific details from the core.
12 platform-specific details from the core.
13 """
13 """
14
14
15 from i18n import _
15 from i18n import _
16 import cStringIO, errno, getpass, re, shutil, sys, tempfile
16 import cStringIO, errno, getpass, re, shutil, sys, tempfile
17 import os, stat, threading, time, calendar, ConfigParser, locale, glob, osutil
17 import os, stat, threading, time, calendar, ConfigParser, locale, glob, osutil
18 import imp, urlparse
18 import imp, urlparse
19
19
20 # Python compatibility
20 # Python compatibility
21
21
22 try:
22 try:
23 set = set
23 set = set
24 frozenset = frozenset
24 frozenset = frozenset
25 except NameError:
25 except NameError:
26 from sets import Set as set, ImmutableSet as frozenset
26 from sets import Set as set, ImmutableSet as frozenset
27
27
# lazily resolved md5 constructor (hashlib on modern Pythons,
# the legacy md5 module otherwise)
_md5 = None
def md5(s):
    """Return an md5 hash object for s."""
    global _md5
    if _md5 is None:
        try:
            from hashlib import md5 as _impl
        except ImportError:
            from md5 import md5 as _impl
        _md5 = _impl
    return _md5(s)
39
39
# lazily resolved sha1 constructor (hashlib on modern Pythons,
# the legacy sha module otherwise)
_sha1 = None
def sha1(s):
    """Return a sha1 hash object for s."""
    global _sha1
    if _sha1 is None:
        try:
            from hashlib import sha1 as _impl
        except ImportError:
            from sha import sha as _impl
        _sha1 = _impl
    return _sha1(s)
51
51
# determine the local character encoding: HGENCODING wins, then the
# locale, falling back to ascii if the locale cannot be queried
try:
    _encoding = os.environ.get("HGENCODING")
    if sys.platform == 'darwin' and not _encoding:
        # On darwin, getpreferredencoding ignores the locale environment and
        # always returns mac-roman. We override this if the environment is
        # not C (has been customized by the user).
        locale.setlocale(locale.LC_CTYPE, '')
        _encoding = locale.getlocale()[1]
    if not _encoding:
        _encoding = locale.getpreferredencoding() or 'ascii'
except locale.Error:
    _encoding = 'ascii'
# how decode errors are handled when converting from the local
# encoding: 'strict' (default), 'replace' or 'ignore'
_encodingmode = os.environ.get("HGENCODINGMODE", "strict")
_fallbackencoding = 'ISO-8859-1'
66
66
67 def tolocal(s):
67 def tolocal(s):
68 """
68 """
69 Convert a string from internal UTF-8 to local encoding
69 Convert a string from internal UTF-8 to local encoding
70
70
71 All internal strings should be UTF-8 but some repos before the
71 All internal strings should be UTF-8 but some repos before the
72 implementation of locale support may contain latin1 or possibly
72 implementation of locale support may contain latin1 or possibly
73 other character sets. We attempt to decode everything strictly
73 other character sets. We attempt to decode everything strictly
74 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
74 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
75 replace unknown characters.
75 replace unknown characters.
76 """
76 """
77 for e in ('UTF-8', _fallbackencoding):
77 for e in ('UTF-8', _fallbackencoding):
78 try:
78 try:
79 u = s.decode(e) # attempt strict decoding
79 u = s.decode(e) # attempt strict decoding
80 return u.encode(_encoding, "replace")
80 return u.encode(_encoding, "replace")
81 except LookupError, k:
81 except LookupError, k:
82 raise Abort(_("%s, please check your locale settings") % k)
82 raise Abort(_("%s, please check your locale settings") % k)
83 except UnicodeDecodeError:
83 except UnicodeDecodeError:
84 pass
84 pass
85 u = s.decode("utf-8", "replace") # last ditch
85 u = s.decode("utf-8", "replace") # last ditch
86 return u.encode(_encoding, "replace")
86 return u.encode(_encoding, "replace")
87
87
88 def fromlocal(s):
88 def fromlocal(s):
89 """
89 """
90 Convert a string from the local character encoding to UTF-8
90 Convert a string from the local character encoding to UTF-8
91
91
92 We attempt to decode strings using the encoding mode set by
92 We attempt to decode strings using the encoding mode set by
93 HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
93 HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
94 characters will cause an error message. Other modes include
94 characters will cause an error message. Other modes include
95 'replace', which replaces unknown characters with a special
95 'replace', which replaces unknown characters with a special
96 Unicode character, and 'ignore', which drops the character.
96 Unicode character, and 'ignore', which drops the character.
97 """
97 """
98 try:
98 try:
99 return s.decode(_encoding, _encodingmode).encode("utf-8")
99 return s.decode(_encoding, _encodingmode).encode("utf-8")
100 except UnicodeDecodeError, inst:
100 except UnicodeDecodeError, inst:
101 sub = s[max(0, inst.start-10):inst.start+10]
101 sub = s[max(0, inst.start-10):inst.start+10]
102 raise Abort("decoding near '%s': %s!" % (sub, inst))
102 raise Abort("decoding near '%s': %s!" % (sub, inst))
103 except LookupError, k:
103 except LookupError, k:
104 raise Abort(_("%s, please check your locale settings") % k)
104 raise Abort(_("%s, please check your locale settings") % k)
105
105
def locallen(s):
    """Find the length in characters of a local string"""
    decoded = s.decode(_encoding, "replace")
    return len(decoded)
109
109
# date formats tried, in order, by parsedate
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    # was '%I:%M:%SP': literal 'P' is not a strftime directive; the
    # AM/PM marker is %p, matching every other 12-hour format above
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
    )

# additional, vaguer formats accepted for date range queries
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
144
144
class SignalInterrupt(Exception):
    """Exception raised on SIGTERM and SIGHUP."""
147
147
# differences from SafeConfigParser:
# - case-sensitive keys
# - allows values that are not strings (this means that you may not
#   be able to save the configuration to a file)
class configparser(ConfigParser.SafeConfigParser):
    def optionxform(self, optionstr):
        # identity transform keeps option names case-sensitive
        return optionstr

    def set(self, section, option, value):
        # bypass SafeConfigParser's string-only type check
        return ConfigParser.ConfigParser.set(self, section, option, value)

    def _interpolate(self, section, option, rawval, vars):
        # non-string values cannot be interpolated; hand them back as-is
        if not isinstance(rawval, basestring):
            return rawval
        return ConfigParser.SafeConfigParser._interpolate(self, section,
                                                          option, rawval,
                                                          vars)
164
164
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    cache = {}
    if func.func_code.co_argcount == 1:
        # single-argument fast path: no tuple packing/unpacking needed
        def f(arg):
            if arg in cache:
                return cache[arg]
            result = cache[arg] = func(arg)
            return result
    else:
        def f(*args):
            if args in cache:
                return cache[args]
            result = cache[args] = func(*args)
            return result

    return f
183
183
184 def pipefilter(s, cmd):
184 def pipefilter(s, cmd):
185 '''filter string S through command CMD, returning its output'''
185 '''filter string S through command CMD, returning its output'''
186 (pin, pout) = os.popen2(cmd, 'b')
186 (pin, pout) = os.popen2(cmd, 'b')
187 def writer():
187 def writer():
188 try:
188 try:
189 pin.write(s)
189 pin.write(s)
190 pin.close()
190 pin.close()
191 except IOError, inst:
191 except IOError, inst:
192 if inst.errno != errno.EPIPE:
192 if inst.errno != errno.EPIPE:
193 raise
193 raise
194
194
195 # we should use select instead on UNIX, but this will work on most
195 # we should use select instead on UNIX, but this will work on most
196 # systems, including Windows
196 # systems, including Windows
197 w = threading.Thread(target=writer)
197 w = threading.Thread(target=writer)
198 w.start()
198 w.start()
199 f = pout.read()
199 f = pout.read()
200 pout.close()
200 pout.close()
201 w.join()
201 w.join()
202 return f
202 return f
203
203
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # on OpenVMS an odd status means success
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explain_exit(code)))
        # read the result and close the handle (the original leaked it)
        fp = open(outname, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()
    finally:
        # best-effort cleanup: swallow only OS-level unlink failures,
        # not KeyboardInterrupt/SystemExit as a bare except would
        for name in (inname, outname):
            if name:
                try:
                    os.unlink(name)
                except OSError:
                    pass
232
232
# maps a filter-command prefix to its implementation; see filter()
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
237
237
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, impl in filtertable.iteritems():
        if cmd.startswith(prefix):
            return impl(s, cmd[len(prefix):].lstrip())
    # no recognized prefix: treat the whole command as a pipe filter
    return pipefilter(s, cmd)
244
244
def binary(s):
    """return true if a string is binary data"""
    # a NUL byte is treated as the marker for binary content
    return bool(s and '\0' in s)
250
250
def unique(g):
    """return the uniq elements of iterable g"""
    seen = dict.fromkeys(g)
    return seen.keys()
254
254
def sort(l):
    """Return l sorted; lists are sorted in place, other iterables copied."""
    if isinstance(l, list):
        result = l
    else:
        result = list(l)
    result.sort()
    return result
260
260
class Abort(Exception):
    """Raised if a command needs to print an error and exit."""

class UnexpectedOutput(Abort):
    """Raised to print an error with part of output and exit."""
266
266
def always(fn):
    """Predicate matching every file name."""
    return True

def never(fn):
    """Predicate matching no file name."""
    return False
269
269
def expand_glob(pats):
    '''On Windows, expand the implicit globs in a list of patterns'''
    if os.name != 'nt':
        # UNIX shells expand globs before we see them; nothing to do
        return list(pats)
    expanded = []
    for p in pats:
        kind, name = patkind(p, None)
        if kind is None:
            matches = glob.glob(name)
            if matches:
                expanded.extend(matches)
                continue
        # if we couldn't expand the glob, just keep it around
        expanded.append(p)
    return expanded
285
285
def patkind(name, default):
    """Split a string into an optional pattern kind prefix and the
    actual pattern."""
    for kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre'):
        if name.startswith(kind + ':'):
            return name.split(':', 1)
    return default, name
292
292
def globre(pat, head='^', tail='$'):
    "convert a glob pattern into a regexp"
    i, n = 0, len(pat)
    out = ''
    group = 0
    def peek(): return i < n and pat[i]
    while i < n:
        ch = pat[i]
        i = i + 1
        if ch == '*':
            if peek() == '*':
                # '**' crosses directory separators
                i += 1
                out += '.*'
            else:
                # plain '*' stops at '/'
                out += '[^/]*'
        elif ch == '?':
            out += '.'
        elif ch == '[':
            j = i
            # a leading '!' or ']' is part of the class, not a delimiter
            if j < n and pat[j] in '!]':
                j += 1
            while j < n and pat[j] != ']':
                j += 1
            if j >= n:
                # unterminated class: treat '[' literally
                out += '\\['
            else:
                inner = pat[i:j].replace('\\', '\\\\')
                i = j + 1
                if inner[0] == '!':
                    # glob negation '!' maps to regex '^'
                    inner = '^' + inner[1:]
                elif inner[0] == '^':
                    # a literal leading '^' must be escaped
                    inner = '\\' + inner
                out = '%s[%s]' % (out, inner)
        elif ch == '{':
            group += 1
            out += '(?:'
        elif ch == '}' and group:
            out += ')'
            group -= 1
        elif ch == ',' and group:
            out += '|'
        elif ch == '\\':
            nxt = peek()
            if nxt:
                i += 1
                out += re.escape(nxt)
            else:
                out += re.escape(ch)
        else:
            out += re.escape(ch)
    return head + out + tail
344
344
# characters whose presence makes a name a glob rather than a literal
_globchars = dict.fromkeys('[{*?', 1)
346
346
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives: no relative path exists, go absolute
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    fromparts = splitpath(n1)
    toparts = n2.split('/')
    # count the shared leading components
    common = 0
    while (common < len(fromparts) and common < len(toparts)
           and fromparts[common] == toparts[common]):
        common += 1
    # climb out of what remains of n1, then descend into n2
    up = ['..'] * (len(fromparts) - common)
    return os.sep.join(up + toparts[common:]) or '.'
371
371
def canonpath(root, cwd, myname):
    """return the canonical path of myname, given cwd and root"""
    # normalize root to have exactly one trailing separator
    if root == os.sep:
        rootsep = os.sep
    elif endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    audit_path = path_auditor(root)
    if name != rootsep and name.startswith(rootsep):
        # textually inside root: strip the prefix and audit the rest
        name = name[len(rootsep):]
        audit_path(name)
        return pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). For each
        # `name', compare dev/inode numbers. If they match, the list `parts'
        # holds the reversed list of components making up the relative file
        # name we want.
        rootstat = os.stat(root)
        parts = []
        while True:
            try:
                namestat = os.stat(name)
            except OSError:
                break
            if samestat(namestat, rootstat):
                if not parts:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                parts.reverse()
                name = os.path.join(*parts)
                audit_path(name)
                return pconvert(name)
            dirname, basename = os.path.split(name)
            parts.append(basename)
            if dirname == name:
                break
            name = dirname

    raise Abort('%s not under root' % myname)
420
420
421 def matcher(canonroot, cwd='', names=[], inc=[], exc=[], src=None, dflt_pat='glob'):
421 def matcher(canonroot, cwd='', names=[], inc=[], exc=[], src=None, dflt_pat='glob'):
422 """build a function to match a set of file patterns
422 """build a function to match a set of file patterns
423
423
424 arguments:
424 arguments:
425 canonroot - the canonical root of the tree you're matching against
425 canonroot - the canonical root of the tree you're matching against
426 cwd - the current working directory, if relevant
426 cwd - the current working directory, if relevant
427 names - patterns to find
427 names - patterns to find
428 inc - patterns to include
428 inc - patterns to include
429 exc - patterns to exclude
429 exc - patterns to exclude
430 dflt_pat - if a pattern in names has no explicit type, assume this one
430 dflt_pat - if a pattern in names has no explicit type, assume this one
431 src - where these patterns came from (e.g. .hgignore)
431 src - where these patterns came from (e.g. .hgignore)
432
432
433 a pattern is one of:
433 a pattern is one of:
434 'glob:<glob>' - a glob relative to cwd
434 'glob:<glob>' - a glob relative to cwd
435 're:<regexp>' - a regular expression
435 're:<regexp>' - a regular expression
436 'path:<path>' - a path relative to canonroot
436 'path:<path>' - a path relative to canonroot
437 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
437 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
438 'relpath:<path>' - a path relative to cwd
438 'relpath:<path>' - a path relative to cwd
439 'relre:<regexp>' - a regexp that doesn't have to match the start of a name
439 'relre:<regexp>' - a regexp that doesn't have to match the start of a name
440 '<something>' - one of the cases above, selected by the dflt_pat argument
440 '<something>' - one of the cases above, selected by the dflt_pat argument
441
441
442 returns:
442 returns:
443 a 3-tuple containing
443 a 3-tuple containing
444 - list of roots (places where one should start a recursive walk of the fs);
444 - list of roots (places where one should start a recursive walk of the fs);
445 this often matches the explicit non-pattern names passed in, but also
445 this often matches the explicit non-pattern names passed in, but also
446 includes the initial part of glob: patterns that has no glob characters
446 includes the initial part of glob: patterns that has no glob characters
447 - a bool match(filename) function
447 - a bool match(filename) function
448 - a bool indicating if any patterns were passed in
448 - a bool indicating if any patterns were passed in
449 """
449 """
450
450
451 # a common case: no patterns at all
451 # a common case: no patterns at all
452 if not names and not inc and not exc:
452 if not names and not inc and not exc:
453 return [], always, False
453 return [], always, False
454
454
455 def contains_glob(name):
455 def contains_glob(name):
456 for c in name:
456 for c in name:
457 if c in _globchars: return True
457 if c in _globchars: return True
458 return False
458 return False
459
459
460 def regex(kind, name, tail):
460 def regex(kind, name, tail):
461 '''convert a pattern into a regular expression'''
461 '''convert a pattern into a regular expression'''
462 if not name:
462 if not name:
463 return ''
463 return ''
464 if kind == 're':
464 if kind == 're':
465 return name
465 return name
466 elif kind == 'path':
466 elif kind == 'path':
467 return '^' + re.escape(name) + '(?:/|$)'
467 return '^' + re.escape(name) + '(?:/|$)'
468 elif kind == 'relglob':
468 elif kind == 'relglob':
469 return globre(name, '(?:|.*/)', tail)
469 return globre(name, '(?:|.*/)', tail)
470 elif kind == 'relpath':
470 elif kind == 'relpath':
471 return re.escape(name) + '(?:/|$)'
471 return re.escape(name) + '(?:/|$)'
472 elif kind == 'relre':
472 elif kind == 'relre':
473 if name.startswith('^'):
473 if name.startswith('^'):
474 return name
474 return name
475 return '.*' + name
475 return '.*' + name
476 return globre(name, '', tail)
476 return globre(name, '', tail)
477
477
478 def matchfn(pats, tail):
478 def matchfn(pats, tail):
479 """build a matching function from a set of patterns"""
479 """build a matching function from a set of patterns"""
480 if not pats:
480 if not pats:
481 return
481 return
482 try:
482 try:
483 pat = '(?:%s)' % '|'.join([regex(k, p, tail) for (k, p) in pats])
483 pat = '(?:%s)' % '|'.join([regex(k, p, tail) for (k, p) in pats])
484 if len(pat) > 20000:
484 if len(pat) > 20000:
485 raise OverflowError()
485 raise OverflowError()
486 return re.compile(pat).match
486 return re.compile(pat).match
487 except OverflowError:
487 except OverflowError:
488 # We're using a Python with a tiny regex engine and we
488 # We're using a Python with a tiny regex engine and we
489 # made it explode, so we'll divide the pattern list in two
489 # made it explode, so we'll divide the pattern list in two
490 # until it works
490 # until it works
491 l = len(pats)
491 l = len(pats)
492 if l < 2:
492 if l < 2:
493 raise
493 raise
494 a, b = matchfn(pats[:l//2], tail), matchfn(pats[l//2:], tail)
494 a, b = matchfn(pats[:l//2], tail), matchfn(pats[l//2:], tail)
495 return lambda s: a(s) or b(s)
495 return lambda s: a(s) or b(s)
496 except re.error:
496 except re.error:
497 for k, p in pats:
497 for k, p in pats:
498 try:
498 try:
499 re.compile('(?:%s)' % regex(k, p, tail))
499 re.compile('(?:%s)' % regex(k, p, tail))
500 except re.error:
500 except re.error:
501 if src:
501 if src:
502 raise Abort("%s: invalid pattern (%s): %s" %
502 raise Abort("%s: invalid pattern (%s): %s" %
503 (src, k, p))
503 (src, k, p))
504 else:
504 else:
505 raise Abort("invalid pattern (%s): %s" % (k, p))
505 raise Abort("invalid pattern (%s): %s" % (k, p))
506 raise Abort("invalid pattern")
506 raise Abort("invalid pattern")
507
507
508 def globprefix(pat):
508 def globprefix(pat):
509 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
509 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
510 root = []
510 root = []
511 for p in pat.split('/'):
511 for p in pat.split('/'):
512 if contains_glob(p): break
512 if contains_glob(p): break
513 root.append(p)
513 root.append(p)
514 return '/'.join(root) or '.'
514 return '/'.join(root) or '.'
515
515
516 def normalizepats(names, default):
516 def normalizepats(names, default):
517 pats = []
517 pats = []
518 roots = []
518 roots = []
519 anypats = False
519 anypats = False
520 for kind, name in [patkind(p, default) for p in names]:
520 for kind, name in [patkind(p, default) for p in names]:
521 if kind in ('glob', 'relpath'):
521 if kind in ('glob', 'relpath'):
522 name = canonpath(canonroot, cwd, name)
522 name = canonpath(canonroot, cwd, name)
523 elif kind in ('relglob', 'path'):
523 elif kind in ('relglob', 'path'):
524 name = normpath(name)
524 name = normpath(name)
525
525
526 pats.append((kind, name))
526 pats.append((kind, name))
527
527
528 if kind in ('glob', 're', 'relglob', 'relre'):
528 if kind in ('glob', 're', 'relglob', 'relre'):
529 anypats = True
529 anypats = True
530
530
531 if kind == 'glob':
531 if kind == 'glob':
532 root = globprefix(name)
532 root = globprefix(name)
533 roots.append(root)
533 roots.append(root)
534 elif kind in ('relpath', 'path'):
534 elif kind in ('relpath', 'path'):
535 roots.append(name or '.')
535 roots.append(name or '.')
536 elif kind == 'relglob':
536 elif kind == 'relglob':
537 roots.append('.')
537 roots.append('.')
538 return roots, pats, anypats
538 return roots, pats, anypats
539
539
540 roots, pats, anypats = normalizepats(names, dflt_pat)
540 roots, pats, anypats = normalizepats(names, dflt_pat)
541
541
542 patmatch = matchfn(pats, '$') or always
542 patmatch = matchfn(pats, '$') or always
543 incmatch = always
543 incmatch = always
544 if inc:
544 if inc:
545 dummy, inckinds, dummy = normalizepats(inc, 'glob')
545 dummy, inckinds, dummy = normalizepats(inc, 'glob')
546 incmatch = matchfn(inckinds, '(?:/|$)')
546 incmatch = matchfn(inckinds, '(?:/|$)')
547 excmatch = lambda fn: False
547 excmatch = lambda fn: False
548 if exc:
548 if exc:
549 dummy, exckinds, dummy = normalizepats(exc, 'glob')
549 dummy, exckinds, dummy = normalizepats(exc, 'glob')
550 excmatch = matchfn(exckinds, '(?:/|$)')
550 excmatch = matchfn(exckinds, '(?:/|$)')
551
551
552 if not names and inc and not exc:
552 if not names and inc and not exc:
553 # common case: hgignore patterns
553 # common case: hgignore patterns
554 match = incmatch
554 match = incmatch
555 else:
555 else:
556 match = lambda fn: incmatch(fn) and not excmatch(fn) and patmatch(fn)
556 match = lambda fn: incmatch(fn) and not excmatch(fn) and patmatch(fn)
557
557
558 return (roots, match, (inc or exc or anypats) and True)
558 return (roots, match, (inc or exc or anypats) and True)
559
559
560 _hgexecutable = None
560 _hgexecutable = None
561
561
562 def main_is_frozen():
562 def main_is_frozen():
563 """return True if we are a frozen executable.
563 """return True if we are a frozen executable.
564
564
565 The code supports py2exe (most common, Windows only) and tools/freeze
565 The code supports py2exe (most common, Windows only) and tools/freeze
566 (portable, not much used).
566 (portable, not much used).
567 """
567 """
568 return (hasattr(sys, "frozen") or # new py2exe
568 return (hasattr(sys, "frozen") or # new py2exe
569 hasattr(sys, "importers") or # old py2exe
569 hasattr(sys, "importers") or # old py2exe
570 imp.is_frozen("__main__")) # tools/freeze
570 imp.is_frozen("__main__")) # tools/freeze
571
571
572 def hgexecutable():
572 def hgexecutable():
573 """return location of the 'hg' executable.
573 """return location of the 'hg' executable.
574
574
575 Defaults to $HG or 'hg' in the search path.
575 Defaults to $HG or 'hg' in the search path.
576 """
576 """
577 if _hgexecutable is None:
577 if _hgexecutable is None:
578 hg = os.environ.get('HG')
578 hg = os.environ.get('HG')
579 if hg:
579 if hg:
580 set_hgexecutable(hg)
580 set_hgexecutable(hg)
581 elif main_is_frozen():
581 elif main_is_frozen():
582 set_hgexecutable(sys.executable)
582 set_hgexecutable(sys.executable)
583 else:
583 else:
584 set_hgexecutable(find_exe('hg', 'hg'))
584 set_hgexecutable(find_exe('hg', 'hg'))
585 return _hgexecutable
585 return _hgexecutable
586
586
587 def set_hgexecutable(path):
587 def set_hgexecutable(path):
588 """set location of the 'hg' executable"""
588 """set location of the 'hg' executable"""
589 global _hgexecutable
589 global _hgexecutable
590 _hgexecutable = path
590 _hgexecutable = path
591
591
592 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
592 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
593 '''enhanced shell command execution.
593 '''enhanced shell command execution.
594 run with environment maybe modified, maybe in different dir.
594 run with environment maybe modified, maybe in different dir.
595
595
596 if command fails and onerr is None, return status. if ui object,
596 if command fails and onerr is None, return status. if ui object,
597 print error message and return status, else raise onerr object as
597 print error message and return status, else raise onerr object as
598 exception.'''
598 exception.'''
599 def py2shell(val):
599 def py2shell(val):
600 'convert python object into string that is useful to shell'
600 'convert python object into string that is useful to shell'
601 if val in (None, False):
601 if val in (None, False):
602 return '0'
602 return '0'
603 if val == True:
603 if val == True:
604 return '1'
604 return '1'
605 return str(val)
605 return str(val)
606 oldenv = {}
606 oldenv = {}
607 for k in environ:
607 for k in environ:
608 oldenv[k] = os.environ.get(k)
608 oldenv[k] = os.environ.get(k)
609 if cwd is not None:
609 if cwd is not None:
610 oldcwd = os.getcwd()
610 oldcwd = os.getcwd()
611 origcmd = cmd
611 origcmd = cmd
612 if os.name == 'nt':
612 if os.name == 'nt':
613 cmd = '"%s"' % cmd
613 cmd = '"%s"' % cmd
614 try:
614 try:
615 for k, v in environ.iteritems():
615 for k, v in environ.iteritems():
616 os.environ[k] = py2shell(v)
616 os.environ[k] = py2shell(v)
617 os.environ['HG'] = hgexecutable()
617 os.environ['HG'] = hgexecutable()
618 if cwd is not None and oldcwd != cwd:
618 if cwd is not None and oldcwd != cwd:
619 os.chdir(cwd)
619 os.chdir(cwd)
620 rc = os.system(cmd)
620 rc = os.system(cmd)
621 if sys.platform == 'OpenVMS' and rc & 1:
621 if sys.platform == 'OpenVMS' and rc & 1:
622 rc = 0
622 rc = 0
623 if rc and onerr:
623 if rc and onerr:
624 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
624 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
625 explain_exit(rc)[0])
625 explain_exit(rc)[0])
626 if errprefix:
626 if errprefix:
627 errmsg = '%s: %s' % (errprefix, errmsg)
627 errmsg = '%s: %s' % (errprefix, errmsg)
628 try:
628 try:
629 onerr.warn(errmsg + '\n')
629 onerr.warn(errmsg + '\n')
630 except AttributeError:
630 except AttributeError:
631 raise onerr(errmsg)
631 raise onerr(errmsg)
632 return rc
632 return rc
633 finally:
633 finally:
634 for k, v in oldenv.iteritems():
634 for k, v in oldenv.iteritems():
635 if v is None:
635 if v is None:
636 del os.environ[k]
636 del os.environ[k]
637 else:
637 else:
638 os.environ[k] = v
638 os.environ[k] = v
639 if cwd is not None and oldcwd != cwd:
639 if cwd is not None and oldcwd != cwd:
640 os.chdir(oldcwd)
640 os.chdir(oldcwd)
641
641
642 # os.path.lexists is not available on python2.3
642 # os.path.lexists is not available on python2.3
643 def lexists(filename):
643 def lexists(filename):
644 "test whether a file with this name exists. does not follow symlinks"
644 "test whether a file with this name exists. does not follow symlinks"
645 try:
645 try:
646 os.lstat(filename)
646 os.lstat(filename)
647 except:
647 except:
648 return False
648 return False
649 return True
649 return True
650
650
651 def rename(src, dst):
651 def rename(src, dst):
652 """forcibly rename a file"""
652 """forcibly rename a file"""
653 try:
653 try:
654 os.rename(src, dst)
654 os.rename(src, dst)
655 except OSError, err: # FIXME: check err (EEXIST ?)
655 except OSError, err: # FIXME: check err (EEXIST ?)
656 # on windows, rename to existing file is not allowed, so we
656 # on windows, rename to existing file is not allowed, so we
657 # must delete destination first. but if file is open, unlink
657 # must delete destination first. but if file is open, unlink
658 # schedules it for delete but does not delete it. rename
658 # schedules it for delete but does not delete it. rename
659 # happens immediately even for open files, so we create
659 # happens immediately even for open files, so we create
660 # temporary file, delete it, rename destination to that name,
660 # temporary file, delete it, rename destination to that name,
661 # then delete that. then rename is safe to do.
661 # then delete that. then rename is safe to do.
662 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
662 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
663 os.close(fd)
663 os.close(fd)
664 os.unlink(temp)
664 os.unlink(temp)
665 os.rename(dst, temp)
665 os.rename(dst, temp)
666 os.unlink(temp)
666 os.unlink(temp)
667 os.rename(src, dst)
667 os.rename(src, dst)
668
668
669 def unlink(f):
669 def unlink(f):
670 """unlink and remove the directory if it is empty"""
670 """unlink and remove the directory if it is empty"""
671 os.unlink(f)
671 os.unlink(f)
672 # try removing directories that might now be empty
672 # try removing directories that might now be empty
673 try:
673 try:
674 os.removedirs(os.path.dirname(f))
674 os.removedirs(os.path.dirname(f))
675 except OSError:
675 except OSError:
676 pass
676 pass
677
677
678 def copyfile(src, dest):
678 def copyfile(src, dest):
679 "copy a file, preserving mode"
679 "copy a file, preserving mode"
680 if os.path.islink(src):
680 if os.path.islink(src):
681 try:
681 try:
682 os.unlink(dest)
682 os.unlink(dest)
683 except:
683 except:
684 pass
684 pass
685 os.symlink(os.readlink(src), dest)
685 os.symlink(os.readlink(src), dest)
686 else:
686 else:
687 try:
687 try:
688 shutil.copyfile(src, dest)
688 shutil.copyfile(src, dest)
689 shutil.copymode(src, dest)
689 shutil.copymode(src, dest)
690 except shutil.Error, inst:
690 except shutil.Error, inst:
691 raise Abort(str(inst))
691 raise Abort(str(inst))
692
692
693 def copyfiles(src, dst, hardlink=None):
693 def copyfiles(src, dst, hardlink=None):
694 """Copy a directory tree using hardlinks if possible"""
694 """Copy a directory tree using hardlinks if possible"""
695
695
696 if hardlink is None:
696 if hardlink is None:
697 hardlink = (os.stat(src).st_dev ==
697 hardlink = (os.stat(src).st_dev ==
698 os.stat(os.path.dirname(dst)).st_dev)
698 os.stat(os.path.dirname(dst)).st_dev)
699
699
700 if os.path.isdir(src):
700 if os.path.isdir(src):
701 os.mkdir(dst)
701 os.mkdir(dst)
702 for name, kind in osutil.listdir(src):
702 for name, kind in osutil.listdir(src):
703 srcname = os.path.join(src, name)
703 srcname = os.path.join(src, name)
704 dstname = os.path.join(dst, name)
704 dstname = os.path.join(dst, name)
705 copyfiles(srcname, dstname, hardlink)
705 copyfiles(srcname, dstname, hardlink)
706 else:
706 else:
707 if hardlink:
707 if hardlink:
708 try:
708 try:
709 os_link(src, dst)
709 os_link(src, dst)
710 except (IOError, OSError):
710 except (IOError, OSError):
711 hardlink = False
711 hardlink = False
712 shutil.copy(src, dst)
712 shutil.copy(src, dst)
713 else:
713 else:
714 shutil.copy(src, dst)
714 shutil.copy(src, dst)
715
715
716 class path_auditor(object):
716 class path_auditor(object):
717 '''ensure that a filesystem path contains no banned components.
717 '''ensure that a filesystem path contains no banned components.
718 the following properties of a path are checked:
718 the following properties of a path are checked:
719
719
720 - under top-level .hg
720 - under top-level .hg
721 - starts at the root of a windows drive
721 - starts at the root of a windows drive
722 - contains ".."
722 - contains ".."
723 - traverses a symlink (e.g. a/symlink_here/b)
723 - traverses a symlink (e.g. a/symlink_here/b)
724 - inside a nested repository'''
724 - inside a nested repository'''
725
725
726 def __init__(self, root):
726 def __init__(self, root):
727 self.audited = set()
727 self.audited = set()
728 self.auditeddir = set()
728 self.auditeddir = set()
729 self.root = root
729 self.root = root
730
730
731 def __call__(self, path):
731 def __call__(self, path):
732 if path in self.audited:
732 if path in self.audited:
733 return
733 return
734 normpath = os.path.normcase(path)
734 normpath = os.path.normcase(path)
735 parts = splitpath(normpath)
735 parts = splitpath(normpath)
736 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
736 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
737 or os.pardir in parts):
737 or os.pardir in parts):
738 raise Abort(_("path contains illegal component: %s") % path)
738 raise Abort(_("path contains illegal component: %s") % path)
739 def check(prefix):
739 def check(prefix):
740 curpath = os.path.join(self.root, prefix)
740 curpath = os.path.join(self.root, prefix)
741 try:
741 try:
742 st = os.lstat(curpath)
742 st = os.lstat(curpath)
743 except OSError, err:
743 except OSError, err:
744 # EINVAL can be raised as invalid path syntax under win32.
744 # EINVAL can be raised as invalid path syntax under win32.
745 # They must be ignored for patterns can be checked too.
745 # They must be ignored for patterns can be checked too.
746 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
746 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
747 raise
747 raise
748 else:
748 else:
749 if stat.S_ISLNK(st.st_mode):
749 if stat.S_ISLNK(st.st_mode):
750 raise Abort(_('path %r traverses symbolic link %r') %
750 raise Abort(_('path %r traverses symbolic link %r') %
751 (path, prefix))
751 (path, prefix))
752 elif (stat.S_ISDIR(st.st_mode) and
752 elif (stat.S_ISDIR(st.st_mode) and
753 os.path.isdir(os.path.join(curpath, '.hg'))):
753 os.path.isdir(os.path.join(curpath, '.hg'))):
754 raise Abort(_('path %r is inside repo %r') %
754 raise Abort(_('path %r is inside repo %r') %
755 (path, prefix))
755 (path, prefix))
756 parts.pop()
756 parts.pop()
757 prefixes = []
757 prefixes = []
758 for n in range(len(parts)):
758 for n in range(len(parts)):
759 prefix = os.sep.join(parts)
759 prefix = os.sep.join(parts)
760 if prefix in self.auditeddir:
760 if prefix in self.auditeddir:
761 break
761 break
762 check(prefix)
762 check(prefix)
763 prefixes.append(prefix)
763 prefixes.append(prefix)
764 parts.pop()
764 parts.pop()
765
765
766 self.audited.add(path)
766 self.audited.add(path)
767 # only add prefixes to the cache after checking everything: we don't
767 # only add prefixes to the cache after checking everything: we don't
768 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
768 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
769 self.auditeddir.update(prefixes)
769 self.auditeddir.update(prefixes)
770
770
771 def _makelock_file(info, pathname):
771 def _makelock_file(info, pathname):
772 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
772 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
773 os.write(ld, info)
773 os.write(ld, info)
774 os.close(ld)
774 os.close(ld)
775
775
776 def _readlock_file(pathname):
776 def _readlock_file(pathname):
777 return posixfile(pathname).read()
777 return posixfile(pathname).read()
778
778
779 def nlinks(pathname):
779 def nlinks(pathname):
780 """Return number of hardlinks for the given file."""
780 """Return number of hardlinks for the given file."""
781 return os.lstat(pathname).st_nlink
781 return os.lstat(pathname).st_nlink
782
782
783 if hasattr(os, 'link'):
783 if hasattr(os, 'link'):
784 os_link = os.link
784 os_link = os.link
785 else:
785 else:
786 def os_link(src, dst):
786 def os_link(src, dst):
787 raise OSError(0, _("Hardlinks not supported"))
787 raise OSError(0, _("Hardlinks not supported"))
788
788
789 def fstat(fp):
789 def fstat(fp):
790 '''stat file object that may not have fileno method.'''
790 '''stat file object that may not have fileno method.'''
791 try:
791 try:
792 return os.fstat(fp.fileno())
792 return os.fstat(fp.fileno())
793 except AttributeError:
793 except AttributeError:
794 return os.stat(fp.name)
794 return os.stat(fp.name)
795
795
796 posixfile = file
796 posixfile = file
797
797
798 def openhardlinks():
798 def openhardlinks():
799 '''return true if it is safe to hold open file handles to hardlinks'''
799 '''return true if it is safe to hold open file handles to hardlinks'''
800 return True
800 return True
801
801
802 getuser_fallback = None
802 getuser_fallback = None
803
803
804 def getuser():
804 def getuser():
805 '''return name of current user'''
805 '''return name of current user'''
806 try:
806 try:
807 return getpass.getuser()
807 return getpass.getuser()
808 except ImportError:
808 except ImportError:
809 # import of pwd will fail on windows - try fallback
809 # import of pwd will fail on windows - try fallback
810 if getuser_fallback:
810 if getuser_fallback:
811 return getuser_fallback()
811 return getuser_fallback()
812 # raised if win32api not available
812 # raised if win32api not available
813 raise Abort(_('user name not available - set USERNAME '
813 raise Abort(_('user name not available - set USERNAME '
814 'environment variable'))
814 'environment variable'))
815
815
816 def username(uid=None):
816 def username(uid=None):
817 """Return the name of the user with the given uid.
817 """Return the name of the user with the given uid.
818
818
819 If uid is None, return the name of the current user."""
819 If uid is None, return the name of the current user."""
820 try:
820 try:
821 import pwd
821 import pwd
822 if uid is None:
822 if uid is None:
823 uid = os.getuid()
823 uid = os.getuid()
824 try:
824 try:
825 return pwd.getpwuid(uid)[0]
825 return pwd.getpwuid(uid)[0]
826 except KeyError:
826 except KeyError:
827 return str(uid)
827 return str(uid)
828 except ImportError:
828 except ImportError:
829 return None
829 return None
830
830
831 def groupname(gid=None):
831 def groupname(gid=None):
832 """Return the name of the group with the given gid.
832 """Return the name of the group with the given gid.
833
833
834 If gid is None, return the name of the current group."""
834 If gid is None, return the name of the current group."""
835 try:
835 try:
836 import grp
836 import grp
837 if gid is None:
837 if gid is None:
838 gid = os.getgid()
838 gid = os.getgid()
839 try:
839 try:
840 return grp.getgrgid(gid)[0]
840 return grp.getgrgid(gid)[0]
841 except KeyError:
841 except KeyError:
842 return str(gid)
842 return str(gid)
843 except ImportError:
843 except ImportError:
844 return None
844 return None
845
845
846 # File system features
846 # File system features
847
847
848 def checkcase(path):
848 def checkcase(path):
849 """
849 """
850 Check whether the given path is on a case-sensitive filesystem
850 Check whether the given path is on a case-sensitive filesystem
851
851
852 Requires a path (like /foo/.hg) ending with a foldable final
852 Requires a path (like /foo/.hg) ending with a foldable final
853 directory component.
853 directory component.
854 """
854 """
855 s1 = os.stat(path)
855 s1 = os.stat(path)
856 d, b = os.path.split(path)
856 d, b = os.path.split(path)
857 p2 = os.path.join(d, b.upper())
857 p2 = os.path.join(d, b.upper())
858 if path == p2:
858 if path == p2:
859 p2 = os.path.join(d, b.lower())
859 p2 = os.path.join(d, b.lower())
860 try:
860 try:
861 s2 = os.stat(p2)
861 s2 = os.stat(p2)
862 if s2 == s1:
862 if s2 == s1:
863 return False
863 return False
864 return True
864 return True
865 except:
865 except:
866 return True
866 return True
867
867
868 _fspathcache = {}
868 _fspathcache = {}
869 def fspath(name, root):
869 def fspath(name, root):
870 '''Get name in the case stored in the filesystem
870 '''Get name in the case stored in the filesystem
871
871
872 The name is either relative to root, or it is an absolute path starting
872 The name is either relative to root, or it is an absolute path starting
873 with root. Note that this function is unnecessary, and should not be
873 with root. Note that this function is unnecessary, and should not be
874 called, for case-sensitive filesystems (simply because it's expensive).
874 called, for case-sensitive filesystems (simply because it's expensive).
875 '''
875 '''
876 # If name is absolute, make it relative
876 # If name is absolute, make it relative
877 if name.lower().startswith(root.lower()):
877 if name.lower().startswith(root.lower()):
878 l = len(root)
878 l = len(root)
879 if name[l] == os.sep or name[l] == os.altsep:
879 if name[l] == os.sep or name[l] == os.altsep:
880 l = l + 1
880 l = l + 1
881 name = name[l:]
881 name = name[l:]
882
882
883 if not os.path.exists(os.path.join(root, name)):
883 if not os.path.exists(os.path.join(root, name)):
884 return None
884 return None
885
885
886 seps = os.sep
886 seps = os.sep
887 if os.altsep:
887 if os.altsep:
888 seps = seps + os.altsep
888 seps = seps + os.altsep
889 # Protect backslashes. This gets silly very quickly.
889 # Protect backslashes. This gets silly very quickly.
890 seps.replace('\\','\\\\')
890 seps.replace('\\','\\\\')
891 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
891 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
892 dir = os.path.normcase(os.path.normpath(root))
892 dir = os.path.normcase(os.path.normpath(root))
893 result = []
893 result = []
894 for part, sep in pattern.findall(name):
894 for part, sep in pattern.findall(name):
895 if sep:
895 if sep:
896 result.append(sep)
896 result.append(sep)
897 continue
897 continue
898
898
899 if dir not in _fspathcache:
899 if dir not in _fspathcache:
900 _fspathcache[dir] = os.listdir(dir)
900 _fspathcache[dir] = os.listdir(dir)
901 contents = _fspathcache[dir]
901 contents = _fspathcache[dir]
902
902
903 lpart = part.lower()
903 lpart = part.lower()
904 for n in contents:
904 for n in contents:
905 if n.lower() == lpart:
905 if n.lower() == lpart:
906 result.append(n)
906 result.append(n)
907 break
907 break
908 else:
908 else:
909 # Cannot happen, as the file exists!
909 # Cannot happen, as the file exists!
910 result.append(part)
910 result.append(part)
911 dir = os.path.join(dir, lpart)
911 dir = os.path.join(dir, lpart)
912
912
913 return ''.join(result)
913 return ''.join(result)
914
914
915 def checkexec(path):
915 def checkexec(path):
916 """
916 """
917 Check whether the given path is on a filesystem with UNIX-like exec flags
917 Check whether the given path is on a filesystem with UNIX-like exec flags
918
918
919 Requires a directory (like /foo/.hg)
919 Requires a directory (like /foo/.hg)
920 """
920 """
921
921
922 # VFAT on some Linux versions can flip mode but it doesn't persist
922 # VFAT on some Linux versions can flip mode but it doesn't persist
923 # a FS remount. Frequently we can detect it if files are created
923 # a FS remount. Frequently we can detect it if files are created
924 # with exec bit on.
924 # with exec bit on.
925
925
926 try:
926 try:
927 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
927 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
928 fh, fn = tempfile.mkstemp("", "", path)
928 fh, fn = tempfile.mkstemp("", "", path)
929 try:
929 try:
930 os.close(fh)
930 os.close(fh)
931 m = os.stat(fn).st_mode & 0777
931 m = os.stat(fn).st_mode & 0777
932 new_file_has_exec = m & EXECFLAGS
932 new_file_has_exec = m & EXECFLAGS
933 os.chmod(fn, m ^ EXECFLAGS)
933 os.chmod(fn, m ^ EXECFLAGS)
934 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
934 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
935 finally:
935 finally:
936 os.unlink(fn)
936 os.unlink(fn)
937 except (IOError, OSError):
937 except (IOError, OSError):
938 # we don't care, the user probably won't be able to commit anyway
938 # we don't care, the user probably won't be able to commit anyway
939 return False
939 return False
940 return not (new_file_has_exec or exec_flags_cannot_flip)
940 return not (new_file_has_exec or exec_flags_cannot_flip)
941
941
942 def checklink(path):
942 def checklink(path):
943 """check whether the given path is on a symlink-capable filesystem"""
943 """check whether the given path is on a symlink-capable filesystem"""
944 # mktemp is not racy because symlink creation will fail if the
944 # mktemp is not racy because symlink creation will fail if the
945 # file already exists
945 # file already exists
946 name = tempfile.mktemp(dir=path)
946 name = tempfile.mktemp(dir=path)
947 try:
947 try:
948 os.symlink(".", name)
948 os.symlink(".", name)
949 os.unlink(name)
949 os.unlink(name)
950 return True
950 return True
951 except (OSError, AttributeError):
951 except (OSError, AttributeError):
952 return False
952 return False
953
953
954 _umask = os.umask(0)
954 _umask = os.umask(0)
955 os.umask(_umask)
955 os.umask(_umask)
956
956
957 def needbinarypatch():
957 def needbinarypatch():
958 """return True if patches should be applied in binary mode by default."""
958 """return True if patches should be applied in binary mode by default."""
959 return os.name == 'nt'
959 return os.name == 'nt'
960
960
961 def endswithsep(path):
961 def endswithsep(path):
962 '''Check path ends with os.sep or os.altsep.'''
962 '''Check path ends with os.sep or os.altsep.'''
963 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
963 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
964
964
965 def splitpath(path):
965 def splitpath(path):
966 '''Split path by os.sep.
966 '''Split path by os.sep.
967 Note that this function does not use os.altsep because this is
967 Note that this function does not use os.altsep because this is
968 an alternative of simple "xxx.split(os.sep)".
968 an alternative of simple "xxx.split(os.sep)".
969 It is recommended to use os.path.normpath() before using this
969 It is recommended to use os.path.normpath() before using this
970 function if need.'''
970 function if need.'''
971 return path.split(os.sep)
971 return path.split(os.sep)
972
972
973 def gui():
973 def gui():
974 '''Are we running in a GUI?'''
974 '''Are we running in a GUI?'''
975 return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
975 return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
976
976
977 def lookup_reg(key, name=None, scope=None):
977 def lookup_reg(key, name=None, scope=None):
978 return None
978 return None
979
979
980 # Platform specific variants
980 # Platform specific variants
981 if os.name == 'nt':
981 if os.name == 'nt':
982 import msvcrt
982 import msvcrt
983 nulldev = 'NUL:'
983 nulldev = 'NUL:'
984
984
985 class winstdout:
985 class winstdout:
986 '''stdout on windows misbehaves if sent through a pipe'''
986 '''stdout on windows misbehaves if sent through a pipe'''
987
987
988 def __init__(self, fp):
988 def __init__(self, fp):
989 self.fp = fp
989 self.fp = fp
990
990
991 def __getattr__(self, key):
991 def __getattr__(self, key):
992 return getattr(self.fp, key)
992 return getattr(self.fp, key)
993
993
994 def close(self):
994 def close(self):
995 try:
995 try:
996 self.fp.close()
996 self.fp.close()
997 except: pass
997 except: pass
998
998
999 def write(self, s):
999 def write(self, s):
1000 try:
1000 try:
1001 # This is workaround for "Not enough space" error on
1001 # This is workaround for "Not enough space" error on
1002 # writing large size of data to console.
1002 # writing large size of data to console.
1003 limit = 16000
1003 limit = 16000
1004 l = len(s)
1004 l = len(s)
1005 start = 0
1005 start = 0
1006 while start < l:
1006 while start < l:
1007 end = start + limit
1007 end = start + limit
1008 self.fp.write(s[start:end])
1008 self.fp.write(s[start:end])
1009 start = end
1009 start = end
1010 except IOError, inst:
1010 except IOError, inst:
1011 if inst.errno != 0: raise
1011 if inst.errno != 0: raise
1012 self.close()
1012 self.close()
1013 raise IOError(errno.EPIPE, 'Broken pipe')
1013 raise IOError(errno.EPIPE, 'Broken pipe')
1014
1014
1015 def flush(self):
1015 def flush(self):
1016 try:
1016 try:
1017 return self.fp.flush()
1017 return self.fp.flush()
1018 except IOError, inst:
1018 except IOError, inst:
1019 if inst.errno != errno.EINVAL: raise
1019 if inst.errno != errno.EINVAL: raise
1020 self.close()
1020 self.close()
1021 raise IOError(errno.EPIPE, 'Broken pipe')
1021 raise IOError(errno.EPIPE, 'Broken pipe')
1022
1022
1023 sys.stdout = winstdout(sys.stdout)
1023 sys.stdout = winstdout(sys.stdout)
1024
1024
1025 def _is_win_9x():
1025 def _is_win_9x():
1026 '''return true if run on windows 95, 98 or me.'''
1026 '''return true if run on windows 95, 98 or me.'''
1027 try:
1027 try:
1028 return sys.getwindowsversion()[3] == 1
1028 return sys.getwindowsversion()[3] == 1
1029 except AttributeError:
1029 except AttributeError:
1030 return 'command' in os.environ.get('comspec', '')
1030 return 'command' in os.environ.get('comspec', '')
1031
1031
1032 def openhardlinks():
1032 def openhardlinks():
1033 return not _is_win_9x and "win32api" in locals()
1033 return not _is_win_9x and "win32api" in locals()
1034
1034
def system_rcpath():
    '''return the system-wide hgrc search path for Windows'''
    try:
        return system_rcpath_win32()
    except:
        # win32 registry helpers unavailable; use the historic path
        return [r'c:\mercurial\mercurial.ini']
1040
1040
def user_rcpath():
    '''return os-specific hgrc search path to the user dir'''
    try:
        rcpath = user_rcpath_win32()
    except:
        # no win32 helpers; derive both names from the home directory
        home = os.path.expanduser('~')
        rcpath = [os.path.join(home, f)
                  for f in ('mercurial.ini', '.hgrc')]
    profile = os.environ.get('USERPROFILE')
    if profile:
        rcpath.extend(os.path.join(profile, f)
                      for f in ('mercurial.ini', '.hgrc'))
    return rcpath
1054
1054
def parse_patch_output(output_line):
    """parses the output produced by patch and returns the file name"""
    # drop the fixed "patching file " prefix (14 characters)
    name = output_line[14:]
    if name[0] == '`':
        name = name[1:-1]  # strip GNU patch's backquote quoting
    return name
1061
1061
def sshargs(sshcmd, host, user, port):
    '''Build argument list for ssh or Plink'''
    # Plink (PuTTY) spells the port option -P rather than -p
    pflag = '-P' if 'plink' in sshcmd.lower() else '-p'
    target = "%s@%s" % (user, host) if user else host
    return "%s %s %s" % (target, pflag, port) if port else target
1067
1067
1068 def testpid(pid):
1068 def testpid(pid):
1069 '''return False if pid dead, True if running or not known'''
1069 '''return False if pid dead, True if running or not known'''
1070 return True
1070 return True
1071
1071
def set_flags(f, flags):
    # exec/symlink flags are not tracked on this platform
    pass
1074
1074
def set_binary(fd):
    """Switch fd to binary mode when it wraps a usable descriptor."""
    # When run without console, pipes may expose an invalid fileno(),
    # usually set to -1 -- leave those untouched.
    fileno = getattr(fd, 'fileno', None)
    if fileno is not None and fileno() >= 0:
        msvcrt.setmode(fileno(), os.O_BINARY)
1080
1080
def pconvert(path):
    """convert a local path into a canonical /-separated path"""
    return '/'.join(splitpath(path))
1083
1083
def localpath(path):
    """convert a /-separated path into the local backslash form"""
    return path.replace('/', '\\')
1086
1086
def normpath(path):
    """normalize a path, returning it /-separated"""
    return pconvert(os.path.normpath(path))
1089
1089
1090 makelock = _makelock_file
1090 makelock = _makelock_file
1091 readlock = _readlock_file
1091 readlock = _readlock_file
1092
1092
def samestat(s1, s2):
    # stat results cannot be compared for identity on this platform
    return False
1095
1095
1096 # A sequence of backslashes is special iff it precedes a double quote:
1096 # A sequence of backslashes is special iff it precedes a double quote:
1097 # - if there's an even number of backslashes, the double quote is not
1097 # - if there's an even number of backslashes, the double quote is not
1098 # quoted (i.e. it ends the quoted region)
1098 # quoted (i.e. it ends the quoted region)
1099 # - if there's an odd number of backslashes, the double quote is quoted
1099 # - if there's an odd number of backslashes, the double quote is quoted
1100 # - in both cases, every pair of backslashes is unquoted into a single
1100 # - in both cases, every pair of backslashes is unquoted into a single
1101 # backslash
1101 # backslash
1102 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
1102 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
1103 # So, to quote a string, we must surround it in double quotes, double
1103 # So, to quote a string, we must surround it in double quotes, double
1104 # the number of backslashes that precede double quotes and add another
1104 # the number of backslashes that precede double quotes and add another
1105 # backslash before every double quote (being careful with the double
1105 # backslash before every double quote (being careful with the double
1106 # quote we've appended to the end)
1106 # quote we've appended to the end)
_quotere = None

def shellquote(s):
    """Quote s for use as a cmd.exe argument (MSVCRT quoting rules)."""
    global _quotere
    if _quotere is None:
        # compiled lazily and cached at module level
        _quotere = re.compile(r'(\\*)("|\\$)')
    # double each backslash run before a quote, then escape the quote
    return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
1113
1113
def quotecommand(cmd):
    """Build a command string suitable for os.popen* calls."""
    # The extra quotes are needed because popen* runs the command
    # through the current COMSPEC, and cmd.exe strips one level of
    # enclosing quotes.
    return '"%s"' % cmd
1119
1119
def popen(command, mode='r'):
    """os.popen with stderr discarded.

    Works around "popen spawned process may not write to stdout under
    windows" (http://bugs.python.org/issue1366).
    """
    redirected = command + " 2> %s" % nulldev
    return os.popen(quotecommand(redirected), mode)
1126
1126
def explain_exit(code):
    """return a 2-tuple (desc, code) describing a process's status"""
    desc = _("exited with status %d") % code
    return desc, code
1129
1129
# if you change this stub into a real check, please try to implement the
# username and groupname functions above, too.
def isowner(fp, st=None):
    """Pretend the current user owns fp (no real check on this platform)."""
    return True
1134
1134
def find_in_path(name, path, default=None):
    '''find name in search path. path can be string (will be split
    with os.pathsep), or iterable thing that returns strings. if name
    found, return path to name. else return default. name is looked up
    using cmd.exe rules, using PATHEXT.'''
    if isinstance(path, str):
        path = path.split(os.pathsep)

    exts = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
    exts = exts.lower().split(os.pathsep)
    # a name already carrying an executable extension is tried as-is
    has_ext = os.path.splitext(name)[1].lower() in exts

    for directory in path:
        candidate = os.path.join(directory, name)

        if has_ext and os.path.exists(candidate):
            return candidate

        for ext in exts:
            if os.path.exists(candidate + ext):
                return candidate + ext
    return default
1158
1158
def set_signal_handler():
    """Install the win32 console handler when the helper exists."""
    try:
        set_signal_handler_win32()
    except NameError:
        # util_win32 was not imported; nothing to install
        pass
1164
1164
1165 try:
1165 try:
1166 # override functions with win32 versions if possible
1166 # override functions with win32 versions if possible
1167 from util_win32 import *
1167 from util_win32 import *
1168 if not _is_win_9x():
1168 if not _is_win_9x():
1169 posixfile = posixfile_nt
1169 posixfile = posixfile_nt
1170 except ImportError:
1170 except ImportError:
1171 pass
1171 pass
1172
1172
1173 else:
1173 else:
1174 nulldev = '/dev/null'
1174 nulldev = '/dev/null'
1175
1175
def rcfiles(path):
    """Return path/hgrc plus any *.rc files found under path/hgrc.d."""
    rcs = [os.path.join(path, 'hgrc')]
    rcdir = os.path.join(path, 'hgrc.d')
    try:
        rcs.extend(os.path.join(rcdir, f)
                   for f, kind in osutil.listdir(rcdir)
                   if f.endswith(".rc"))
    except OSError:
        # hgrc.d missing or unreadable; only the plain hgrc applies
        pass
    return rcs
1186
1186
def system_rcpath():
    """Return the system-wide hgrc search path."""
    path = []
    # old mod_python does not set sys.argv
    if getattr(sys, 'argv', None):
        path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
                            '/../etc/mercurial'))
    path.extend(rcfiles('/etc/mercurial'))
    return path
1195
1195
def user_rcpath():
    '''return os-specific hgrc search path to the user dir'''
    return [os.path.expanduser('~/.hgrc')]
1198
1198
def parse_patch_output(output_line):
    """parses the output produced by patch and returns the file name"""
    # drop the fixed "patching file " prefix (14 characters)
    name = output_line[14:]
    if os.sys.platform == 'OpenVMS':
        if name[0] == '`':
            name = name[1:-1]  # strip the backquote quoting
    elif name.startswith("'") and name.endswith("'") and " " in name:
        name = name[1:-1]  # strip the quotes added around spaced names
    return name
1209
1209
def sshargs(sshcmd, host, user, port):
    '''Build argument list for ssh'''
    target = "%s@%s" % (user, host) if user else host
    return "%s -p %s" % (target, port) if port else target
1214
1214
def is_exec(f):
    """check whether a file is executable (owner exec bit set)"""
    # S_IXUSR == 0100; spelled symbolically because bare octal
    # literals are Python 2-only syntax
    return bool(os.lstat(f).st_mode & stat.S_IXUSR)
1218
1218
def set_flags(f, flags):
    """Set the 'x' (executable) and 'l' (symlink) flags on path f.

    Fixed: the Python 2-only ``file()`` builtin is replaced by
    ``open()`` and bare octal literals by 0o-prefixed ones; behavior
    is unchanged.
    """
    s = os.lstat(f).st_mode
    x = "x" in flags
    l = "l" in flags
    if l:
        if not stat.S_ISLNK(s):
            # switch file to link: the file's content becomes the
            # link target
            data = open(f).read()
            os.unlink(f)
            os.symlink(data, f)
        # no chmod needed at this point
        return
    if stat.S_ISLNK(s):
        # switch link to file: materialize the target string
        data = os.readlink(f)
        os.unlink(f)
        open(f, "w").write(data)
        s = 0o666 & ~_umask # avoid restatting for chmod

    sx = s & 0o100
    if x and not sx:
        # Turn on +x for every +r bit when making a file executable
        # and obey umask.
        os.chmod(f, s | (s & 0o444) >> 2 & ~_umask)
    elif not x and sx:
        # Turn off all +x bits
        os.chmod(f, s & 0o666)
1246
1246
def set_binary(fd):
    # no binary/text distinction on this platform
    pass
1249
1249
def pconvert(path):
    # paths are already /-separated on this platform
    return path
1252
1252
def localpath(path):
    # the local separator is already '/'
    return path
1255
1255
1256 normpath = os.path.normpath
1256 normpath = os.path.normpath
1257 samestat = os.path.samestat
1257 samestat = os.path.samestat
1258
1258
def makelock(info, pathname):
    """Create a lock at pathname whose content/target is info.

    Prefers a symlink (atomic creation); on filesystems without
    symlink support, falls back to writing a regular lock file.
    (Fixed: Python 2-only ``except OSError, why`` comma syntax.)
    """
    try:
        os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            # lock already held: let the caller handle contention
            raise
        else:
            _makelock_file(info, pathname)
1267
1267
def readlock(pathname):
    """Return the info stored in the lock at pathname.

    (Fixed: Python 2-only ``except OSError, why`` comma syntax.)
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: lock is a plain file; ENOSYS: no symlink support
        if why.errno in (errno.EINVAL, errno.ENOSYS):
            return _readlock_file(pathname)
        else:
            raise
1276
1276
def shellquote(s):
    """Quote s for safe interpolation into a Bourne shell command."""
    if os.sys.platform == 'OpenVMS':
        return '"%s"' % s
    # close the quote, emit an escaped quote, reopen the quote
    return "'%s'" % s.replace("'", "'\\''")
1282
1282
def quotecommand(cmd):
    # the shell receives the command verbatim on this platform
    return cmd
1285
1285
def popen(command, mode='r'):
    """Thin wrapper around os.popen, for API symmetry with win32."""
    return os.popen(command, mode)
1288
1288
1289 def testpid(pid):
1289 def testpid(pid):
1290 '''return False if pid dead, True if running or not sure'''
1290 '''return False if pid dead, True if running or not sure'''
1291 if os.sys.platform == 'OpenVMS':
1291 if os.sys.platform == 'OpenVMS':
1292 return True
1292 return True
1293 try:
1293 try:
1294 os.kill(pid, 0)
1294 os.kill(pid, 0)
1295 return True
1295 return True
1296 except OSError, inst:
1296 except OSError, inst:
1297 return inst.errno != errno.ESRCH
1297 return inst.errno != errno.ESRCH
1298
1298
def explain_exit(code):
    """return a 2-tuple (desc, code) describing a process's status"""
    if os.WIFEXITED(code):
        status = os.WEXITSTATUS(code)
        return _("exited with status %d") % status, status
    if os.WIFSIGNALED(code):
        sig = os.WTERMSIG(code)
        return _("killed by signal %d") % sig, sig
    if os.WIFSTOPPED(code):
        sig = os.WSTOPSIG(code)
        return _("stopped by signal %d") % sig, sig
    raise ValueError(_("invalid exit code"))
1311
1311
def isowner(fp, st=None):
    """Return True if the file object fp belongs to the current user.

    The return value of a util.fstat(fp) may be passed as the st
    argument to avoid a second stat call.
    """
    if st is None:
        st = fstat(fp)
    return st.st_uid == os.getuid()
1320
1320
def find_in_path(name, path, default=None):
    '''find name in search path. path can be string (will be split
    with os.pathsep), or iterable thing that returns strings. if name
    found, return path to name. else return default.'''
    if isinstance(path, str):
        path = path.split(os.pathsep)
    for directory in path:
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return candidate
    return default
1332
1332
def set_signal_handler():
    # nothing special to install on this platform
    pass
1335
1335
def find_exe(name, default=None):
    '''find path of an executable.
    if name contains a path component, return it as is. otherwise,
    use normal executable search path.'''

    if os.sep in name or sys.platform == 'OpenVMS':
        # don't check the executable bit: whoever actually runs the
        # file will produce a far more useful error message than we
        # could here
        return name
    return find_in_path(name, os.environ.get('PATH', ''), default=default)
1347
1347
def _buildencodefun():
    """Build the (encodefilename, decodefilename) pair.

    Encoding maps uppercase ASCII letters to '_'-prefixed lowercase
    (with '_' itself doubled) and control, 8-bit and Windows-reserved
    characters to '~XX' hex escapes, producing names that survive
    case-folding and Windows filesystems.  decode is the exact
    inverse and raises KeyError on malformed input.

    Fixed: Python 2-only ``xrange``/``iteritems`` and list-plus-range
    concatenation replaced by spellings valid on Python 2 and 3;
    behavior is identical.
    """
    e = '_'
    win_reserved = [ord(x) for x in '\\:*?"<>|']
    cmap = dict((chr(x), chr(x)) for x in range(127))
    for x in list(range(32)) + list(range(126, 256)) + win_reserved:
        cmap[chr(x)] = "~%02x" % x
    for x in list(range(ord("A"), ord("Z") + 1)) + [ord(e)]:
        cmap[chr(x)] = e + chr(x).lower()
    dmap = {}
    for k, v in cmap.items():
        dmap[v] = k
    def decode(s):
        i = 0
        while i < len(s):
            # tokens are 1 ('a'), 2 ('_a') or 3 ('~2f') chars long
            for l in range(1, 4):
                try:
                    yield dmap[s[i:i + l]]
                    i += l
                    break
                except KeyError:
                    pass
            else:
                raise KeyError
    return (lambda s: "".join([cmap[c] for c in s]),
            lambda s: "".join(list(decode(s))))

encodefilename, decodefilename = _buildencodefun()
1375
def encodedopener(openerfn, fn):
    """Wrap openerfn so that every path is passed through fn first."""
    def wrapped(path, *args, **kw):
        return openerfn(fn(path), *args, **kw)
    return wrapped
1380
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.

    (Fixed: Python 2-only comma-except syntax and bare octal
    literals; behavior is unchanged.)
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want.  If the original file already exists, just copy
    # its mode.  Otherwise, manually obey umask.
    try:
        st_mode = os.lstat(name).st_mode & 0o777
    except OSError as inst:
        if inst.errno != errno.ENOENT:
            raise
        st_mode = createmode
        if st_mode is None:
            st_mode = ~_umask
        st_mode &= 0o666
    os.chmod(temp, st_mode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # original vanished: the empty temp file is enough
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:
        # don't leave a partial temp file behind on any failure
        try:
            os.unlink(temp)
        except:
            pass
        raise
    return temp
1428
1395
class atomictempfile(posixfile):
    """file-like object that atomically updates a file

    All writes will be redirected to a temporary copy of the original
    file.  When rename is called, the copy is renamed to the original
    name, making the changes visible.
    """
    def __init__(self, name, mode, createmode):
        self.__name = name
        # a 'w' mode truncates anyway, so copying the data is wasted work
        self.temp = mktempcopy(name, emptyok=('w' in mode),
                               createmode=createmode)
        posixfile.__init__(self, self.temp, mode)

    def rename(self):
        # publish the temp copy under the real name
        if not self.closed:
            posixfile.close(self)
            rename(self.temp, localpath(self.__name))

    def __del__(self):
        # never renamed: discard the temp copy silently
        if not self.closed:
            try:
                os.unlink(self.temp)
            except:
                pass
            posixfile.close(self)
1453
1420
def makedirs(name, mode=None):
    """recursive directory creation with parent mode inheritance

    (Fixed: Python 2-only ``except OSError, err`` comma syntax.)
    """
    try:
        os.mkdir(name)
        if mode is not None:
            os.chmod(name, mode)
        return
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT:
            raise
    # a parent is missing: create it, then retry this directory
    parent = os.path.abspath(os.path.dirname(name))
    makedirs(parent, mode)
    makedirs(name, mode)
1469
1436
class opener(object):
    """Open files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.

    (Fixed: Python 2-only comma-except syntax in symlink() and the
    bare octal literal 0666; behavior is unchanged.)
    """
    def __init__(self, base, audit=True):
        # base: directory that all relative paths resolve against
        self.base = base
        if audit:
            self.audit_path = path_auditor(base)
        else:
            self.audit_path = always
        self.createmode = None

    def __getattr__(self, name):
        # compute and cache symlink capability on first access
        if name == '_can_symlink':
            self._can_symlink = checklink(self.base)
            return self._can_symlink
        raise AttributeError(name)

    def _fixfilemode(self, name):
        # apply createmode, never granting exec bits via the mask
        if self.createmode is None:
            return
        os.chmod(name, self.createmode & 0o666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        """Open path relative to base.

        text=False forces a 'b' in the mode; atomictemp=True returns
        an atomictempfile.  A multiply-linked file is copied before
        being opened for writing, preserving hardlink COW semantics.
        """
        self.audit_path(path)
        f = os.path.join(self.base, path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ("r", "rb"):
            try:
                nlink = nlinks(f)
            except OSError:
                nlink = 0
            d = os.path.dirname(f)
            if not os.path.isdir(d):
                makedirs(d, self.createmode)
            if atomictemp:
                return atomictempfile(f, mode, self.createmode)
            if nlink > 1:
                # break the hardlink before modifying the file
                rename(mktempcopy(f), f)
        fp = posixfile(f, mode)
        if nlink == 0:
            # file did not exist before: apply createmode
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        """Create dst (relative to base) as a symlink to src, or as a
        plain file containing src when symlinks are unsupported."""
        self.audit_path(dst)
        linkname = os.path.join(self.base, dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        dirname = os.path.dirname(linkname)
        if not os.path.exists(dirname):
            makedirs(dirname, self.createmode)

        if self._can_symlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            f = self(dst, "w")
            f.write(src)
            f.close()
            self._fixfilemode(dst)
1543
1510
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        self.iter = iter(in_iter)
        self.buf = ''
        self.targetsize = 2 ** 16

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        if l > len(self.buf) and self.iter:
            # refill: pull chunks until at least l bytes are buffered
            # (rounded up to a multiple of targetsize)
            wanted = max(l, self.targetsize)
            acc = cStringIO.StringIO()
            acc.write(self.buf)
            have = len(self.buf)
            for chunk in self.iter:
                acc.write(chunk)
                have += len(chunk)
                if have >= wanted:
                    break
            if have < wanted:
                # source exhausted; stop consulting it from now on
                self.iter = False
            self.buf = acc.getvalue()
        if len(self.buf) == l:
            data, self.buf = str(self.buf), ''
        else:
            # hand out l bytes, keep the tail without copying it
            data, self.buf = self.buf[:l], buffer(self.buf, l)
        return data
1577
1544
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data).  Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 short-circuits without touching the file
        data = nbytes and f.read(nbytes)
        if not data:
            break
        if limit:
            limit -= len(data)
        yield data
1594
1561
1595 def makedate():
1562 def makedate():
1596 lt = time.localtime()
1563 lt = time.localtime()
1597 if lt[8] == 1 and time.daylight:
1564 if lt[8] == 1 and time.daylight:
1598 tz = time.altzone
1565 tz = time.altzone
1599 else:
1566 else:
1600 tz = time.timezone
1567 tz = time.timezone
1601 return time.mktime(lt), tz
1568 return time.mktime(lt), tz
1602
1569
1603 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1570 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1604 """represent a (unixtime, offset) tuple as a localized time.
1571 """represent a (unixtime, offset) tuple as a localized time.
1605 unixtime is seconds since the epoch, and offset is the time zone's
1572 unixtime is seconds since the epoch, and offset is the time zone's
1606 number of seconds away from UTC. if timezone is false, do not
1573 number of seconds away from UTC. if timezone is false, do not
1607 append time zone to string."""
1574 append time zone to string."""
1608 t, tz = date or makedate()
1575 t, tz = date or makedate()
1609 if "%1" in format or "%2" in format:
1576 if "%1" in format or "%2" in format:
1610 sign = (tz > 0) and "-" or "+"
1577 sign = (tz > 0) and "-" or "+"
1611 minutes = abs(tz) / 60
1578 minutes = abs(tz) / 60
1612 format = format.replace("%1", "%c%02d" % (sign, minutes / 60))
1579 format = format.replace("%1", "%c%02d" % (sign, minutes / 60))
1613 format = format.replace("%2", "%02d" % (minutes % 60))
1580 format = format.replace("%2", "%02d" % (minutes % 60))
1614 s = time.strftime(format, time.gmtime(float(t) - tz))
1581 s = time.strftime(format, time.gmtime(float(t) - tz))
1615 return s
1582 return s
1616
1583
1617 def shortdate(date=None):
1584 def shortdate(date=None):
1618 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1585 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1619 return datestr(date, format='%Y-%m-%d')
1586 return datestr(date, format='%Y-%m-%d')
1620
1587
1621 def strdate(string, format, defaults=[]):
1588 def strdate(string, format, defaults=[]):
1622 """parse a localized time string and return a (unixtime, offset) tuple.
1589 """parse a localized time string and return a (unixtime, offset) tuple.
1623 if the string cannot be parsed, ValueError is raised."""
1590 if the string cannot be parsed, ValueError is raised."""
1624 def timezone(string):
1591 def timezone(string):
1625 tz = string.split()[-1]
1592 tz = string.split()[-1]
1626 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1593 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1627 sign = (tz[0] == "+") and 1 or -1
1594 sign = (tz[0] == "+") and 1 or -1
1628 hours = int(tz[1:3])
1595 hours = int(tz[1:3])
1629 minutes = int(tz[3:5])
1596 minutes = int(tz[3:5])
1630 return -sign * (hours * 60 + minutes) * 60
1597 return -sign * (hours * 60 + minutes) * 60
1631 if tz == "GMT" or tz == "UTC":
1598 if tz == "GMT" or tz == "UTC":
1632 return 0
1599 return 0
1633 return None
1600 return None
1634
1601
1635 # NOTE: unixtime = localunixtime + offset
1602 # NOTE: unixtime = localunixtime + offset
1636 offset, date = timezone(string), string
1603 offset, date = timezone(string), string
1637 if offset != None:
1604 if offset != None:
1638 date = " ".join(string.split()[:-1])
1605 date = " ".join(string.split()[:-1])
1639
1606
1640 # add missing elements from defaults
1607 # add missing elements from defaults
1641 for part in defaults:
1608 for part in defaults:
1642 found = [True for p in part if ("%"+p) in format]
1609 found = [True for p in part if ("%"+p) in format]
1643 if not found:
1610 if not found:
1644 date += "@" + defaults[part]
1611 date += "@" + defaults[part]
1645 format += "@%" + part[0]
1612 format += "@%" + part[0]
1646
1613
1647 timetuple = time.strptime(date, format)
1614 timetuple = time.strptime(date, format)
1648 localunixtime = int(calendar.timegm(timetuple))
1615 localunixtime = int(calendar.timegm(timetuple))
1649 if offset is None:
1616 if offset is None:
1650 # local timezone
1617 # local timezone
1651 unixtime = int(time.mktime(timetuple))
1618 unixtime = int(time.mktime(timetuple))
1652 offset = unixtime - localunixtime
1619 offset = unixtime - localunixtime
1653 else:
1620 else:
1654 unixtime = localunixtime + offset
1621 unixtime = localunixtime + offset
1655 return unixtime, offset
1622 return unixtime, offset
1656
1623
1657 def parsedate(date, formats=None, defaults=None):
1624 def parsedate(date, formats=None, defaults=None):
1658 """parse a localized date/time string and return a (unixtime, offset) tuple.
1625 """parse a localized date/time string and return a (unixtime, offset) tuple.
1659
1626
1660 The date may be a "unixtime offset" string or in one of the specified
1627 The date may be a "unixtime offset" string or in one of the specified
1661 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1628 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1662 """
1629 """
1663 if not date:
1630 if not date:
1664 return 0, 0
1631 return 0, 0
1665 if isinstance(date, tuple) and len(date) == 2:
1632 if isinstance(date, tuple) and len(date) == 2:
1666 return date
1633 return date
1667 if not formats:
1634 if not formats:
1668 formats = defaultdateformats
1635 formats = defaultdateformats
1669 date = date.strip()
1636 date = date.strip()
1670 try:
1637 try:
1671 when, offset = map(int, date.split(' '))
1638 when, offset = map(int, date.split(' '))
1672 except ValueError:
1639 except ValueError:
1673 # fill out defaults
1640 # fill out defaults
1674 if not defaults:
1641 if not defaults:
1675 defaults = {}
1642 defaults = {}
1676 now = makedate()
1643 now = makedate()
1677 for part in "d mb yY HI M S".split():
1644 for part in "d mb yY HI M S".split():
1678 if part not in defaults:
1645 if part not in defaults:
1679 if part[0] in "HMS":
1646 if part[0] in "HMS":
1680 defaults[part] = "00"
1647 defaults[part] = "00"
1681 else:
1648 else:
1682 defaults[part] = datestr(now, "%" + part[0])
1649 defaults[part] = datestr(now, "%" + part[0])
1683
1650
1684 for format in formats:
1651 for format in formats:
1685 try:
1652 try:
1686 when, offset = strdate(date, format, defaults)
1653 when, offset = strdate(date, format, defaults)
1687 except (ValueError, OverflowError):
1654 except (ValueError, OverflowError):
1688 pass
1655 pass
1689 else:
1656 else:
1690 break
1657 break
1691 else:
1658 else:
1692 raise Abort(_('invalid date: %r ') % date)
1659 raise Abort(_('invalid date: %r ') % date)
1693 # validate explicit (probably user-specified) date and
1660 # validate explicit (probably user-specified) date and
1694 # time zone offset. values must fit in signed 32 bits for
1661 # time zone offset. values must fit in signed 32 bits for
1695 # current 32-bit linux runtimes. timezones go from UTC-12
1662 # current 32-bit linux runtimes. timezones go from UTC-12
1696 # to UTC+14
1663 # to UTC+14
1697 if abs(when) > 0x7fffffff:
1664 if abs(when) > 0x7fffffff:
1698 raise Abort(_('date exceeds 32 bits: %d') % when)
1665 raise Abort(_('date exceeds 32 bits: %d') % when)
1699 if offset < -50400 or offset > 43200:
1666 if offset < -50400 or offset > 43200:
1700 raise Abort(_('impossible time zone offset: %d') % offset)
1667 raise Abort(_('impossible time zone offset: %d') % offset)
1701 return when, offset
1668 return when, offset
1702
1669
1703 def matchdate(date):
1670 def matchdate(date):
1704 """Return a function that matches a given date match specifier
1671 """Return a function that matches a given date match specifier
1705
1672
1706 Formats include:
1673 Formats include:
1707
1674
1708 '{date}' match a given date to the accuracy provided
1675 '{date}' match a given date to the accuracy provided
1709
1676
1710 '<{date}' on or before a given date
1677 '<{date}' on or before a given date
1711
1678
1712 '>{date}' on or after a given date
1679 '>{date}' on or after a given date
1713
1680
1714 """
1681 """
1715
1682
1716 def lower(date):
1683 def lower(date):
1717 d = dict(mb="1", d="1")
1684 d = dict(mb="1", d="1")
1718 return parsedate(date, extendeddateformats, d)[0]
1685 return parsedate(date, extendeddateformats, d)[0]
1719
1686
1720 def upper(date):
1687 def upper(date):
1721 d = dict(mb="12", HI="23", M="59", S="59")
1688 d = dict(mb="12", HI="23", M="59", S="59")
1722 for days in "31 30 29".split():
1689 for days in "31 30 29".split():
1723 try:
1690 try:
1724 d["d"] = days
1691 d["d"] = days
1725 return parsedate(date, extendeddateformats, d)[0]
1692 return parsedate(date, extendeddateformats, d)[0]
1726 except:
1693 except:
1727 pass
1694 pass
1728 d["d"] = "28"
1695 d["d"] = "28"
1729 return parsedate(date, extendeddateformats, d)[0]
1696 return parsedate(date, extendeddateformats, d)[0]
1730
1697
1731 if date[0] == "<":
1698 if date[0] == "<":
1732 when = upper(date[1:])
1699 when = upper(date[1:])
1733 return lambda x: x <= when
1700 return lambda x: x <= when
1734 elif date[0] == ">":
1701 elif date[0] == ">":
1735 when = lower(date[1:])
1702 when = lower(date[1:])
1736 return lambda x: x >= when
1703 return lambda x: x >= when
1737 elif date[0] == "-":
1704 elif date[0] == "-":
1738 try:
1705 try:
1739 days = int(date[1:])
1706 days = int(date[1:])
1740 except ValueError:
1707 except ValueError:
1741 raise Abort(_("invalid day spec: %s") % date[1:])
1708 raise Abort(_("invalid day spec: %s") % date[1:])
1742 when = makedate()[0] - days * 3600 * 24
1709 when = makedate()[0] - days * 3600 * 24
1743 return lambda x: x >= when
1710 return lambda x: x >= when
1744 elif " to " in date:
1711 elif " to " in date:
1745 a, b = date.split(" to ")
1712 a, b = date.split(" to ")
1746 start, stop = lower(a), upper(b)
1713 start, stop = lower(a), upper(b)
1747 return lambda x: x >= start and x <= stop
1714 return lambda x: x >= start and x <= stop
1748 else:
1715 else:
1749 start, stop = lower(date), upper(date)
1716 start, stop = lower(date), upper(date)
1750 return lambda x: x >= start and x <= stop
1717 return lambda x: x >= start and x <= stop
1751
1718
1752 def shortuser(user):
1719 def shortuser(user):
1753 """Return a short representation of a user name or email address."""
1720 """Return a short representation of a user name or email address."""
1754 f = user.find('@')
1721 f = user.find('@')
1755 if f >= 0:
1722 if f >= 0:
1756 user = user[:f]
1723 user = user[:f]
1757 f = user.find('<')
1724 f = user.find('<')
1758 if f >= 0:
1725 if f >= 0:
1759 user = user[f+1:]
1726 user = user[f+1:]
1760 f = user.find(' ')
1727 f = user.find(' ')
1761 if f >= 0:
1728 if f >= 0:
1762 user = user[:f]
1729 user = user[:f]
1763 f = user.find('.')
1730 f = user.find('.')
1764 if f >= 0:
1731 if f >= 0:
1765 user = user[:f]
1732 user = user[:f]
1766 return user
1733 return user
1767
1734
1768 def email(author):
1735 def email(author):
1769 '''get email of author.'''
1736 '''get email of author.'''
1770 r = author.find('>')
1737 r = author.find('>')
1771 if r == -1: r = None
1738 if r == -1: r = None
1772 return author[author.find('<')+1:r]
1739 return author[author.find('<')+1:r]
1773
1740
1774 def ellipsis(text, maxlength=400):
1741 def ellipsis(text, maxlength=400):
1775 """Trim string to at most maxlength (default: 400) characters."""
1742 """Trim string to at most maxlength (default: 400) characters."""
1776 if len(text) <= maxlength:
1743 if len(text) <= maxlength:
1777 return text
1744 return text
1778 else:
1745 else:
1779 return "%s..." % (text[:maxlength-3])
1746 return "%s..." % (text[:maxlength-3])
1780
1747
1781 def walkrepos(path, followsym=False, seen_dirs=None):
1748 def walkrepos(path, followsym=False, seen_dirs=None):
1782 '''yield every hg repository under path, recursively.'''
1749 '''yield every hg repository under path, recursively.'''
1783 def errhandler(err):
1750 def errhandler(err):
1784 if err.filename == path:
1751 if err.filename == path:
1785 raise err
1752 raise err
1786 if followsym and hasattr(os.path, 'samestat'):
1753 if followsym and hasattr(os.path, 'samestat'):
1787 def _add_dir_if_not_there(dirlst, dirname):
1754 def _add_dir_if_not_there(dirlst, dirname):
1788 match = False
1755 match = False
1789 samestat = os.path.samestat
1756 samestat = os.path.samestat
1790 dirstat = os.stat(dirname)
1757 dirstat = os.stat(dirname)
1791 for lstdirstat in dirlst:
1758 for lstdirstat in dirlst:
1792 if samestat(dirstat, lstdirstat):
1759 if samestat(dirstat, lstdirstat):
1793 match = True
1760 match = True
1794 break
1761 break
1795 if not match:
1762 if not match:
1796 dirlst.append(dirstat)
1763 dirlst.append(dirstat)
1797 return not match
1764 return not match
1798 else:
1765 else:
1799 followsym = False
1766 followsym = False
1800
1767
1801 if (seen_dirs is None) and followsym:
1768 if (seen_dirs is None) and followsym:
1802 seen_dirs = []
1769 seen_dirs = []
1803 _add_dir_if_not_there(seen_dirs, path)
1770 _add_dir_if_not_there(seen_dirs, path)
1804 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1771 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1805 if '.hg' in dirs:
1772 if '.hg' in dirs:
1806 dirs[:] = [] # don't descend further
1773 dirs[:] = [] # don't descend further
1807 yield root # found a repository
1774 yield root # found a repository
1808 qroot = os.path.join(root, '.hg', 'patches')
1775 qroot = os.path.join(root, '.hg', 'patches')
1809 if os.path.isdir(os.path.join(qroot, '.hg')):
1776 if os.path.isdir(os.path.join(qroot, '.hg')):
1810 yield qroot # we have a patch queue repo here
1777 yield qroot # we have a patch queue repo here
1811 elif followsym:
1778 elif followsym:
1812 newdirs = []
1779 newdirs = []
1813 for d in dirs:
1780 for d in dirs:
1814 fname = os.path.join(root, d)
1781 fname = os.path.join(root, d)
1815 if _add_dir_if_not_there(seen_dirs, fname):
1782 if _add_dir_if_not_there(seen_dirs, fname):
1816 if os.path.islink(fname):
1783 if os.path.islink(fname):
1817 for hgname in walkrepos(fname, True, seen_dirs):
1784 for hgname in walkrepos(fname, True, seen_dirs):
1818 yield hgname
1785 yield hgname
1819 else:
1786 else:
1820 newdirs.append(d)
1787 newdirs.append(d)
1821 dirs[:] = newdirs
1788 dirs[:] = newdirs
1822
1789
1823 _rcpath = None
1790 _rcpath = None
1824
1791
1825 def os_rcpath():
1792 def os_rcpath():
1826 '''return default os-specific hgrc search path'''
1793 '''return default os-specific hgrc search path'''
1827 path = system_rcpath()
1794 path = system_rcpath()
1828 path.extend(user_rcpath())
1795 path.extend(user_rcpath())
1829 path = [os.path.normpath(f) for f in path]
1796 path = [os.path.normpath(f) for f in path]
1830 return path
1797 return path
1831
1798
1832 def rcpath():
1799 def rcpath():
1833 '''return hgrc search path. if env var HGRCPATH is set, use it.
1800 '''return hgrc search path. if env var HGRCPATH is set, use it.
1834 for each item in path, if directory, use files ending in .rc,
1801 for each item in path, if directory, use files ending in .rc,
1835 else use item.
1802 else use item.
1836 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1803 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1837 if no HGRCPATH, use default os-specific path.'''
1804 if no HGRCPATH, use default os-specific path.'''
1838 global _rcpath
1805 global _rcpath
1839 if _rcpath is None:
1806 if _rcpath is None:
1840 if 'HGRCPATH' in os.environ:
1807 if 'HGRCPATH' in os.environ:
1841 _rcpath = []
1808 _rcpath = []
1842 for p in os.environ['HGRCPATH'].split(os.pathsep):
1809 for p in os.environ['HGRCPATH'].split(os.pathsep):
1843 if not p: continue
1810 if not p: continue
1844 if os.path.isdir(p):
1811 if os.path.isdir(p):
1845 for f, kind in osutil.listdir(p):
1812 for f, kind in osutil.listdir(p):
1846 if f.endswith('.rc'):
1813 if f.endswith('.rc'):
1847 _rcpath.append(os.path.join(p, f))
1814 _rcpath.append(os.path.join(p, f))
1848 else:
1815 else:
1849 _rcpath.append(p)
1816 _rcpath.append(p)
1850 else:
1817 else:
1851 _rcpath = os_rcpath()
1818 _rcpath = os_rcpath()
1852 return _rcpath
1819 return _rcpath
1853
1820
1854 def bytecount(nbytes):
1821 def bytecount(nbytes):
1855 '''return byte count formatted as readable string, with units'''
1822 '''return byte count formatted as readable string, with units'''
1856
1823
1857 units = (
1824 units = (
1858 (100, 1<<30, _('%.0f GB')),
1825 (100, 1<<30, _('%.0f GB')),
1859 (10, 1<<30, _('%.1f GB')),
1826 (10, 1<<30, _('%.1f GB')),
1860 (1, 1<<30, _('%.2f GB')),
1827 (1, 1<<30, _('%.2f GB')),
1861 (100, 1<<20, _('%.0f MB')),
1828 (100, 1<<20, _('%.0f MB')),
1862 (10, 1<<20, _('%.1f MB')),
1829 (10, 1<<20, _('%.1f MB')),
1863 (1, 1<<20, _('%.2f MB')),
1830 (1, 1<<20, _('%.2f MB')),
1864 (100, 1<<10, _('%.0f KB')),
1831 (100, 1<<10, _('%.0f KB')),
1865 (10, 1<<10, _('%.1f KB')),
1832 (10, 1<<10, _('%.1f KB')),
1866 (1, 1<<10, _('%.2f KB')),
1833 (1, 1<<10, _('%.2f KB')),
1867 (1, 1, _('%.0f bytes')),
1834 (1, 1, _('%.0f bytes')),
1868 )
1835 )
1869
1836
1870 for multiplier, divisor, format in units:
1837 for multiplier, divisor, format in units:
1871 if nbytes >= divisor * multiplier:
1838 if nbytes >= divisor * multiplier:
1872 return format % (nbytes / float(divisor))
1839 return format % (nbytes / float(divisor))
1873 return units[-1][2] % nbytes
1840 return units[-1][2] % nbytes
1874
1841
1875 def drop_scheme(scheme, path):
1842 def drop_scheme(scheme, path):
1876 sc = scheme + ':'
1843 sc = scheme + ':'
1877 if path.startswith(sc):
1844 if path.startswith(sc):
1878 path = path[len(sc):]
1845 path = path[len(sc):]
1879 if path.startswith('//'):
1846 if path.startswith('//'):
1880 path = path[2:]
1847 path = path[2:]
1881 return path
1848 return path
1882
1849
1883 def uirepr(s):
1850 def uirepr(s):
1884 # Avoid double backslash in Windows path repr()
1851 # Avoid double backslash in Windows path repr()
1885 return repr(s).replace('\\\\', '\\')
1852 return repr(s).replace('\\\\', '\\')
1886
1853
1887 def hidepassword(url):
1854 def hidepassword(url):
1888 '''hide user credential in a url string'''
1855 '''hide user credential in a url string'''
1889 scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
1856 scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
1890 netloc = re.sub('([^:]*):([^@]*)@(.*)', r'\1:***@\3', netloc)
1857 netloc = re.sub('([^:]*):([^@]*)@(.*)', r'\1:***@\3', netloc)
1891 return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
1858 return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
1892
1859
1893 def removeauth(url):
1860 def removeauth(url):
1894 '''remove all authentication information from a url string'''
1861 '''remove all authentication information from a url string'''
1895 scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
1862 scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
1896 netloc = netloc[netloc.find('@')+1:]
1863 netloc = netloc[netloc.find('@')+1:]
1897 return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
1864 return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
General Comments 0
You need to be logged in to leave comments. Login now