##// END OF EJS Templates
url: refactor util.drop_scheme() and hg.localpath() into url.localpath()...
Brodie Rao -
r13826:e574207e default
parent child Browse files
Show More
@@ -1,323 +1,323 b''
1 # bundlerepo.py - repository class for viewing uncompressed bundles
1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 #
2 #
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Repository class for viewing uncompressed bundles.
8 """Repository class for viewing uncompressed bundles.
9
9
10 This provides a read-only repository interface to bundles as if they
10 This provides a read-only repository interface to bundles as if they
11 were part of the actual repository.
11 were part of the actual repository.
12 """
12 """
13
13
14 from node import nullid
14 from node import nullid
15 from i18n import _
15 from i18n import _
16 import os, struct, tempfile, shutil
16 import os, struct, tempfile, shutil
17 import changegroup, util, mdiff, discovery
17 import changegroup, util, mdiff, discovery
18 import localrepo, changelog, manifest, filelog, revlog, error
18 import localrepo, changelog, manifest, filelog, revlog, error, url
19
19
20 class bundlerevlog(revlog.revlog):
20 class bundlerevlog(revlog.revlog):
21 def __init__(self, opener, indexfile, bundle,
21 def __init__(self, opener, indexfile, bundle,
22 linkmapper=None):
22 linkmapper=None):
23 # How it works:
23 # How it works:
24 # to retrieve a revision, we need to know the offset of
24 # to retrieve a revision, we need to know the offset of
25 # the revision in the bundle (an unbundle object).
25 # the revision in the bundle (an unbundle object).
26 #
26 #
27 # We store this offset in the index (start), to differentiate a
27 # We store this offset in the index (start), to differentiate a
28 # rev in the bundle and from a rev in the revlog, we check
28 # rev in the bundle and from a rev in the revlog, we check
29 # len(index[r]). If the tuple is bigger than 7, it is a bundle
29 # len(index[r]). If the tuple is bigger than 7, it is a bundle
30 # (it is bigger since we store the node to which the delta is)
30 # (it is bigger since we store the node to which the delta is)
31 #
31 #
32 revlog.revlog.__init__(self, opener, indexfile)
32 revlog.revlog.__init__(self, opener, indexfile)
33 self.bundle = bundle
33 self.bundle = bundle
34 self.basemap = {}
34 self.basemap = {}
35 def chunkpositer():
35 def chunkpositer():
36 while 1:
36 while 1:
37 chunk = bundle.chunk()
37 chunk = bundle.chunk()
38 if not chunk:
38 if not chunk:
39 break
39 break
40 pos = bundle.tell()
40 pos = bundle.tell()
41 yield chunk, pos - len(chunk)
41 yield chunk, pos - len(chunk)
42 n = len(self)
42 n = len(self)
43 prev = None
43 prev = None
44 for chunk, start in chunkpositer():
44 for chunk, start in chunkpositer():
45 size = len(chunk)
45 size = len(chunk)
46 if size < 80:
46 if size < 80:
47 raise util.Abort(_("invalid changegroup"))
47 raise util.Abort(_("invalid changegroup"))
48 start += 80
48 start += 80
49 size -= 80
49 size -= 80
50 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
50 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
51 if node in self.nodemap:
51 if node in self.nodemap:
52 prev = node
52 prev = node
53 continue
53 continue
54 for p in (p1, p2):
54 for p in (p1, p2):
55 if not p in self.nodemap:
55 if not p in self.nodemap:
56 raise error.LookupError(p, self.indexfile,
56 raise error.LookupError(p, self.indexfile,
57 _("unknown parent"))
57 _("unknown parent"))
58 if linkmapper is None:
58 if linkmapper is None:
59 link = n
59 link = n
60 else:
60 else:
61 link = linkmapper(cs)
61 link = linkmapper(cs)
62
62
63 if not prev:
63 if not prev:
64 prev = p1
64 prev = p1
65 # start, size, full unc. size, base (unused), link, p1, p2, node
65 # start, size, full unc. size, base (unused), link, p1, p2, node
66 e = (revlog.offset_type(start, 0), size, -1, -1, link,
66 e = (revlog.offset_type(start, 0), size, -1, -1, link,
67 self.rev(p1), self.rev(p2), node)
67 self.rev(p1), self.rev(p2), node)
68 self.basemap[n] = prev
68 self.basemap[n] = prev
69 self.index.insert(-1, e)
69 self.index.insert(-1, e)
70 self.nodemap[node] = n
70 self.nodemap[node] = n
71 prev = node
71 prev = node
72 n += 1
72 n += 1
73
73
74 def inbundle(self, rev):
74 def inbundle(self, rev):
75 """is rev from the bundle"""
75 """is rev from the bundle"""
76 if rev < 0:
76 if rev < 0:
77 return False
77 return False
78 return rev in self.basemap
78 return rev in self.basemap
79 def bundlebase(self, rev):
79 def bundlebase(self, rev):
80 return self.basemap[rev]
80 return self.basemap[rev]
81 def _chunk(self, rev):
81 def _chunk(self, rev):
82 # Warning: in case of bundle, the diff is against bundlebase,
82 # Warning: in case of bundle, the diff is against bundlebase,
83 # not against rev - 1
83 # not against rev - 1
84 # XXX: could use some caching
84 # XXX: could use some caching
85 if not self.inbundle(rev):
85 if not self.inbundle(rev):
86 return revlog.revlog._chunk(self, rev)
86 return revlog.revlog._chunk(self, rev)
87 self.bundle.seek(self.start(rev))
87 self.bundle.seek(self.start(rev))
88 return self.bundle.read(self.length(rev))
88 return self.bundle.read(self.length(rev))
89
89
90 def revdiff(self, rev1, rev2):
90 def revdiff(self, rev1, rev2):
91 """return or calculate a delta between two revisions"""
91 """return or calculate a delta between two revisions"""
92 if self.inbundle(rev1) and self.inbundle(rev2):
92 if self.inbundle(rev1) and self.inbundle(rev2):
93 # hot path for bundle
93 # hot path for bundle
94 revb = self.rev(self.bundlebase(rev2))
94 revb = self.rev(self.bundlebase(rev2))
95 if revb == rev1:
95 if revb == rev1:
96 return self._chunk(rev2)
96 return self._chunk(rev2)
97 elif not self.inbundle(rev1) and not self.inbundle(rev2):
97 elif not self.inbundle(rev1) and not self.inbundle(rev2):
98 return revlog.revlog.revdiff(self, rev1, rev2)
98 return revlog.revlog.revdiff(self, rev1, rev2)
99
99
100 return mdiff.textdiff(self.revision(self.node(rev1)),
100 return mdiff.textdiff(self.revision(self.node(rev1)),
101 self.revision(self.node(rev2)))
101 self.revision(self.node(rev2)))
102
102
103 def revision(self, node):
103 def revision(self, node):
104 """return an uncompressed revision of a given"""
104 """return an uncompressed revision of a given"""
105 if node == nullid:
105 if node == nullid:
106 return ""
106 return ""
107
107
108 text = None
108 text = None
109 chain = []
109 chain = []
110 iter_node = node
110 iter_node = node
111 rev = self.rev(iter_node)
111 rev = self.rev(iter_node)
112 # reconstruct the revision if it is from a changegroup
112 # reconstruct the revision if it is from a changegroup
113 while self.inbundle(rev):
113 while self.inbundle(rev):
114 if self._cache and self._cache[0] == iter_node:
114 if self._cache and self._cache[0] == iter_node:
115 text = self._cache[2]
115 text = self._cache[2]
116 break
116 break
117 chain.append(rev)
117 chain.append(rev)
118 iter_node = self.bundlebase(rev)
118 iter_node = self.bundlebase(rev)
119 rev = self.rev(iter_node)
119 rev = self.rev(iter_node)
120 if text is None:
120 if text is None:
121 text = revlog.revlog.revision(self, iter_node)
121 text = revlog.revlog.revision(self, iter_node)
122
122
123 while chain:
123 while chain:
124 delta = self._chunk(chain.pop())
124 delta = self._chunk(chain.pop())
125 text = mdiff.patches(text, [delta])
125 text = mdiff.patches(text, [delta])
126
126
127 p1, p2 = self.parents(node)
127 p1, p2 = self.parents(node)
128 if node != revlog.hash(text, p1, p2):
128 if node != revlog.hash(text, p1, p2):
129 raise error.RevlogError(_("integrity check failed on %s:%d")
129 raise error.RevlogError(_("integrity check failed on %s:%d")
130 % (self.datafile, self.rev(node)))
130 % (self.datafile, self.rev(node)))
131
131
132 self._cache = (node, self.rev(node), text)
132 self._cache = (node, self.rev(node), text)
133 return text
133 return text
134
134
135 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
135 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
136 raise NotImplementedError
136 raise NotImplementedError
137 def addgroup(self, revs, linkmapper, transaction):
137 def addgroup(self, revs, linkmapper, transaction):
138 raise NotImplementedError
138 raise NotImplementedError
139 def strip(self, rev, minlink):
139 def strip(self, rev, minlink):
140 raise NotImplementedError
140 raise NotImplementedError
141 def checksize(self):
141 def checksize(self):
142 raise NotImplementedError
142 raise NotImplementedError
143
143
144 class bundlechangelog(bundlerevlog, changelog.changelog):
144 class bundlechangelog(bundlerevlog, changelog.changelog):
145 def __init__(self, opener, bundle):
145 def __init__(self, opener, bundle):
146 changelog.changelog.__init__(self, opener)
146 changelog.changelog.__init__(self, opener)
147 bundlerevlog.__init__(self, opener, self.indexfile, bundle)
147 bundlerevlog.__init__(self, opener, self.indexfile, bundle)
148
148
149 class bundlemanifest(bundlerevlog, manifest.manifest):
149 class bundlemanifest(bundlerevlog, manifest.manifest):
150 def __init__(self, opener, bundle, linkmapper):
150 def __init__(self, opener, bundle, linkmapper):
151 manifest.manifest.__init__(self, opener)
151 manifest.manifest.__init__(self, opener)
152 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
152 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
153 linkmapper)
153 linkmapper)
154
154
155 class bundlefilelog(bundlerevlog, filelog.filelog):
155 class bundlefilelog(bundlerevlog, filelog.filelog):
156 def __init__(self, opener, path, bundle, linkmapper):
156 def __init__(self, opener, path, bundle, linkmapper):
157 filelog.filelog.__init__(self, opener, path)
157 filelog.filelog.__init__(self, opener, path)
158 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
158 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
159 linkmapper)
159 linkmapper)
160
160
161 class bundlerepository(localrepo.localrepository):
161 class bundlerepository(localrepo.localrepository):
162 def __init__(self, ui, path, bundlename):
162 def __init__(self, ui, path, bundlename):
163 self._tempparent = None
163 self._tempparent = None
164 try:
164 try:
165 localrepo.localrepository.__init__(self, ui, path)
165 localrepo.localrepository.__init__(self, ui, path)
166 except error.RepoError:
166 except error.RepoError:
167 self._tempparent = tempfile.mkdtemp()
167 self._tempparent = tempfile.mkdtemp()
168 localrepo.instance(ui, self._tempparent, 1)
168 localrepo.instance(ui, self._tempparent, 1)
169 localrepo.localrepository.__init__(self, ui, self._tempparent)
169 localrepo.localrepository.__init__(self, ui, self._tempparent)
170
170
171 if path:
171 if path:
172 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
172 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
173 else:
173 else:
174 self._url = 'bundle:' + bundlename
174 self._url = 'bundle:' + bundlename
175
175
176 self.tempfile = None
176 self.tempfile = None
177 f = util.posixfile(bundlename, "rb")
177 f = util.posixfile(bundlename, "rb")
178 self.bundle = changegroup.readbundle(f, bundlename)
178 self.bundle = changegroup.readbundle(f, bundlename)
179 if self.bundle.compressed():
179 if self.bundle.compressed():
180 fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
180 fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
181 suffix=".hg10un", dir=self.path)
181 suffix=".hg10un", dir=self.path)
182 self.tempfile = temp
182 self.tempfile = temp
183 fptemp = os.fdopen(fdtemp, 'wb')
183 fptemp = os.fdopen(fdtemp, 'wb')
184
184
185 try:
185 try:
186 fptemp.write("HG10UN")
186 fptemp.write("HG10UN")
187 while 1:
187 while 1:
188 chunk = self.bundle.read(2**18)
188 chunk = self.bundle.read(2**18)
189 if not chunk:
189 if not chunk:
190 break
190 break
191 fptemp.write(chunk)
191 fptemp.write(chunk)
192 finally:
192 finally:
193 fptemp.close()
193 fptemp.close()
194
194
195 f = util.posixfile(self.tempfile, "rb")
195 f = util.posixfile(self.tempfile, "rb")
196 self.bundle = changegroup.readbundle(f, bundlename)
196 self.bundle = changegroup.readbundle(f, bundlename)
197
197
198 # dict with the mapping 'filename' -> position in the bundle
198 # dict with the mapping 'filename' -> position in the bundle
199 self.bundlefilespos = {}
199 self.bundlefilespos = {}
200
200
201 @util.propertycache
201 @util.propertycache
202 def changelog(self):
202 def changelog(self):
203 c = bundlechangelog(self.sopener, self.bundle)
203 c = bundlechangelog(self.sopener, self.bundle)
204 self.manstart = self.bundle.tell()
204 self.manstart = self.bundle.tell()
205 return c
205 return c
206
206
207 @util.propertycache
207 @util.propertycache
208 def manifest(self):
208 def manifest(self):
209 self.bundle.seek(self.manstart)
209 self.bundle.seek(self.manstart)
210 m = bundlemanifest(self.sopener, self.bundle, self.changelog.rev)
210 m = bundlemanifest(self.sopener, self.bundle, self.changelog.rev)
211 self.filestart = self.bundle.tell()
211 self.filestart = self.bundle.tell()
212 return m
212 return m
213
213
214 @util.propertycache
214 @util.propertycache
215 def manstart(self):
215 def manstart(self):
216 self.changelog
216 self.changelog
217 return self.manstart
217 return self.manstart
218
218
219 @util.propertycache
219 @util.propertycache
220 def filestart(self):
220 def filestart(self):
221 self.manifest
221 self.manifest
222 return self.filestart
222 return self.filestart
223
223
224 def url(self):
224 def url(self):
225 return self._url
225 return self._url
226
226
227 def file(self, f):
227 def file(self, f):
228 if not self.bundlefilespos:
228 if not self.bundlefilespos:
229 self.bundle.seek(self.filestart)
229 self.bundle.seek(self.filestart)
230 while 1:
230 while 1:
231 chunk = self.bundle.chunk()
231 chunk = self.bundle.chunk()
232 if not chunk:
232 if not chunk:
233 break
233 break
234 self.bundlefilespos[chunk] = self.bundle.tell()
234 self.bundlefilespos[chunk] = self.bundle.tell()
235 while 1:
235 while 1:
236 c = self.bundle.chunk()
236 c = self.bundle.chunk()
237 if not c:
237 if not c:
238 break
238 break
239
239
240 if f[0] == '/':
240 if f[0] == '/':
241 f = f[1:]
241 f = f[1:]
242 if f in self.bundlefilespos:
242 if f in self.bundlefilespos:
243 self.bundle.seek(self.bundlefilespos[f])
243 self.bundle.seek(self.bundlefilespos[f])
244 return bundlefilelog(self.sopener, f, self.bundle,
244 return bundlefilelog(self.sopener, f, self.bundle,
245 self.changelog.rev)
245 self.changelog.rev)
246 else:
246 else:
247 return filelog.filelog(self.sopener, f)
247 return filelog.filelog(self.sopener, f)
248
248
249 def close(self):
249 def close(self):
250 """Close assigned bundle file immediately."""
250 """Close assigned bundle file immediately."""
251 self.bundle.close()
251 self.bundle.close()
252 if self.tempfile is not None:
252 if self.tempfile is not None:
253 os.unlink(self.tempfile)
253 os.unlink(self.tempfile)
254 if self._tempparent:
254 if self._tempparent:
255 shutil.rmtree(self._tempparent, True)
255 shutil.rmtree(self._tempparent, True)
256
256
257 def cancopy(self):
257 def cancopy(self):
258 return False
258 return False
259
259
260 def getcwd(self):
260 def getcwd(self):
261 return os.getcwd() # always outside the repo
261 return os.getcwd() # always outside the repo
262
262
263 def instance(ui, path, create):
263 def instance(ui, path, create):
264 if create:
264 if create:
265 raise util.Abort(_('cannot create new bundle repository'))
265 raise util.Abort(_('cannot create new bundle repository'))
266 parentpath = ui.config("bundle", "mainreporoot", "")
266 parentpath = ui.config("bundle", "mainreporoot", "")
267 if parentpath:
267 if parentpath:
268 # Try to make the full path relative so we get a nice, short URL.
268 # Try to make the full path relative so we get a nice, short URL.
269 # In particular, we don't want temp dir names in test outputs.
269 # In particular, we don't want temp dir names in test outputs.
270 cwd = os.getcwd()
270 cwd = os.getcwd()
271 if parentpath == cwd:
271 if parentpath == cwd:
272 parentpath = ''
272 parentpath = ''
273 else:
273 else:
274 cwd = os.path.join(cwd,'')
274 cwd = os.path.join(cwd,'')
275 if parentpath.startswith(cwd):
275 if parentpath.startswith(cwd):
276 parentpath = parentpath[len(cwd):]
276 parentpath = parentpath[len(cwd):]
277 path = util.drop_scheme('file', path)
277 u = url.url(path)
278 if path.startswith('bundle:'):
278 path = u.localpath()
279 path = util.drop_scheme('bundle', path)
279 if u.scheme == 'bundle':
280 s = path.split("+", 1)
280 s = path.split("+", 1)
281 if len(s) == 1:
281 if len(s) == 1:
282 repopath, bundlename = parentpath, s[0]
282 repopath, bundlename = parentpath, s[0]
283 else:
283 else:
284 repopath, bundlename = s
284 repopath, bundlename = s
285 else:
285 else:
286 repopath, bundlename = parentpath, path
286 repopath, bundlename = parentpath, path
287 return bundlerepository(ui, repopath, bundlename)
287 return bundlerepository(ui, repopath, bundlename)
288
288
289 def getremotechanges(ui, repo, other, revs=None, bundlename=None,
289 def getremotechanges(ui, repo, other, revs=None, bundlename=None,
290 force=False, usecommon=False):
290 force=False, usecommon=False):
291 tmp = discovery.findcommonincoming(repo, other, heads=revs, force=force,
291 tmp = discovery.findcommonincoming(repo, other, heads=revs, force=force,
292 commononly=usecommon)
292 commononly=usecommon)
293 common, incoming, rheads = tmp
293 common, incoming, rheads = tmp
294 if not incoming:
294 if not incoming:
295 try:
295 try:
296 os.unlink(bundlename)
296 os.unlink(bundlename)
297 except:
297 except:
298 pass
298 pass
299 return other, None, None, None
299 return other, None, None, None
300
300
301 bundle = None
301 bundle = None
302 if bundlename or not other.local():
302 if bundlename or not other.local():
303 # create a bundle (uncompressed if other repo is not local)
303 # create a bundle (uncompressed if other repo is not local)
304
304
305 if revs is None and other.capable('changegroupsubset'):
305 if revs is None and other.capable('changegroupsubset'):
306 revs = rheads
306 revs = rheads
307
307
308 if usecommon:
308 if usecommon:
309 cg = other.getbundle('incoming', common=common, heads=revs)
309 cg = other.getbundle('incoming', common=common, heads=revs)
310 elif revs is None:
310 elif revs is None:
311 cg = other.changegroup(incoming, "incoming")
311 cg = other.changegroup(incoming, "incoming")
312 else:
312 else:
313 cg = other.changegroupsubset(incoming, revs, 'incoming')
313 cg = other.changegroupsubset(incoming, revs, 'incoming')
314 bundletype = other.local() and "HG10BZ" or "HG10UN"
314 bundletype = other.local() and "HG10BZ" or "HG10UN"
315 fname = bundle = changegroup.writebundle(cg, bundlename, bundletype)
315 fname = bundle = changegroup.writebundle(cg, bundlename, bundletype)
316 # keep written bundle?
316 # keep written bundle?
317 if bundlename:
317 if bundlename:
318 bundle = None
318 bundle = None
319 if not other.local():
319 if not other.local():
320 # use the created uncompressed bundlerepo
320 # use the created uncompressed bundlerepo
321 other = bundlerepository(ui, repo.root, fname)
321 other = bundlerepository(ui, repo.root, fname)
322 return (other, common, incoming, bundle)
322 return (other, common, incoming, bundle)
323
323
@@ -1,573 +1,564 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from i18n import _
9 from i18n import _
10 from lock import release
10 from lock import release
11 from node import hex, nullid, nullrev, short
11 from node import hex, nullid, nullrev, short
12 import localrepo, bundlerepo, httprepo, sshrepo, statichttprepo, bookmarks
12 import localrepo, bundlerepo, httprepo, sshrepo, statichttprepo, bookmarks
13 import lock, util, extensions, error, encoding, node
13 import lock, util, extensions, error, encoding, node
14 import cmdutil, discovery, url
14 import cmdutil, discovery, url
15 import merge as mergemod
15 import merge as mergemod
16 import verify as verifymod
16 import verify as verifymod
17 import errno, os, shutil
17 import errno, os, shutil
18
18
19 def _local(path):
19 def _local(path):
20 path = util.expandpath(util.drop_scheme('file', path))
20 path = util.expandpath(url.localpath(path))
21 return (os.path.isfile(path) and bundlerepo or localrepo)
21 return (os.path.isfile(path) and bundlerepo or localrepo)
22
22
23 def addbranchrevs(lrepo, repo, branches, revs):
23 def addbranchrevs(lrepo, repo, branches, revs):
24 hashbranch, branches = branches
24 hashbranch, branches = branches
25 if not hashbranch and not branches:
25 if not hashbranch and not branches:
26 return revs or None, revs and revs[0] or None
26 return revs or None, revs and revs[0] or None
27 revs = revs and list(revs) or []
27 revs = revs and list(revs) or []
28 if not repo.capable('branchmap'):
28 if not repo.capable('branchmap'):
29 if branches:
29 if branches:
30 raise util.Abort(_("remote branch lookup not supported"))
30 raise util.Abort(_("remote branch lookup not supported"))
31 revs.append(hashbranch)
31 revs.append(hashbranch)
32 return revs, revs[0]
32 return revs, revs[0]
33 branchmap = repo.branchmap()
33 branchmap = repo.branchmap()
34
34
35 def primary(branch):
35 def primary(branch):
36 if branch == '.':
36 if branch == '.':
37 if not lrepo or not lrepo.local():
37 if not lrepo or not lrepo.local():
38 raise util.Abort(_("dirstate branch not accessible"))
38 raise util.Abort(_("dirstate branch not accessible"))
39 branch = lrepo.dirstate.branch()
39 branch = lrepo.dirstate.branch()
40 if branch in branchmap:
40 if branch in branchmap:
41 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
41 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
42 return True
42 return True
43 else:
43 else:
44 return False
44 return False
45
45
46 for branch in branches:
46 for branch in branches:
47 if not primary(branch):
47 if not primary(branch):
48 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
48 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
49 if hashbranch:
49 if hashbranch:
50 if not primary(hashbranch):
50 if not primary(hashbranch):
51 revs.append(hashbranch)
51 revs.append(hashbranch)
52 return revs, revs[0]
52 return revs, revs[0]
53
53
54 def parseurl(path, branches=None):
54 def parseurl(path, branches=None):
55 '''parse url#branch, returning (url, (branch, branches))'''
55 '''parse url#branch, returning (url, (branch, branches))'''
56
56
57 u = url.url(path)
57 u = url.url(path)
58 if not u.fragment:
58 if not u.fragment:
59 return path, (None, branches or [])
59 return path, (None, branches or [])
60 branch = u.fragment
60 branch = u.fragment
61 u.fragment = None
61 u.fragment = None
62 return str(u), (branch, branches or [])
62 return str(u), (branch, branches or [])
63
63
64 schemes = {
64 schemes = {
65 'bundle': bundlerepo,
65 'bundle': bundlerepo,
66 'file': _local,
66 'file': _local,
67 'http': httprepo,
67 'http': httprepo,
68 'https': httprepo,
68 'https': httprepo,
69 'ssh': sshrepo,
69 'ssh': sshrepo,
70 'static-http': statichttprepo,
70 'static-http': statichttprepo,
71 }
71 }
72
72
73 def _lookup(path):
73 def _lookup(path):
74 u = url.url(path)
74 u = url.url(path)
75 scheme = u.scheme or 'file'
75 scheme = u.scheme or 'file'
76 thing = schemes.get(scheme) or schemes['file']
76 thing = schemes.get(scheme) or schemes['file']
77 try:
77 try:
78 return thing(path)
78 return thing(path)
79 except TypeError:
79 except TypeError:
80 return thing
80 return thing
81
81
82 def islocal(repo):
82 def islocal(repo):
83 '''return true if repo or path is local'''
83 '''return true if repo or path is local'''
84 if isinstance(repo, str):
84 if isinstance(repo, str):
85 try:
85 try:
86 return _lookup(repo).islocal(repo)
86 return _lookup(repo).islocal(repo)
87 except AttributeError:
87 except AttributeError:
88 return False
88 return False
89 return repo.local()
89 return repo.local()
90
90
91 def repository(ui, path='', create=False):
91 def repository(ui, path='', create=False):
92 """return a repository object for the specified path"""
92 """return a repository object for the specified path"""
93 repo = _lookup(path).instance(ui, path, create)
93 repo = _lookup(path).instance(ui, path, create)
94 ui = getattr(repo, "ui", ui)
94 ui = getattr(repo, "ui", ui)
95 for name, module in extensions.extensions():
95 for name, module in extensions.extensions():
96 hook = getattr(module, 'reposetup', None)
96 hook = getattr(module, 'reposetup', None)
97 if hook:
97 if hook:
98 hook(ui, repo)
98 hook(ui, repo)
99 return repo
99 return repo
100
100
101 def defaultdest(source):
101 def defaultdest(source):
102 '''return default destination of clone if none is given'''
102 '''return default destination of clone if none is given'''
103 return os.path.basename(os.path.normpath(source))
103 return os.path.basename(os.path.normpath(source))
104
104
105 def localpath(path):
106 if path.startswith('file://localhost/'):
107 return path[16:]
108 if path.startswith('file://'):
109 return path[7:]
110 if path.startswith('file:'):
111 return path[5:]
112 return path
113
114 def share(ui, source, dest=None, update=True):
105 def share(ui, source, dest=None, update=True):
115 '''create a shared repository'''
106 '''create a shared repository'''
116
107
117 if not islocal(source):
108 if not islocal(source):
118 raise util.Abort(_('can only share local repositories'))
109 raise util.Abort(_('can only share local repositories'))
119
110
120 if not dest:
111 if not dest:
121 dest = defaultdest(source)
112 dest = defaultdest(source)
122 else:
113 else:
123 dest = ui.expandpath(dest)
114 dest = ui.expandpath(dest)
124
115
125 if isinstance(source, str):
116 if isinstance(source, str):
126 origsource = ui.expandpath(source)
117 origsource = ui.expandpath(source)
127 source, branches = parseurl(origsource)
118 source, branches = parseurl(origsource)
128 srcrepo = repository(ui, source)
119 srcrepo = repository(ui, source)
129 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
120 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
130 else:
121 else:
131 srcrepo = source
122 srcrepo = source
132 origsource = source = srcrepo.url()
123 origsource = source = srcrepo.url()
133 checkout = None
124 checkout = None
134
125
135 sharedpath = srcrepo.sharedpath # if our source is already sharing
126 sharedpath = srcrepo.sharedpath # if our source is already sharing
136
127
137 root = os.path.realpath(dest)
128 root = os.path.realpath(dest)
138 roothg = os.path.join(root, '.hg')
129 roothg = os.path.join(root, '.hg')
139
130
140 if os.path.exists(roothg):
131 if os.path.exists(roothg):
141 raise util.Abort(_('destination already exists'))
132 raise util.Abort(_('destination already exists'))
142
133
143 if not os.path.isdir(root):
134 if not os.path.isdir(root):
144 os.mkdir(root)
135 os.mkdir(root)
145 util.makedir(roothg, notindexed=True)
136 util.makedir(roothg, notindexed=True)
146
137
147 requirements = ''
138 requirements = ''
148 try:
139 try:
149 requirements = srcrepo.opener('requires').read()
140 requirements = srcrepo.opener('requires').read()
150 except IOError, inst:
141 except IOError, inst:
151 if inst.errno != errno.ENOENT:
142 if inst.errno != errno.ENOENT:
152 raise
143 raise
153
144
154 requirements += 'shared\n'
145 requirements += 'shared\n'
155 file(os.path.join(roothg, 'requires'), 'w').write(requirements)
146 file(os.path.join(roothg, 'requires'), 'w').write(requirements)
156 file(os.path.join(roothg, 'sharedpath'), 'w').write(sharedpath)
147 file(os.path.join(roothg, 'sharedpath'), 'w').write(sharedpath)
157
148
158 default = srcrepo.ui.config('paths', 'default')
149 default = srcrepo.ui.config('paths', 'default')
159 if default:
150 if default:
160 f = file(os.path.join(roothg, 'hgrc'), 'w')
151 f = file(os.path.join(roothg, 'hgrc'), 'w')
161 f.write('[paths]\ndefault = %s\n' % default)
152 f.write('[paths]\ndefault = %s\n' % default)
162 f.close()
153 f.close()
163
154
164 r = repository(ui, root)
155 r = repository(ui, root)
165
156
166 if update:
157 if update:
167 r.ui.status(_("updating working directory\n"))
158 r.ui.status(_("updating working directory\n"))
168 if update is not True:
159 if update is not True:
169 checkout = update
160 checkout = update
170 for test in (checkout, 'default', 'tip'):
161 for test in (checkout, 'default', 'tip'):
171 if test is None:
162 if test is None:
172 continue
163 continue
173 try:
164 try:
174 uprev = r.lookup(test)
165 uprev = r.lookup(test)
175 break
166 break
176 except error.RepoLookupError:
167 except error.RepoLookupError:
177 continue
168 continue
178 _update(r, uprev)
169 _update(r, uprev)
179
170
180 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
171 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
181 stream=False, branch=None):
172 stream=False, branch=None):
182 """Make a copy of an existing repository.
173 """Make a copy of an existing repository.
183
174
184 Create a copy of an existing repository in a new directory. The
175 Create a copy of an existing repository in a new directory. The
185 source and destination are URLs, as passed to the repository
176 source and destination are URLs, as passed to the repository
186 function. Returns a pair of repository objects, the source and
177 function. Returns a pair of repository objects, the source and
187 newly created destination.
178 newly created destination.
188
179
189 The location of the source is added to the new repository's
180 The location of the source is added to the new repository's
190 .hg/hgrc file, as the default to be used for future pulls and
181 .hg/hgrc file, as the default to be used for future pulls and
191 pushes.
182 pushes.
192
183
193 If an exception is raised, the partly cloned/updated destination
184 If an exception is raised, the partly cloned/updated destination
194 repository will be deleted.
185 repository will be deleted.
195
186
196 Arguments:
187 Arguments:
197
188
198 source: repository object or URL
189 source: repository object or URL
199
190
200 dest: URL of destination repository to create (defaults to base
191 dest: URL of destination repository to create (defaults to base
201 name of source repository)
192 name of source repository)
202
193
203 pull: always pull from source repository, even in local case
194 pull: always pull from source repository, even in local case
204
195
205 stream: stream raw data uncompressed from repository (fast over
196 stream: stream raw data uncompressed from repository (fast over
206 LAN, slow over WAN)
197 LAN, slow over WAN)
207
198
208 rev: revision to clone up to (implies pull=True)
199 rev: revision to clone up to (implies pull=True)
209
200
210 update: update working directory after clone completes, if
201 update: update working directory after clone completes, if
211 destination is local repository (True means update to default rev,
202 destination is local repository (True means update to default rev,
212 anything else is treated as a revision)
203 anything else is treated as a revision)
213
204
214 branch: branches to clone
205 branch: branches to clone
215 """
206 """
216
207
217 if isinstance(source, str):
208 if isinstance(source, str):
218 origsource = ui.expandpath(source)
209 origsource = ui.expandpath(source)
219 source, branch = parseurl(origsource, branch)
210 source, branch = parseurl(origsource, branch)
220 src_repo = repository(ui, source)
211 src_repo = repository(ui, source)
221 else:
212 else:
222 src_repo = source
213 src_repo = source
223 branch = (None, branch or [])
214 branch = (None, branch or [])
224 origsource = source = src_repo.url()
215 origsource = source = src_repo.url()
225 rev, checkout = addbranchrevs(src_repo, src_repo, branch, rev)
216 rev, checkout = addbranchrevs(src_repo, src_repo, branch, rev)
226
217
227 if dest is None:
218 if dest is None:
228 dest = defaultdest(source)
219 dest = defaultdest(source)
229 ui.status(_("destination directory: %s\n") % dest)
220 ui.status(_("destination directory: %s\n") % dest)
230 else:
221 else:
231 dest = ui.expandpath(dest)
222 dest = ui.expandpath(dest)
232
223
233 dest = localpath(dest)
224 dest = url.localpath(dest)
234 source = localpath(source)
225 source = url.localpath(source)
235
226
236 if os.path.exists(dest):
227 if os.path.exists(dest):
237 if not os.path.isdir(dest):
228 if not os.path.isdir(dest):
238 raise util.Abort(_("destination '%s' already exists") % dest)
229 raise util.Abort(_("destination '%s' already exists") % dest)
239 elif os.listdir(dest):
230 elif os.listdir(dest):
240 raise util.Abort(_("destination '%s' is not empty") % dest)
231 raise util.Abort(_("destination '%s' is not empty") % dest)
241
232
242 class DirCleanup(object):
233 class DirCleanup(object):
243 def __init__(self, dir_):
234 def __init__(self, dir_):
244 self.rmtree = shutil.rmtree
235 self.rmtree = shutil.rmtree
245 self.dir_ = dir_
236 self.dir_ = dir_
246 def close(self):
237 def close(self):
247 self.dir_ = None
238 self.dir_ = None
248 def cleanup(self):
239 def cleanup(self):
249 if self.dir_:
240 if self.dir_:
250 self.rmtree(self.dir_, True)
241 self.rmtree(self.dir_, True)
251
242
252 src_lock = dest_lock = dir_cleanup = None
243 src_lock = dest_lock = dir_cleanup = None
253 try:
244 try:
254 if islocal(dest):
245 if islocal(dest):
255 dir_cleanup = DirCleanup(dest)
246 dir_cleanup = DirCleanup(dest)
256
247
257 abspath = origsource
248 abspath = origsource
258 copy = False
249 copy = False
259 if src_repo.cancopy() and islocal(dest):
250 if src_repo.cancopy() and islocal(dest):
260 abspath = os.path.abspath(util.drop_scheme('file', origsource))
251 abspath = os.path.abspath(url.localpath(origsource))
261 copy = not pull and not rev
252 copy = not pull and not rev
262
253
263 if copy:
254 if copy:
264 try:
255 try:
265 # we use a lock here because if we race with commit, we
256 # we use a lock here because if we race with commit, we
266 # can end up with extra data in the cloned revlogs that's
257 # can end up with extra data in the cloned revlogs that's
267 # not pointed to by changesets, thus causing verify to
258 # not pointed to by changesets, thus causing verify to
268 # fail
259 # fail
269 src_lock = src_repo.lock(wait=False)
260 src_lock = src_repo.lock(wait=False)
270 except error.LockError:
261 except error.LockError:
271 copy = False
262 copy = False
272
263
273 if copy:
264 if copy:
274 src_repo.hook('preoutgoing', throw=True, source='clone')
265 src_repo.hook('preoutgoing', throw=True, source='clone')
275 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
266 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
276 if not os.path.exists(dest):
267 if not os.path.exists(dest):
277 os.mkdir(dest)
268 os.mkdir(dest)
278 else:
269 else:
279 # only clean up directories we create ourselves
270 # only clean up directories we create ourselves
280 dir_cleanup.dir_ = hgdir
271 dir_cleanup.dir_ = hgdir
281 try:
272 try:
282 dest_path = hgdir
273 dest_path = hgdir
283 util.makedir(dest_path, notindexed=True)
274 util.makedir(dest_path, notindexed=True)
284 except OSError, inst:
275 except OSError, inst:
285 if inst.errno == errno.EEXIST:
276 if inst.errno == errno.EEXIST:
286 dir_cleanup.close()
277 dir_cleanup.close()
287 raise util.Abort(_("destination '%s' already exists")
278 raise util.Abort(_("destination '%s' already exists")
288 % dest)
279 % dest)
289 raise
280 raise
290
281
291 hardlink = None
282 hardlink = None
292 num = 0
283 num = 0
293 for f in src_repo.store.copylist():
284 for f in src_repo.store.copylist():
294 src = os.path.join(src_repo.sharedpath, f)
285 src = os.path.join(src_repo.sharedpath, f)
295 dst = os.path.join(dest_path, f)
286 dst = os.path.join(dest_path, f)
296 dstbase = os.path.dirname(dst)
287 dstbase = os.path.dirname(dst)
297 if dstbase and not os.path.exists(dstbase):
288 if dstbase and not os.path.exists(dstbase):
298 os.mkdir(dstbase)
289 os.mkdir(dstbase)
299 if os.path.exists(src):
290 if os.path.exists(src):
300 if dst.endswith('data'):
291 if dst.endswith('data'):
301 # lock to avoid premature writing to the target
292 # lock to avoid premature writing to the target
302 dest_lock = lock.lock(os.path.join(dstbase, "lock"))
293 dest_lock = lock.lock(os.path.join(dstbase, "lock"))
303 hardlink, n = util.copyfiles(src, dst, hardlink)
294 hardlink, n = util.copyfiles(src, dst, hardlink)
304 num += n
295 num += n
305 if hardlink:
296 if hardlink:
306 ui.debug("linked %d files\n" % num)
297 ui.debug("linked %d files\n" % num)
307 else:
298 else:
308 ui.debug("copied %d files\n" % num)
299 ui.debug("copied %d files\n" % num)
309
300
310 # we need to re-init the repo after manually copying the data
301 # we need to re-init the repo after manually copying the data
311 # into it
302 # into it
312 dest_repo = repository(ui, dest)
303 dest_repo = repository(ui, dest)
313 src_repo.hook('outgoing', source='clone',
304 src_repo.hook('outgoing', source='clone',
314 node=node.hex(node.nullid))
305 node=node.hex(node.nullid))
315 else:
306 else:
316 try:
307 try:
317 dest_repo = repository(ui, dest, create=True)
308 dest_repo = repository(ui, dest, create=True)
318 except OSError, inst:
309 except OSError, inst:
319 if inst.errno == errno.EEXIST:
310 if inst.errno == errno.EEXIST:
320 dir_cleanup.close()
311 dir_cleanup.close()
321 raise util.Abort(_("destination '%s' already exists")
312 raise util.Abort(_("destination '%s' already exists")
322 % dest)
313 % dest)
323 raise
314 raise
324
315
325 revs = None
316 revs = None
326 if rev:
317 if rev:
327 if 'lookup' not in src_repo.capabilities:
318 if 'lookup' not in src_repo.capabilities:
328 raise util.Abort(_("src repository does not support "
319 raise util.Abort(_("src repository does not support "
329 "revision lookup and so doesn't "
320 "revision lookup and so doesn't "
330 "support clone by revision"))
321 "support clone by revision"))
331 revs = [src_repo.lookup(r) for r in rev]
322 revs = [src_repo.lookup(r) for r in rev]
332 checkout = revs[0]
323 checkout = revs[0]
333 if dest_repo.local():
324 if dest_repo.local():
334 dest_repo.clone(src_repo, heads=revs, stream=stream)
325 dest_repo.clone(src_repo, heads=revs, stream=stream)
335 elif src_repo.local():
326 elif src_repo.local():
336 src_repo.push(dest_repo, revs=revs)
327 src_repo.push(dest_repo, revs=revs)
337 else:
328 else:
338 raise util.Abort(_("clone from remote to remote not supported"))
329 raise util.Abort(_("clone from remote to remote not supported"))
339
330
340 if dir_cleanup:
331 if dir_cleanup:
341 dir_cleanup.close()
332 dir_cleanup.close()
342
333
343 if dest_repo.local():
334 if dest_repo.local():
344 fp = dest_repo.opener("hgrc", "w", text=True)
335 fp = dest_repo.opener("hgrc", "w", text=True)
345 fp.write("[paths]\n")
336 fp.write("[paths]\n")
346 fp.write("default = %s\n" % abspath)
337 fp.write("default = %s\n" % abspath)
347 fp.close()
338 fp.close()
348
339
349 dest_repo.ui.setconfig('paths', 'default', abspath)
340 dest_repo.ui.setconfig('paths', 'default', abspath)
350
341
351 if update:
342 if update:
352 if update is not True:
343 if update is not True:
353 checkout = update
344 checkout = update
354 if src_repo.local():
345 if src_repo.local():
355 checkout = src_repo.lookup(update)
346 checkout = src_repo.lookup(update)
356 for test in (checkout, 'default', 'tip'):
347 for test in (checkout, 'default', 'tip'):
357 if test is None:
348 if test is None:
358 continue
349 continue
359 try:
350 try:
360 uprev = dest_repo.lookup(test)
351 uprev = dest_repo.lookup(test)
361 break
352 break
362 except error.RepoLookupError:
353 except error.RepoLookupError:
363 continue
354 continue
364 bn = dest_repo[uprev].branch()
355 bn = dest_repo[uprev].branch()
365 dest_repo.ui.status(_("updating to branch %s\n") % bn)
356 dest_repo.ui.status(_("updating to branch %s\n") % bn)
366 _update(dest_repo, uprev)
357 _update(dest_repo, uprev)
367
358
368 # clone all bookmarks
359 # clone all bookmarks
369 if dest_repo.local() and src_repo.capable("pushkey"):
360 if dest_repo.local() and src_repo.capable("pushkey"):
370 rb = src_repo.listkeys('bookmarks')
361 rb = src_repo.listkeys('bookmarks')
371 for k, n in rb.iteritems():
362 for k, n in rb.iteritems():
372 try:
363 try:
373 m = dest_repo.lookup(n)
364 m = dest_repo.lookup(n)
374 dest_repo._bookmarks[k] = m
365 dest_repo._bookmarks[k] = m
375 except:
366 except:
376 pass
367 pass
377 if rb:
368 if rb:
378 bookmarks.write(dest_repo)
369 bookmarks.write(dest_repo)
379 elif src_repo.local() and dest_repo.capable("pushkey"):
370 elif src_repo.local() and dest_repo.capable("pushkey"):
380 for k, n in src_repo._bookmarks.iteritems():
371 for k, n in src_repo._bookmarks.iteritems():
381 dest_repo.pushkey('bookmarks', k, '', hex(n))
372 dest_repo.pushkey('bookmarks', k, '', hex(n))
382
373
383 return src_repo, dest_repo
374 return src_repo, dest_repo
384 finally:
375 finally:
385 release(src_lock, dest_lock)
376 release(src_lock, dest_lock)
386 if dir_cleanup is not None:
377 if dir_cleanup is not None:
387 dir_cleanup.cleanup()
378 dir_cleanup.cleanup()
388
379
def _showstats(repo, stats):
    """Print a one-line update/merge summary on the repository's ui."""
    msg = _("%d files updated, %d files merged, "
            "%d files removed, %d files unresolved\n")
    repo.ui.status(msg % stats)
392
383
def update(repo, node):
    """update the working directory to node, merging linear changes"""
    stats = mergemod.update(repo, node, False, False, None)
    _showstats(repo, stats)
    unresolved = stats[3]
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0

# naming conflict in clone()
_update = update
403
394
def clean(repo, node, show_stats=True):
    """Forcibly switch the working directory to node, clobbering local
    changes; return True when unresolved files remain."""
    stats = mergemod.update(repo, node, False, True, None)
    if show_stats:
        _showstats(repo, stats)
    unresolved = stats[3]
    return unresolved > 0
410
401
def merge(repo, node, force=None, remind=True):
    """Merge the working directory with node, resolving changes.

    Returns True when any unresolved conflicts remain.
    """
    stats = mergemod.update(repo, node, True, force, False)
    _showstats(repo, stats)
    unresolved = stats[3]
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg update -C .' to abandon\n"))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return unresolved > 0
422
413
def _incoming(displaychlist, subreporecurse, ui, repo, source,
        opts, buffered=False):
    """Shared driver for incoming / gincoming.

    displaychlist is invoked as (remoterepo, incomingchangesetlist,
    displayer) and should contain only the code that cannot be unified
    between the two commands.
    """
    # resolve URL aliases and any #branch suffix on the source
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = repository(remoteui(repo, opts), source)
    ui.status(_('comparing with %s\n') % url.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    usecommon = other.capable('getbundle')
    other, common, incoming, bundle = bundlerepo.getremotechanges(
        ui, repo, other, revs, opts["bundle"], opts["force"],
        usecommon=usecommon)
    if not incoming:
        ui.status(_("no changes found\n"))
        return subreporecurse()

    try:
        # work out which changesets to show
        if usecommon:
            chlist = other.changelog.findmissing(common, revs)
        else:
            chlist = other.changelog.nodesbetween(incoming, revs)[0]
        displayer = cmdutil.show_changeset(ui, other, opts, buffered)

        # XXX once graphlog extension makes it into core,
        # should be replaced by a if graph/else
        displaychlist(other, chlist, displayer)

        displayer.close()
    finally:
        # always close the (possibly bundle-backed) remote and remove any
        # temporary bundle file
        if hasattr(other, 'close'):
            other.close()
        if bundle:
            os.unlink(bundle)
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes
465
456
def incoming(ui, repo, source, opts):
    """Show changesets that would be pulled from source.

    Recurses into subrepositories when opts['subrepos'] is set.
    """
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # honor --limit / --newest-first / --no-merges while showing
        limit = cmdutil.loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        shown = 0
        for n in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)
490
481
def _outgoing(ui, repo, dest, opts):
    """Return the nodes that would be pushed to dest, or None when the
    remote already has everything."""
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = parseurl(dest, opts.get('branch'))
    ui.status(_('comparing with %s\n') % url.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    other = repository(remoteui(repo, opts), dest)
    missing = discovery.findoutgoing(repo, other, force=opts.get('force'))
    if not missing:
        ui.status(_("no changes found\n"))
        return None

    return repo.changelog.nodesbetween(missing, revs)[0]
506
497
def outgoing(ui, repo, dest, opts):
    """Show changesets not found in dest.

    Recurses into subrepositories when opts['subrepos'] is set.
    """
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = cmdutil.loglimit(opts)
    nodes = _outgoing(ui, repo, dest, opts)
    if nodes is None:
        return recurse()

    if opts.get('newest_first'):
        nodes.reverse()
    displayer = cmdutil.show_changeset(ui, repo, opts)
    shown = 0
    for n in nodes:
        if limit is not None and shown >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        shown += 1
        displayer.show(repo[n])
    displayer.close()
    recurse()
    return 0 # exit code is zero since we found outgoing changes
537
528
def revert(repo, node, choose):
    """revert changes to revision in node without updating dirstate"""
    stats = mergemod.update(repo, node, False, True, choose)
    # stats[3] is the unresolved-files count
    return stats[3] > 0
541
532
def verify(repo):
    """verify the consistency of a repository"""
    # delegate entirely to the verify module
    return verifymod.verify(repo)
545
536
def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if hasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # ssh-specific options: command-line value wins over [ui] config
    for key in ('ssh', 'remotecmd'):
        val = opts.get(key) or src.config('ui', key)
        if val:
            dst.setconfig("ui", key, val)

    # bundle-specific options
    root = src.config('bundle', 'mainreporoot')
    if root:
        dst.setconfig('bundle', 'mainreporoot', root)

    # mirror selected local settings onto the remote ui
    for sect in ('auth', 'hostfingerprints', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val)
    cacerts = src.config('web', 'cacerts')
    if cacerts:
        dst.setconfig('web', 'cacerts', util.expandpath(cacerts))

    return dst
@@ -1,1934 +1,1934 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks
11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
# short module-level alias for util.propertycache
propertycache = util.propertycache
21
21
class localrepository(repo.repository):
    # protocol capabilities this repository class advertises
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    # on-disk revlog format requirements understood by this class
    supportedformats = set(('revlogv1', 'parentdelta'))
    # full set of .hg/requires entries this class can open
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
28
28
    def __init__(self, baseui, path=None, create=0):
        """Open the repository at path, creating it first when create is true.

        baseui is copied into self.ui and .hg/hgrc is layered on top of
        the copy.  Raises error.RepoError when the repository is missing
        (or, with create, already exists) and error.RequirementError for
        an unsupported entry in .hg/requires.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = util.path_auditor(self.root, self._checknested)
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            # layer per-repository config and load any extensions it enables
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no .hg/hgrc is fine
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'parentdelta', False):
                    requirements.append("parentdelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RequirementError(
                    _("requirement '%s' not supported") % r)

        # honor .hg/sharedpath indirection (shared repositories)
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None
116
117 def _applyrequirements(self, requirements):
117 def _applyrequirements(self, requirements):
118 self.requirements = requirements
118 self.requirements = requirements
119 self.sopener.options = {}
119 self.sopener.options = {}
120 if 'parentdelta' in requirements:
120 if 'parentdelta' in requirements:
121 self.sopener.options['parentdelta'] = 1
121 self.sopener.options['parentdelta'] = 1
122
122
123 def _writerequirements(self):
123 def _writerequirements(self):
124 reqfile = self.opener("requires", "w")
124 reqfile = self.opener("requires", "w")
125 for r in self.requirements:
125 for r in self.requirements:
126 reqfile.write("%s\n" % r)
126 reqfile.write("%s\n" % r)
127 reqfile.close()
127 reqfile.close()
128
128
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        # only paths inside this repository's root can be nested repos
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        # walk prefixes of subpath from longest to shortest, looking for
        # an enclosing subrepository recorded in the working context
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    # subpath is itself a subrepo: legal
                    return True
                else:
                    # delegate the remainder of the path to the subrepo
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
165
165
166 @util.propertycache
166 @util.propertycache
167 def _bookmarks(self):
167 def _bookmarks(self):
168 return bookmarks.read(self)
168 return bookmarks.read(self)
169
169
170 @util.propertycache
170 @util.propertycache
171 def _bookmarkcurrent(self):
171 def _bookmarkcurrent(self):
172 return bookmarks.readcurrent(self)
172 return bookmarks.readcurrent(self)
173
173
    @propertycache
    def changelog(self):
        """The changelog revlog, created lazily and cached."""
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                # a transaction is pending for this repository: also read
                # the not-yet-finalized changelog data
                c.readpending('00changelog.i.a')
        # record the changelog's revlog version as the default for the store
        self.sopener.options['defversion'] = c.version
        return c
183
183
184 @propertycache
184 @propertycache
185 def manifest(self):
185 def manifest(self):
186 return manifest.manifest(self.sopener)
186 return manifest.manifest(self.sopener)
187
187
    @propertycache
    def dirstate(self):
        """The working directory state, created lazily and cached.

        Dirstate parent nodes unknown to the changelog are replaced by
        nullid; the first such replacement emits a warning.
        """
        # one-element list so the closure below can mutate the flag
        warned = [0]
        def validate(node):
            try:
                r = self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
203
203
204 def __getitem__(self, changeid):
204 def __getitem__(self, changeid):
205 if changeid is None:
205 if changeid is None:
206 return context.workingctx(self)
206 return context.workingctx(self)
207 return context.changectx(self, changeid)
207 return context.changectx(self, changeid)
208
208
209 def __contains__(self, changeid):
209 def __contains__(self, changeid):
210 try:
210 try:
211 return bool(self.lookup(changeid))
211 return bool(self.lookup(changeid))
212 except error.RepoLookupError:
212 except error.RepoLookupError:
213 return False
213 return False
214
214
215 def __nonzero__(self):
215 def __nonzero__(self):
216 return True
216 return True
217
217
218 def __len__(self):
218 def __len__(self):
219 return len(self.changelog)
219 return len(self.changelog)
220
220
221 def __iter__(self):
221 def __iter__(self):
222 for i in xrange(len(self)):
222 for i in xrange(len(self)):
223 yield i
223 yield i
224
224
225 def url(self):
225 def url(self):
226 return 'file:' + self.root
226 return 'file:' + self.root
227
227
228 def hook(self, name, throw=False, **args):
228 def hook(self, name, throw=False, **args):
229 return hook.hook(self.ui, self, name, throw, **args)
229 return hook.hook(self.ui, self, name, throw, **args)
230
230
    # Characters that may not appear in a tag name (enforced by _tag).
    tag_disallowed = ':\r\n'
232
232
233 def _tag(self, names, node, message, local, user, date, extra={}):
233 def _tag(self, names, node, message, local, user, date, extra={}):
234 if isinstance(names, str):
234 if isinstance(names, str):
235 allchars = names
235 allchars = names
236 names = (names,)
236 names = (names,)
237 else:
237 else:
238 allchars = ''.join(names)
238 allchars = ''.join(names)
239 for c in self.tag_disallowed:
239 for c in self.tag_disallowed:
240 if c in allchars:
240 if c in allchars:
241 raise util.Abort(_('%r cannot be used in a tag name') % c)
241 raise util.Abort(_('%r cannot be used in a tag name') % c)
242
242
243 branches = self.branchmap()
243 branches = self.branchmap()
244 for name in names:
244 for name in names:
245 self.hook('pretag', throw=True, node=hex(node), tag=name,
245 self.hook('pretag', throw=True, node=hex(node), tag=name,
246 local=local)
246 local=local)
247 if name in branches:
247 if name in branches:
248 self.ui.warn(_("warning: tag %s conflicts with existing"
248 self.ui.warn(_("warning: tag %s conflicts with existing"
249 " branch name\n") % name)
249 " branch name\n") % name)
250
250
251 def writetags(fp, names, munge, prevtags):
251 def writetags(fp, names, munge, prevtags):
252 fp.seek(0, 2)
252 fp.seek(0, 2)
253 if prevtags and prevtags[-1] != '\n':
253 if prevtags and prevtags[-1] != '\n':
254 fp.write('\n')
254 fp.write('\n')
255 for name in names:
255 for name in names:
256 m = munge and munge(name) or name
256 m = munge and munge(name) or name
257 if self._tagtypes and name in self._tagtypes:
257 if self._tagtypes and name in self._tagtypes:
258 old = self._tags.get(name, nullid)
258 old = self._tags.get(name, nullid)
259 fp.write('%s %s\n' % (hex(old), m))
259 fp.write('%s %s\n' % (hex(old), m))
260 fp.write('%s %s\n' % (hex(node), m))
260 fp.write('%s %s\n' % (hex(node), m))
261 fp.close()
261 fp.close()
262
262
263 prevtags = ''
263 prevtags = ''
264 if local:
264 if local:
265 try:
265 try:
266 fp = self.opener('localtags', 'r+')
266 fp = self.opener('localtags', 'r+')
267 except IOError:
267 except IOError:
268 fp = self.opener('localtags', 'a')
268 fp = self.opener('localtags', 'a')
269 else:
269 else:
270 prevtags = fp.read()
270 prevtags = fp.read()
271
271
272 # local tags are stored in the current charset
272 # local tags are stored in the current charset
273 writetags(fp, names, None, prevtags)
273 writetags(fp, names, None, prevtags)
274 for name in names:
274 for name in names:
275 self.hook('tag', node=hex(node), tag=name, local=local)
275 self.hook('tag', node=hex(node), tag=name, local=local)
276 return
276 return
277
277
278 try:
278 try:
279 fp = self.wfile('.hgtags', 'rb+')
279 fp = self.wfile('.hgtags', 'rb+')
280 except IOError:
280 except IOError:
281 fp = self.wfile('.hgtags', 'ab')
281 fp = self.wfile('.hgtags', 'ab')
282 else:
282 else:
283 prevtags = fp.read()
283 prevtags = fp.read()
284
284
285 # committed tags are stored in UTF-8
285 # committed tags are stored in UTF-8
286 writetags(fp, names, encoding.fromlocal, prevtags)
286 writetags(fp, names, encoding.fromlocal, prevtags)
287
287
288 fp.close()
288 fp.close()
289
289
290 if '.hgtags' not in self.dirstate:
290 if '.hgtags' not in self.dirstate:
291 self[None].add(['.hgtags'])
291 self[None].add(['.hgtags'])
292
292
293 m = matchmod.exact(self.root, '', ['.hgtags'])
293 m = matchmod.exact(self.root, '', ['.hgtags'])
294 tagnode = self.commit(message, user, date, extra=extra, match=m)
294 tagnode = self.commit(message, user, date, extra=extra, match=m)
295
295
296 for name in names:
296 for name in names:
297 self.hook('tag', node=hex(node), tag=name, local=local)
297 self.hook('tag', node=hex(node), tag=name, local=local)
298
298
299 return tagnode
299 return tagnode
300
300
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # a global tag commits .hgtags, so refuse to proceed while
            # .hgtags appears in any of the first five status lists
            # (presumably modified/added/removed/deleted/unknown — confirm
            # against self.status())
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
330
330
331 def tags(self):
331 def tags(self):
332 '''return a mapping of tag to node'''
332 '''return a mapping of tag to node'''
333 if self._tags is None:
333 if self._tags is None:
334 (self._tags, self._tagtypes) = self._findtags()
334 (self._tags, self._tagtypes) = self._findtags()
335
335
336 return self._tags
336 return self._tags
337
337
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            # tags mapped to nullid are skipped (presumably deleted tags)
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
370
370
371 def tagtype(self, tagname):
371 def tagtype(self, tagname):
372 '''
372 '''
373 return the type of the given tag. result can be:
373 return the type of the given tag. result can be:
374
374
375 'local' : a local tag
375 'local' : a local tag
376 'global' : a global tag
376 'global' : a global tag
377 None : tag does not exist
377 None : tag does not exist
378 '''
378 '''
379
379
380 self.tags()
380 self.tags()
381
381
382 return self._tagtypes.get(tagname)
382 return self._tagtypes.get(tagname)
383
383
384 def tagslist(self):
384 def tagslist(self):
385 '''return a list of tags ordered by revision'''
385 '''return a list of tags ordered by revision'''
386 l = []
386 l = []
387 for t, n in self.tags().iteritems():
387 for t, n in self.tags().iteritems():
388 try:
388 try:
389 r = self.changelog.rev(n)
389 r = self.changelog.rev(n)
390 except:
390 except:
391 r = -2 # sort to the beginning of the list if unknown
391 r = -2 # sort to the beginning of the list if unknown
392 l.append((r, t, n))
392 l.append((r, t, n))
393 return [(t, n) for r, t, n in sorted(l)]
393 return [(t, n) for r, t, n in sorted(l)]
394
394
395 def nodetags(self, node):
395 def nodetags(self, node):
396 '''return the tags associated with a node'''
396 '''return the tags associated with a node'''
397 if not self.nodetagscache:
397 if not self.nodetagscache:
398 self.nodetagscache = {}
398 self.nodetagscache = {}
399 for t, n in self.tags().iteritems():
399 for t, n in self.tags().iteritems():
400 self.nodetagscache.setdefault(n, []).append(t)
400 self.nodetagscache.setdefault(n, []).append(t)
401 for tags in self.nodetagscache.itervalues():
401 for tags in self.nodetagscache.itervalues():
402 tags.sort()
402 tags.sort()
403 return self.nodetagscache.get(node, [])
403 return self.nodetagscache.get(node, [])
404
404
405 def nodebookmarks(self, node):
405 def nodebookmarks(self, node):
406 marks = []
406 marks = []
407 for bookmark, n in self._bookmarks.iteritems():
407 for bookmark, n in self._bookmarks.iteritems():
408 if n == node:
408 if n == node:
409 marks.append(bookmark)
409 marks.append(bookmark)
410 return sorted(marks)
410 return sorted(marks)
411
411
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        # Bring the branch-head map 'partial' (valid up to revision lrev)
        # up to date with the current tip, persisting it when it changed.
        tiprev = len(self) - 1
        if lrev != tiprev:
            # fold in every changeset added since the cache was written
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
421
421
    def updatebranchcache(self):
        """Ensure self._branchcache is valid for the current changelog tip."""
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # cache is already current
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no prior cache, or the old tip vanished (e.g. after a strip):
            # start over from the on-disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            # only the changesets after oldtip need to be folded in
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial
438
438
439 def branchmap(self):
439 def branchmap(self):
440 '''returns a dictionary {branch: [branchheads]}'''
440 '''returns a dictionary {branch: [branchheads]}'''
441 self.updatebranchcache()
441 self.updatebranchcache()
442 return self._branchcache
442 return self._branchcache
443
443
444 def branchtags(self):
444 def branchtags(self):
445 '''return a dict where branch names map to the tipmost head of
445 '''return a dict where branch names map to the tipmost head of
446 the branch, open heads come before closed'''
446 the branch, open heads come before closed'''
447 bt = {}
447 bt = {}
448 for bn, heads in self.branchmap().iteritems():
448 for bn, heads in self.branchmap().iteritems():
449 tip = heads[-1]
449 tip = heads[-1]
450 for h in reversed(heads):
450 for h in reversed(heads):
451 if 'close' not in self.changelog.read(h)[5]:
451 if 'close' not in self.changelog.read(h)[5]:
452 tip = h
452 tip = h
453 break
453 break
454 bt[bn] = tip
454 bt[bn] = tip
455 return bt
455 return bt
456
456
    def _readbranchcache(self):
        """Read the branch-head cache from cache/branchheads.

        Returns (partial, last, lrev): partial maps branch name to a
        list of head nodes; last/lrev are the tip node and revision the
        cache was valid for.  Any problem yields an empty cache.
        """
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<head hex> <branch name>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt cache is not fatal: fall back to an empty one
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
485
485
    def _writebranchcache(self, branches, tip, tiprev):
        """Atomically write the branch-head cache; I/O errors are ignored."""
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            # header line: "<tip hex> <tip rev>"
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.rename()
        except (IOError, OSError):
            # best effort only: the cache can always be rebuilt
            pass
496
496
    def _updatebranchcache(self, partial, ctxgen):
        """Fold the changesets yielded by ctxgen into the branch map
        'partial' ({branch name: [head nodes]}), in place."""
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                # a single candidate is trivially the head
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                # prune every old head that 'latest' can reach
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
520
520
521 def lookup(self, key):
521 def lookup(self, key):
522 if isinstance(key, int):
522 if isinstance(key, int):
523 return self.changelog.node(key)
523 return self.changelog.node(key)
524 elif key == '.':
524 elif key == '.':
525 return self.dirstate.parents()[0]
525 return self.dirstate.parents()[0]
526 elif key == 'null':
526 elif key == 'null':
527 return nullid
527 return nullid
528 elif key == 'tip':
528 elif key == 'tip':
529 return self.changelog.tip()
529 return self.changelog.tip()
530 n = self.changelog._match(key)
530 n = self.changelog._match(key)
531 if n:
531 if n:
532 return n
532 return n
533 if key in self._bookmarks:
533 if key in self._bookmarks:
534 return self._bookmarks[key]
534 return self._bookmarks[key]
535 if key in self.tags():
535 if key in self.tags():
536 return self.tags()[key]
536 return self.tags()[key]
537 if key in self.branchtags():
537 if key in self.branchtags():
538 return self.branchtags()[key]
538 return self.branchtags()[key]
539 n = self.changelog._partialmatch(key)
539 n = self.changelog._partialmatch(key)
540 if n:
540 if n:
541 return n
541 return n
542
542
543 # can't find key, check if it might have come from damaged dirstate
543 # can't find key, check if it might have come from damaged dirstate
544 if key in self.dirstate.parents():
544 if key in self.dirstate.parents():
545 raise error.Abort(_("working directory has unknown parent '%s'!")
545 raise error.Abort(_("working directory has unknown parent '%s'!")
546 % short(key))
546 % short(key))
547 try:
547 try:
548 if len(key) == 20:
548 if len(key) == 20:
549 key = hex(key)
549 key = hex(key)
550 except:
550 except:
551 pass
551 pass
552 raise error.RepoLookupError(_("unknown revision '%s'") % key)
552 raise error.RepoLookupError(_("unknown revision '%s'") % key)
553
553
554 def lookupbranch(self, key, remote=None):
554 def lookupbranch(self, key, remote=None):
555 repo = remote or self
555 repo = remote or self
556 if key in repo.branchmap():
556 if key in repo.branchmap():
557 return key
557 return key
558
558
559 repo = (remote and remote.local()) and remote or self
559 repo = (remote and remote.local()) and remote or self
560 return repo[key].branch()
560 return repo[key].branch()
561
561
562 def known(self, nodes):
562 def known(self, nodes):
563 nm = self.changelog.nodemap
563 nm = self.changelog.nodemap
564 return [(n in nm) for n in nodes]
564 return [(n in nm) for n in nodes]
565
565
566 def local(self):
566 def local(self):
567 return True
567 return True
568
568
569 def join(self, f):
569 def join(self, f):
570 return os.path.join(self.path, f)
570 return os.path.join(self.path, f)
571
571
572 def wjoin(self, f):
572 def wjoin(self, f):
573 return os.path.join(self.root, f)
573 return os.path.join(self.root, f)
574
574
575 def file(self, f):
575 def file(self, f):
576 if f[0] == '/':
576 if f[0] == '/':
577 f = f[1:]
577 f = f[1:]
578 return filelog.filelog(self.sopener, f)
578 return filelog.filelog(self.sopener, f)
579
579
580 def changectx(self, changeid):
580 def changectx(self, changeid):
581 return self[changeid]
581 return self[changeid]
582
582
583 def parents(self, changeid=None):
583 def parents(self, changeid=None):
584 '''get list of changectxs for parents of changeid'''
584 '''get list of changectxs for parents of changeid'''
585 return self[changeid].parents()
585 return self[changeid].parents()
586
586
587 def filectx(self, path, changeid=None, fileid=None):
587 def filectx(self, path, changeid=None, fileid=None):
588 """changeid can be a changeset revision, node, or tag.
588 """changeid can be a changeset revision, node, or tag.
589 fileid can be a file revision or node."""
589 fileid can be a file revision or node."""
590 return context.filectx(self, path, changeid, fileid)
590 return context.filectx(self, path, changeid, fileid)
591
591
592 def getcwd(self):
592 def getcwd(self):
593 return self.dirstate.getcwd()
593 return self.dirstate.getcwd()
594
594
595 def pathto(self, f, cwd=None):
595 def pathto(self, f, cwd=None):
596 return self.dirstate.pathto(f, cwd)
596 return self.dirstate.pathto(f, cwd)
597
597
598 def wfile(self, f, mode='r'):
598 def wfile(self, f, mode='r'):
599 return self.wopener(f, mode)
599 return self.wopener(f, mode)
600
600
601 def _link(self, f):
601 def _link(self, f):
602 return os.path.islink(self.wjoin(f))
602 return os.path.islink(self.wjoin(f))
603
603
    def _loadfilter(self, filter):
        """Compile and cache the (matcher, function, params) list for the
        named filter config section (used with 'encode' and 'decode')."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # a registered data filter takes precedence when the
                # command starts with its name
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to util.filter with the raw command
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
627
627
628 def _filter(self, filterpats, filename, data):
628 def _filter(self, filterpats, filename, data):
629 for mf, fn, cmd in filterpats:
629 for mf, fn, cmd in filterpats:
630 if mf(filename):
630 if mf(filename):
631 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
631 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
632 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
632 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
633 break
633 break
634
634
635 return data
635 return data
636
636
637 @propertycache
637 @propertycache
638 def _encodefilterpats(self):
638 def _encodefilterpats(self):
639 return self._loadfilter('encode')
639 return self._loadfilter('encode')
640
640
641 @propertycache
641 @propertycache
642 def _decodefilterpats(self):
642 def _decodefilterpats(self):
643 return self._loadfilter('decode')
643 return self._loadfilter('decode')
644
644
645 def adddatafilter(self, name, filter):
645 def adddatafilter(self, name, filter):
646 self._datafilters[name] = filter
646 self._datafilters[name] = filter
647
647
648 def wread(self, filename):
648 def wread(self, filename):
649 if self._link(filename):
649 if self._link(filename):
650 data = os.readlink(self.wjoin(filename))
650 data = os.readlink(self.wjoin(filename))
651 else:
651 else:
652 data = self.wopener(filename, 'r').read()
652 data = self.wopener(filename, 'r').read()
653 return self._filter(self._encodefilterpats, filename, data)
653 return self._filter(self._encodefilterpats, filename, data)
654
654
655 def wwrite(self, filename, data, flags):
655 def wwrite(self, filename, data, flags):
656 data = self._filter(self._decodefilterpats, filename, data)
656 data = self._filter(self._decodefilterpats, filename, data)
657 if 'l' in flags:
657 if 'l' in flags:
658 self.wopener.symlink(data, filename)
658 self.wopener.symlink(data, filename)
659 else:
659 else:
660 self.wopener(filename, 'w').write(data)
660 self.wopener(filename, 'w').write(data)
661 if 'x' in flags:
661 if 'x' in flags:
662 util.set_flags(self.wjoin(filename), False, True)
662 util.set_flags(self.wjoin(filename), False, True)
663
663
664 def wwritedata(self, filename, data):
664 def wwritedata(self, filename, data):
665 return self._filter(self._decodefilterpats, filename, data)
665 return self._filter(self._decodefilterpats, filename, data)
666
666
    def transaction(self, desc):
        """Open a transaction (or nest into a running one), saving the
        state needed for rollback first.  Returns the transaction."""
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # reuse the already-running transaction via a nested scope
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(
            encoding.fromlocal(self.dirstate.branch()))
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        # NOTE(review): aftertrans appears to turn the journal files into
        # the undo.* files when the transaction completes — confirm against
        # aftertrans' definition
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # keep only a weak reference so the transaction can be collected
        self._transref = weakref.ref(tr)
        return tr
697
697
    def recover(self):
        """Roll back an interrupted transaction (a leftover journal file).

        Takes the store lock for the duration.  Returns True if a
        journal was found and rolled back, False otherwise.
        """
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                # drop cached changelog/manifest so they are re-read
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
712
712
    def rollback(self, dryrun=False):
        """Undo the last successfully committed transaction (undo.* files).

        Takes both the working-directory lock and the store lock.  When
        dryrun is true, only the description of what would be undone is
        printed.  Returns 1 if no rollback information is available,
        None otherwise.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    # undo.desc: "<len(repo)>\n<desc...>" written by
                    # transaction(); used purely for the status message
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s)\n") % (
                                 int(args[0]) - 1, args[1])
                    # NOTE(review): if undo.desc exists but has fewer than
                    # two lines, desc is left unbound and the status call
                    # below would raise NameError -- confirm upstream
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                if os.path.exists(self.join('undo.bookmarks')):
                    util.rename(self.join('undo.bookmarks'),
                                self.join('bookmarks'))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % self.dirstate.branch())
                # drop caches and tell interested parties history shrank
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
                parents = tuple([p.rev() for p in self.parents()])
                if len(parents) > 1:
                    self.ui.status(_("working directory now based on "
                                     "revisions %d and %d\n") % parents)
                else:
                    self.ui.status(_("working directory now based on "
                                     "revision %d\n") % parents)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
762
762
763 def invalidatecaches(self):
763 def invalidatecaches(self):
764 self._tags = None
764 self._tags = None
765 self._tagtypes = None
765 self._tagtypes = None
766 self.nodetagscache = None
766 self.nodetagscache = None
767 self._branchcache = None # in UTF-8
767 self._branchcache = None # in UTF-8
768 self._branchcachetip = None
768 self._branchcachetip = None
769
769
770 def invalidate(self):
770 def invalidate(self):
771 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
771 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
772 if a in self.__dict__:
772 if a in self.__dict__:
773 delattr(self, a)
773 delattr(self, a)
774 self.invalidatecaches()
774 self.invalidatecaches()
775
775
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file lockname and return the lock object.

        First attempts a non-blocking acquire.  If the lock is held by
        someone else and wait is true, warns the user and retries with
        the ui.timeout (default 600 seconds); if wait is false, the
        LockHeld exception propagates.  releasefn/acquirefn are
        callbacks invoked on release/acquire; desc names the lock in
        user-visible messages.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
790
790
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # reuse a still-held lock if one is cached from a previous call
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        # on release flush the store; on acquire drop stale caches
        l = self._lock(self.sjoin("lock"), wait, self.store.write,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
804
804
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        # reuse a still-held lock if one is cached from a previous call
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # on release write the dirstate; on acquire invalidate it
        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
819
819
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx is the file context being committed; manifest1/manifest2
        are the manifests of the commit's parents; linkrev ties the new
        filelog revision to the pending changelog revision; tr is the
        running transaction; changelist collects names of files that
        actually changed.  Returns the new filelog node, or the existing
        parent node when content is unchanged (flags-only change).
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                # no copy source found anywhere; record nothing and warn
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
899
899
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changelog node, or None when there is nothing
        to commit.  Raises util.Abort on partial merge commits, bad
        file arguments, unresolved merges or dirty subrepos.
        """
        # NOTE(review): mutable default for 'extra' is shared across
        # calls; it appears to be only read here -- confirm callees do
        # not mutate it

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories so explicit-pattern checking
            # below can tell "directory" apart from "missing file"
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            # status tuple: indices used below are 0 modified, 1 added,
            # 2 removed, 3 deleted (missing), 6 clean
            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing changed and no branch close/switch: no-op commit
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # deliberately broad: whatever aborted the commit, point
                # the user at the saved message, then re-raise
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        # post-commit hook runs outside the wlock
        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
1034
1034
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        When error is true, an IOError while committing a file is
        always fatal; otherwise a missing file (ENOENT) is treated as
        removed.  Returns the new changelog node.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # proxy so helpers cannot keep the transaction alive
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # file vanished from the working dir: record as removed
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # lets pretxncommit hooks see the pending changelog writes
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1102
1102
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.

        # drop all history-derived caches (tags, tag types, branch map)
        self.invalidatecaches()
1121
1121
1122 def walk(self, match, node=None):
1122 def walk(self, match, node=None):
1123 '''
1123 '''
1124 walk recursively through the directory tree or a given
1124 walk recursively through the directory tree or a given
1125 changeset, finding all files matched by the match
1125 changeset, finding all files matched by the match
1126 function
1126 function
1127 '''
1127 '''
1128 return self[node].walk(match)
1128 return self[node].walk(match)
1129
1129
1130 def status(self, node1='.', node2=None, match=None,
1130 def status(self, node1='.', node2=None, match=None,
1131 ignored=False, clean=False, unknown=False,
1131 ignored=False, clean=False, unknown=False,
1132 listsubrepos=False):
1132 listsubrepos=False):
1133 """return status of files between two nodes or node and working directory
1133 """return status of files between two nodes or node and working directory
1134
1134
1135 If node1 is None, use the first dirstate parent instead.
1135 If node1 is None, use the first dirstate parent instead.
1136 If node2 is None, compare node1 with working directory.
1136 If node2 is None, compare node1 with working directory.
1137 """
1137 """
1138
1138
1139 def mfmatches(ctx):
1139 def mfmatches(ctx):
1140 mf = ctx.manifest().copy()
1140 mf = ctx.manifest().copy()
1141 for fn in mf.keys():
1141 for fn in mf.keys():
1142 if not match(fn):
1142 if not match(fn):
1143 del mf[fn]
1143 del mf[fn]
1144 return mf
1144 return mf
1145
1145
1146 if isinstance(node1, context.changectx):
1146 if isinstance(node1, context.changectx):
1147 ctx1 = node1
1147 ctx1 = node1
1148 else:
1148 else:
1149 ctx1 = self[node1]
1149 ctx1 = self[node1]
1150 if isinstance(node2, context.changectx):
1150 if isinstance(node2, context.changectx):
1151 ctx2 = node2
1151 ctx2 = node2
1152 else:
1152 else:
1153 ctx2 = self[node2]
1153 ctx2 = self[node2]
1154
1154
1155 working = ctx2.rev() is None
1155 working = ctx2.rev() is None
1156 parentworking = working and ctx1 == self['.']
1156 parentworking = working and ctx1 == self['.']
1157 match = match or matchmod.always(self.root, self.getcwd())
1157 match = match or matchmod.always(self.root, self.getcwd())
1158 listignored, listclean, listunknown = ignored, clean, unknown
1158 listignored, listclean, listunknown = ignored, clean, unknown
1159
1159
1160 # load earliest manifest first for caching reasons
1160 # load earliest manifest first for caching reasons
1161 if not working and ctx2.rev() < ctx1.rev():
1161 if not working and ctx2.rev() < ctx1.rev():
1162 ctx2.manifest()
1162 ctx2.manifest()
1163
1163
1164 if not parentworking:
1164 if not parentworking:
1165 def bad(f, msg):
1165 def bad(f, msg):
1166 if f not in ctx1:
1166 if f not in ctx1:
1167 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1167 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1168 match.bad = bad
1168 match.bad = bad
1169
1169
1170 if working: # we need to scan the working dir
1170 if working: # we need to scan the working dir
1171 subrepos = []
1171 subrepos = []
1172 if '.hgsub' in self.dirstate:
1172 if '.hgsub' in self.dirstate:
1173 subrepos = ctx1.substate.keys()
1173 subrepos = ctx1.substate.keys()
1174 s = self.dirstate.status(match, subrepos, listignored,
1174 s = self.dirstate.status(match, subrepos, listignored,
1175 listclean, listunknown)
1175 listclean, listunknown)
1176 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1176 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1177
1177
1178 # check for any possibly clean files
1178 # check for any possibly clean files
1179 if parentworking and cmp:
1179 if parentworking and cmp:
1180 fixup = []
1180 fixup = []
1181 # do a full compare of any files that might have changed
1181 # do a full compare of any files that might have changed
1182 for f in sorted(cmp):
1182 for f in sorted(cmp):
1183 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1183 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1184 or ctx1[f].cmp(ctx2[f])):
1184 or ctx1[f].cmp(ctx2[f])):
1185 modified.append(f)
1185 modified.append(f)
1186 else:
1186 else:
1187 fixup.append(f)
1187 fixup.append(f)
1188
1188
1189 # update dirstate for files that are actually clean
1189 # update dirstate for files that are actually clean
1190 if fixup:
1190 if fixup:
1191 if listclean:
1191 if listclean:
1192 clean += fixup
1192 clean += fixup
1193
1193
1194 try:
1194 try:
1195 # updating the dirstate is optional
1195 # updating the dirstate is optional
1196 # so we don't wait on the lock
1196 # so we don't wait on the lock
1197 wlock = self.wlock(False)
1197 wlock = self.wlock(False)
1198 try:
1198 try:
1199 for f in fixup:
1199 for f in fixup:
1200 self.dirstate.normal(f)
1200 self.dirstate.normal(f)
1201 finally:
1201 finally:
1202 wlock.release()
1202 wlock.release()
1203 except error.LockError:
1203 except error.LockError:
1204 pass
1204 pass
1205
1205
1206 if not parentworking:
1206 if not parentworking:
1207 mf1 = mfmatches(ctx1)
1207 mf1 = mfmatches(ctx1)
1208 if working:
1208 if working:
1209 # we are comparing working dir against non-parent
1209 # we are comparing working dir against non-parent
1210 # generate a pseudo-manifest for the working dir
1210 # generate a pseudo-manifest for the working dir
1211 mf2 = mfmatches(self['.'])
1211 mf2 = mfmatches(self['.'])
1212 for f in cmp + modified + added:
1212 for f in cmp + modified + added:
1213 mf2[f] = None
1213 mf2[f] = None
1214 mf2.set(f, ctx2.flags(f))
1214 mf2.set(f, ctx2.flags(f))
1215 for f in removed:
1215 for f in removed:
1216 if f in mf2:
1216 if f in mf2:
1217 del mf2[f]
1217 del mf2[f]
1218 else:
1218 else:
1219 # we are comparing two revisions
1219 # we are comparing two revisions
1220 deleted, unknown, ignored = [], [], []
1220 deleted, unknown, ignored = [], [], []
1221 mf2 = mfmatches(ctx2)
1221 mf2 = mfmatches(ctx2)
1222
1222
1223 modified, added, clean = [], [], []
1223 modified, added, clean = [], [], []
1224 for fn in mf2:
1224 for fn in mf2:
1225 if fn in mf1:
1225 if fn in mf1:
1226 if (mf1.flags(fn) != mf2.flags(fn) or
1226 if (mf1.flags(fn) != mf2.flags(fn) or
1227 (mf1[fn] != mf2[fn] and
1227 (mf1[fn] != mf2[fn] and
1228 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1228 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1229 modified.append(fn)
1229 modified.append(fn)
1230 elif listclean:
1230 elif listclean:
1231 clean.append(fn)
1231 clean.append(fn)
1232 del mf1[fn]
1232 del mf1[fn]
1233 else:
1233 else:
1234 added.append(fn)
1234 added.append(fn)
1235 removed = mf1.keys()
1235 removed = mf1.keys()
1236
1236
1237 r = modified, added, removed, deleted, unknown, ignored, clean
1237 r = modified, added, removed, deleted, unknown, ignored, clean
1238
1238
1239 if listsubrepos:
1239 if listsubrepos:
1240 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1240 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1241 if working:
1241 if working:
1242 rev2 = None
1242 rev2 = None
1243 else:
1243 else:
1244 rev2 = ctx2.substate[subpath][1]
1244 rev2 = ctx2.substate[subpath][1]
1245 try:
1245 try:
1246 submatch = matchmod.narrowmatcher(subpath, match)
1246 submatch = matchmod.narrowmatcher(subpath, match)
1247 s = sub.status(rev2, match=submatch, ignored=listignored,
1247 s = sub.status(rev2, match=submatch, ignored=listignored,
1248 clean=listclean, unknown=listunknown,
1248 clean=listclean, unknown=listunknown,
1249 listsubrepos=True)
1249 listsubrepos=True)
1250 for rfiles, sfiles in zip(r, s):
1250 for rfiles, sfiles in zip(r, s):
1251 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1251 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1252 except error.LookupError:
1252 except error.LookupError:
1253 self.ui.status(_("skipping missing subrepository: %s\n")
1253 self.ui.status(_("skipping missing subrepository: %s\n")
1254 % subpath)
1254 % subpath)
1255
1255
1256 for l in r:
1256 for l in r:
1257 l.sort()
1257 l.sort()
1258 return r
1258 return r
1259
1259
1260 def heads(self, start=None):
1260 def heads(self, start=None):
1261 heads = self.changelog.heads(start)
1261 heads = self.changelog.heads(start)
1262 # sort the output in rev descending order
1262 # sort the output in rev descending order
1263 return sorted(heads, key=self.changelog.rev, reverse=True)
1263 return sorted(heads, key=self.changelog.rev, reverse=True)
1264
1264
1265 def branchheads(self, branch=None, start=None, closed=False):
1265 def branchheads(self, branch=None, start=None, closed=False):
1266 '''return a (possibly filtered) list of heads for the given branch
1266 '''return a (possibly filtered) list of heads for the given branch
1267
1267
1268 Heads are returned in topological order, from newest to oldest.
1268 Heads are returned in topological order, from newest to oldest.
1269 If branch is None, use the dirstate branch.
1269 If branch is None, use the dirstate branch.
1270 If start is not None, return only heads reachable from start.
1270 If start is not None, return only heads reachable from start.
1271 If closed is True, return heads that are marked as closed as well.
1271 If closed is True, return heads that are marked as closed as well.
1272 '''
1272 '''
1273 if branch is None:
1273 if branch is None:
1274 branch = self[None].branch()
1274 branch = self[None].branch()
1275 branches = self.branchmap()
1275 branches = self.branchmap()
1276 if branch not in branches:
1276 if branch not in branches:
1277 return []
1277 return []
1278 # the cache returns heads ordered lowest to highest
1278 # the cache returns heads ordered lowest to highest
1279 bheads = list(reversed(branches[branch]))
1279 bheads = list(reversed(branches[branch]))
1280 if start is not None:
1280 if start is not None:
1281 # filter out the heads that cannot be reached from startrev
1281 # filter out the heads that cannot be reached from startrev
1282 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1282 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1283 bheads = [h for h in bheads if h in fbheads]
1283 bheads = [h for h in bheads if h in fbheads]
1284 if not closed:
1284 if not closed:
1285 bheads = [h for h in bheads if
1285 bheads = [h for h in bheads if
1286 ('close' not in self.changelog.read(h)[5])]
1286 ('close' not in self.changelog.read(h)[5])]
1287 return bheads
1287 return bheads
1288
1288
1289 def branches(self, nodes):
1289 def branches(self, nodes):
1290 if not nodes:
1290 if not nodes:
1291 nodes = [self.changelog.tip()]
1291 nodes = [self.changelog.tip()]
1292 b = []
1292 b = []
1293 for n in nodes:
1293 for n in nodes:
1294 t = n
1294 t = n
1295 while 1:
1295 while 1:
1296 p = self.changelog.parents(n)
1296 p = self.changelog.parents(n)
1297 if p[1] != nullid or p[0] == nullid:
1297 if p[1] != nullid or p[0] == nullid:
1298 b.append((t, n, p[0], p[1]))
1298 b.append((t, n, p[0], p[1]))
1299 break
1299 break
1300 n = p[0]
1300 n = p[0]
1301 return b
1301 return b
1302
1302
1303 def between(self, pairs):
1303 def between(self, pairs):
1304 r = []
1304 r = []
1305
1305
1306 for top, bottom in pairs:
1306 for top, bottom in pairs:
1307 n, l, i = top, [], 0
1307 n, l, i = top, [], 0
1308 f = 1
1308 f = 1
1309
1309
1310 while n != bottom and n != nullid:
1310 while n != bottom and n != nullid:
1311 p = self.changelog.parents(n)[0]
1311 p = self.changelog.parents(n)[0]
1312 if i == f:
1312 if i == f:
1313 l.append(n)
1313 l.append(n)
1314 f = f * 2
1314 f = f * 2
1315 n = p
1315 n = p
1316 i += 1
1316 i += 1
1317
1317
1318 r.append(l)
1318 r.append(l)
1319
1319
1320 return r
1320 return r
1321
1321
    def pull(self, remote, heads=None, force=False):
        """Pull changesets from remote into this repository.

        heads, if not None, limits the pull to the ancestors of those
        nodes.  force allows pulling even when discovery would refuse.
        Return the integer produced by addchangegroup() (0 when no
        changes were found).
        """
        lock = self.lock()
        try:
            # a 'getbundle'-capable remote can compute the outgoing set
            # itself from a list of common nodes
            usecommon = remote.capable('getbundle')
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force, commononly=usecommon)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                # pick the richest transfer method the remote supports
                if usecommon:
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                # addchangegroup takes ownership of the lock and releases it
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result
1356
1356
1357 def checkpush(self, force, revs):
1357 def checkpush(self, force, revs):
1358 """Extensions can override this function if additional checks have
1358 """Extensions can override this function if additional checks have
1359 to be performed before pushing, or call it if they override push
1359 to be performed before pushing, or call it if they override push
1360 command.
1360 command.
1361 """
1361 """
1362 pass
1362 pass
1363
1363
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            # prepush() computes the changegroup to transfer; cg is None
            # when there is nothing to push or the push was refused
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        # after the changesets are transferred, advance any remote
        # bookmarks that both sides share and that moved forward locally
        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                # nr: remote position of bookmark k, nl: local (both hex)
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    # fast-forward only: the local target must descend
                    # from the remote's current position
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
1425
1425
1426 def changegroupinfo(self, nodes, source):
1426 def changegroupinfo(self, nodes, source):
1427 if self.ui.verbose or source == 'bundle':
1427 if self.ui.verbose or source == 'bundle':
1428 self.ui.status(_("%d changesets found\n") % len(nodes))
1428 self.ui.status(_("%d changesets found\n") % len(nodes))
1429 if self.ui.debugflag:
1429 if self.ui.debugflag:
1430 self.ui.debug("list of changesets:\n")
1430 self.ui.debug("list of changesets:\n")
1431 for node in nodes:
1431 for node in nodes:
1432 self.ui.debug("%s\n" % hex(node))
1432 self.ui.debug("%s\n" % hex(node))
1433
1433
1434 def changegroupsubset(self, bases, heads, source):
1434 def changegroupsubset(self, bases, heads, source):
1435 """Compute a changegroup consisting of all the nodes that are
1435 """Compute a changegroup consisting of all the nodes that are
1436 descendents of any of the bases and ancestors of any of the heads.
1436 descendents of any of the bases and ancestors of any of the heads.
1437 Return a chunkbuffer object whose read() method will return
1437 Return a chunkbuffer object whose read() method will return
1438 successive changegroup chunks.
1438 successive changegroup chunks.
1439
1439
1440 It is fairly complex as determining which filenodes and which
1440 It is fairly complex as determining which filenodes and which
1441 manifest nodes need to be included for the changeset to be complete
1441 manifest nodes need to be included for the changeset to be complete
1442 is non-trivial.
1442 is non-trivial.
1443
1443
1444 Another wrinkle is doing the reverse, figuring out which changeset in
1444 Another wrinkle is doing the reverse, figuring out which changeset in
1445 the changegroup a particular filenode or manifestnode belongs to.
1445 the changegroup a particular filenode or manifestnode belongs to.
1446 """
1446 """
1447 cl = self.changelog
1447 cl = self.changelog
1448 if not bases:
1448 if not bases:
1449 bases = [nullid]
1449 bases = [nullid]
1450 csets, bases, heads = cl.nodesbetween(bases, heads)
1450 csets, bases, heads = cl.nodesbetween(bases, heads)
1451 # We assume that all ancestors of bases are known
1451 # We assume that all ancestors of bases are known
1452 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1452 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1453 return self._changegroupsubset(common, csets, heads, source)
1453 return self._changegroupsubset(common, csets, heads, source)
1454
1454
1455 def getbundle(self, source, heads=None, common=None):
1455 def getbundle(self, source, heads=None, common=None):
1456 """Like changegroupsubset, but returns the set difference between the
1456 """Like changegroupsubset, but returns the set difference between the
1457 ancestors of heads and the ancestors common.
1457 ancestors of heads and the ancestors common.
1458
1458
1459 If heads is None, use the local heads. If common is None, use [nullid].
1459 If heads is None, use the local heads. If common is None, use [nullid].
1460
1460
1461 The nodes in common might not all be known locally due to the way the
1461 The nodes in common might not all be known locally due to the way the
1462 current discovery protocol works.
1462 current discovery protocol works.
1463 """
1463 """
1464 cl = self.changelog
1464 cl = self.changelog
1465 if common:
1465 if common:
1466 nm = cl.nodemap
1466 nm = cl.nodemap
1467 common = [n for n in common if n in nm]
1467 common = [n for n in common if n in nm]
1468 else:
1468 else:
1469 common = [nullid]
1469 common = [nullid]
1470 if not heads:
1470 if not heads:
1471 heads = cl.heads()
1471 heads = cl.heads()
1472 common, missing = cl.findcommonmissing(common, heads)
1472 common, missing = cl.findcommonmissing(common, heads)
1473 return self._changegroupsubset(common, missing, heads, source)
1473 return self._changegroupsubset(common, missing, heads, source)
1474
1474
    def _changegroupsubset(self, commonrevs, csets, heads, source):
        # Build a changegroup containing the changesets in csets (plus
        # the manifest and file revisions they introduce), assuming the
        # recipient already has every revision whose linkrev is in
        # commonrevs.  Returns an unbundle10 object wrapping a lazy
        # chunk generator.

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        # mutable cell so the nested lookup callbacks can update the
        # shared progress counter
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            for n in missing:
                if revlog.linkrev(revlog.rev(n)) not in commonrevs:
                    yield n

        def clookup(revlog, x):
            # changelog callback: record the files touched by changeset
            # x and remember its manifest node, then report progress
            c = cl.read(x)
            changedfiles.update(c[3])
            mfs.setdefault(c[0], x)
            count[0] += 1
            self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
            return x

        def mlookup(revlog, x):
            # manifest callback: for every changed file present in
            # manifest x, remember which changeset introduced its node
            clnode = mfs[x]
            mdata = mf.readfast(x)
            for f in changedfiles:
                if f in mdata:
                    fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
            count[0] += 1
            self.ui.progress(_('bundling'), count[0],
                             unit=_('manifests'), total=len(mfs))
            return mfs[x]

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, clookup):
                yield chunk
            efiles = len(changedfiles)
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), mlookup):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            for idx, fname in enumerate(sorted(changedfiles)):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = fnodes.pop(fname, {})
                first = True

                def flookup(revlog, x):
                    # even though we print the same progress on
                    # most loop iterations, put the progress call
                    # here so that time estimates (if any) can be updated
                    self.ui.progress(
                        _('bundling'), idx, item=fname,
                        unit=_('files'), total=efiles)
                    return missingfnodes[x]

                for chunk in filerevlog.group(prune(filerevlog, missingfnodes),
                                              flookup):
                    if first:
                        # an immediate close chunk means this file has
                        # nothing to send; emit no group for it at all
                        if chunk == changegroup.closechunk():
                            break
                        # a file group starts with its name
                        yield changegroup.chunkheader(len(fname))
                        yield fname
                        first = False
                    yield chunk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling'), None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1573
1573
1574 def changegroup(self, basenodes, source):
1574 def changegroup(self, basenodes, source):
1575 # to avoid a race we use changegroupsubset() (issue1320)
1575 # to avoid a race we use changegroupsubset() (issue1320)
1576 return self.changegroupsubset(basenodes, self.heads(), source)
1576 return self.changegroupsubset(basenodes, self.heads(), source)
1577
1577
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}  # manifest node -> owning changeset node
        changedfiles = set()  # names of every file the changesets touch

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        # revision numbers of the outgoing changesets; manifest and file
        # revisions are included iff their linkrev is in this set
        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            # yield the nodes of 'log' introduced by an outgoing changeset
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            # mutable cell shared with the lookup callbacks for progress
            count = [0]
            def clookup(revlog, x):
                # changelog callback: record changed files and the
                # manifest node of changeset x
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x

            for chunk in cl.group(nodes, clookup):
                yield chunk
            efiles = len(changedfiles)
            changecount = count[0]
            self.ui.progress(_('bundling'), None)

            count = [0]
            def mlookup(revlog, x):
                # manifest callback: map a manifest node back to the
                # changeset that introduced it via its linkrev
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=changecount)
                return cl.node(revlog.linkrev(revlog.rev(x)))

            for chunk in mf.group(gennodelst(mf), mlookup):
                yield chunk
            self.ui.progress(_('bundling'), None)

            for idx, fname in enumerate(sorted(changedfiles)):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                first = True
                def flookup(revlog, x):
                    # file callback: same linkrev mapping as manifests
                    self.ui.progress(
                        _('bundling'), idx, item=fname,
                        total=efiles, unit=_('files'))
                    return cl.node(revlog.linkrev(revlog.rev(x)))

                for chunk in filerevlog.group(gennodelst(filerevlog), flookup):
                    if first:
                        # an immediate close chunk means this file has
                        # nothing to send; emit no group for it at all
                        if chunk == changegroup.closechunk():
                            break
                        # a file group starts with its name
                        yield changegroup.chunkheader(len(fname))
                        yield fname
                        first = False
                    yield chunk
            # signal that no more groups follow
            yield changegroup.closechunk()
            self.ui.progress(_('bundling'), None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1659
1659
1660 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1660 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1661 """Add the changegroup returned by source.read() to this repo.
1661 """Add the changegroup returned by source.read() to this repo.
1662 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1662 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1663 the URL of the repo where this changegroup is coming from.
1663 the URL of the repo where this changegroup is coming from.
1664 If lock is not None, the function takes ownership of the lock
1664 If lock is not None, the function takes ownership of the lock
1665 and releases it after the changegroup is added.
1665 and releases it after the changegroup is added.
1666
1666
1667 Return an integer summarizing the change to this repo:
1667 Return an integer summarizing the change to this repo:
1668 - nothing changed or no source: 0
1668 - nothing changed or no source: 0
1669 - more heads than before: 1+added heads (2..n)
1669 - more heads than before: 1+added heads (2..n)
1670 - fewer heads than before: -1-removed heads (-2..-n)
1670 - fewer heads than before: -1-removed heads (-2..-n)
1671 - number of heads stays the same: 1
1671 - number of heads stays the same: 1
1672 """
1672 """
1673 def csmap(x):
1673 def csmap(x):
1674 self.ui.debug("add changeset %s\n" % short(x))
1674 self.ui.debug("add changeset %s\n" % short(x))
1675 return len(cl)
1675 return len(cl)
1676
1676
1677 def revmap(x):
1677 def revmap(x):
1678 return cl.rev(x)
1678 return cl.rev(x)
1679
1679
1680 if not source:
1680 if not source:
1681 return 0
1681 return 0
1682
1682
1683 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1683 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1684
1684
1685 changesets = files = revisions = 0
1685 changesets = files = revisions = 0
1686 efiles = set()
1686 efiles = set()
1687
1687
1688 # write changelog data to temp files so concurrent readers will not see
1688 # write changelog data to temp files so concurrent readers will not see
1689 # inconsistent view
1689 # inconsistent view
1690 cl = self.changelog
1690 cl = self.changelog
1691 cl.delayupdate()
1691 cl.delayupdate()
1692 oldheads = len(cl.heads())
1692 oldheads = len(cl.heads())
1693
1693
1694 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1694 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1695 try:
1695 try:
1696 trp = weakref.proxy(tr)
1696 trp = weakref.proxy(tr)
1697 # pull off the changeset group
1697 # pull off the changeset group
1698 self.ui.status(_("adding changesets\n"))
1698 self.ui.status(_("adding changesets\n"))
1699 clstart = len(cl)
1699 clstart = len(cl)
1700 class prog(object):
1700 class prog(object):
1701 step = _('changesets')
1701 step = _('changesets')
1702 count = 1
1702 count = 1
1703 ui = self.ui
1703 ui = self.ui
1704 total = None
1704 total = None
1705 def __call__(self):
1705 def __call__(self):
1706 self.ui.progress(self.step, self.count, unit=_('chunks'),
1706 self.ui.progress(self.step, self.count, unit=_('chunks'),
1707 total=self.total)
1707 total=self.total)
1708 self.count += 1
1708 self.count += 1
1709 pr = prog()
1709 pr = prog()
1710 source.callback = pr
1710 source.callback = pr
1711
1711
1712 if (cl.addgroup(source, csmap, trp) is None
1712 if (cl.addgroup(source, csmap, trp) is None
1713 and not emptyok):
1713 and not emptyok):
1714 raise util.Abort(_("received changelog group is empty"))
1714 raise util.Abort(_("received changelog group is empty"))
1715 clend = len(cl)
1715 clend = len(cl)
1716 changesets = clend - clstart
1716 changesets = clend - clstart
1717 for c in xrange(clstart, clend):
1717 for c in xrange(clstart, clend):
1718 efiles.update(self[c].files())
1718 efiles.update(self[c].files())
1719 efiles = len(efiles)
1719 efiles = len(efiles)
1720 self.ui.progress(_('changesets'), None)
1720 self.ui.progress(_('changesets'), None)
1721
1721
1722 # pull off the manifest group
1722 # pull off the manifest group
1723 self.ui.status(_("adding manifests\n"))
1723 self.ui.status(_("adding manifests\n"))
1724 pr.step = _('manifests')
1724 pr.step = _('manifests')
1725 pr.count = 1
1725 pr.count = 1
1726 pr.total = changesets # manifests <= changesets
1726 pr.total = changesets # manifests <= changesets
1727 # no need to check for empty manifest group here:
1727 # no need to check for empty manifest group here:
1728 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1728 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1729 # no new manifest will be created and the manifest group will
1729 # no new manifest will be created and the manifest group will
1730 # be empty during the pull
1730 # be empty during the pull
1731 self.manifest.addgroup(source, revmap, trp)
1731 self.manifest.addgroup(source, revmap, trp)
1732 self.ui.progress(_('manifests'), None)
1732 self.ui.progress(_('manifests'), None)
1733
1733
1734 needfiles = {}
1734 needfiles = {}
1735 if self.ui.configbool('server', 'validate', default=False):
1735 if self.ui.configbool('server', 'validate', default=False):
1736 # validate incoming csets have their manifests
1736 # validate incoming csets have their manifests
1737 for cset in xrange(clstart, clend):
1737 for cset in xrange(clstart, clend):
1738 mfest = self.changelog.read(self.changelog.node(cset))[0]
1738 mfest = self.changelog.read(self.changelog.node(cset))[0]
1739 mfest = self.manifest.readdelta(mfest)
1739 mfest = self.manifest.readdelta(mfest)
1740 # store file nodes we must see
1740 # store file nodes we must see
1741 for f, n in mfest.iteritems():
1741 for f, n in mfest.iteritems():
1742 needfiles.setdefault(f, set()).add(n)
1742 needfiles.setdefault(f, set()).add(n)
1743
1743
1744 # process the files
1744 # process the files
1745 self.ui.status(_("adding file changes\n"))
1745 self.ui.status(_("adding file changes\n"))
1746 pr.step = 'files'
1746 pr.step = 'files'
1747 pr.count = 1
1747 pr.count = 1
1748 pr.total = efiles
1748 pr.total = efiles
1749 source.callback = None
1749 source.callback = None
1750
1750
1751 while 1:
1751 while 1:
1752 f = source.chunk()
1752 f = source.chunk()
1753 if not f:
1753 if not f:
1754 break
1754 break
1755 self.ui.debug("adding %s revisions\n" % f)
1755 self.ui.debug("adding %s revisions\n" % f)
1756 pr()
1756 pr()
1757 fl = self.file(f)
1757 fl = self.file(f)
1758 o = len(fl)
1758 o = len(fl)
1759 if fl.addgroup(source, revmap, trp) is None:
1759 if fl.addgroup(source, revmap, trp) is None:
1760 raise util.Abort(_("received file revlog group is empty"))
1760 raise util.Abort(_("received file revlog group is empty"))
1761 revisions += len(fl) - o
1761 revisions += len(fl) - o
1762 files += 1
1762 files += 1
1763 if f in needfiles:
1763 if f in needfiles:
1764 needs = needfiles[f]
1764 needs = needfiles[f]
1765 for new in xrange(o, len(fl)):
1765 for new in xrange(o, len(fl)):
1766 n = fl.node(new)
1766 n = fl.node(new)
1767 if n in needs:
1767 if n in needs:
1768 needs.remove(n)
1768 needs.remove(n)
1769 if not needs:
1769 if not needs:
1770 del needfiles[f]
1770 del needfiles[f]
1771 self.ui.progress(_('files'), None)
1771 self.ui.progress(_('files'), None)
1772
1772
1773 for f, needs in needfiles.iteritems():
1773 for f, needs in needfiles.iteritems():
1774 fl = self.file(f)
1774 fl = self.file(f)
1775 for n in needs:
1775 for n in needs:
1776 try:
1776 try:
1777 fl.rev(n)
1777 fl.rev(n)
1778 except error.LookupError:
1778 except error.LookupError:
1779 raise util.Abort(
1779 raise util.Abort(
1780 _('missing file data for %s:%s - run hg verify') %
1780 _('missing file data for %s:%s - run hg verify') %
1781 (f, hex(n)))
1781 (f, hex(n)))
1782
1782
1783 newheads = len(cl.heads())
1783 newheads = len(cl.heads())
1784 heads = ""
1784 heads = ""
1785 if oldheads and newheads != oldheads:
1785 if oldheads and newheads != oldheads:
1786 heads = _(" (%+d heads)") % (newheads - oldheads)
1786 heads = _(" (%+d heads)") % (newheads - oldheads)
1787
1787
1788 self.ui.status(_("added %d changesets"
1788 self.ui.status(_("added %d changesets"
1789 " with %d changes to %d files%s\n")
1789 " with %d changes to %d files%s\n")
1790 % (changesets, revisions, files, heads))
1790 % (changesets, revisions, files, heads))
1791
1791
1792 if changesets > 0:
1792 if changesets > 0:
1793 p = lambda: cl.writepending() and self.root or ""
1793 p = lambda: cl.writepending() and self.root or ""
1794 self.hook('pretxnchangegroup', throw=True,
1794 self.hook('pretxnchangegroup', throw=True,
1795 node=hex(cl.node(clstart)), source=srctype,
1795 node=hex(cl.node(clstart)), source=srctype,
1796 url=url, pending=p)
1796 url=url, pending=p)
1797
1797
1798 # make changelog see real files again
1798 # make changelog see real files again
1799 cl.finalize(trp)
1799 cl.finalize(trp)
1800
1800
1801 tr.close()
1801 tr.close()
1802 finally:
1802 finally:
1803 tr.release()
1803 tr.release()
1804 if lock:
1804 if lock:
1805 lock.release()
1805 lock.release()
1806
1806
1807 if changesets > 0:
1807 if changesets > 0:
1808 # forcefully update the on-disk branch cache
1808 # forcefully update the on-disk branch cache
1809 self.ui.debug("updating the branch cache\n")
1809 self.ui.debug("updating the branch cache\n")
1810 self.updatebranchcache()
1810 self.updatebranchcache()
1811 self.hook("changegroup", node=hex(cl.node(clstart)),
1811 self.hook("changegroup", node=hex(cl.node(clstart)),
1812 source=srctype, url=url)
1812 source=srctype, url=url)
1813
1813
1814 for i in xrange(clstart, clend):
1814 for i in xrange(clstart, clend):
1815 self.hook("incoming", node=hex(cl.node(i)),
1815 self.hook("incoming", node=hex(cl.node(i)),
1816 source=srctype, url=url)
1816 source=srctype, url=url)
1817
1817
1818 # never return 0 here:
1818 # never return 0 here:
1819 if newheads < oldheads:
1819 if newheads < oldheads:
1820 return newheads - oldheads - 1
1820 return newheads - oldheads - 1
1821 else:
1821 else:
1822 return newheads - oldheads + 1
1822 return newheads - oldheads + 1
1823
1823
1824
1824
1825 def stream_in(self, remote, requirements):
1825 def stream_in(self, remote, requirements):
1826 lock = self.lock()
1826 lock = self.lock()
1827 try:
1827 try:
1828 fp = remote.stream_out()
1828 fp = remote.stream_out()
1829 l = fp.readline()
1829 l = fp.readline()
1830 try:
1830 try:
1831 resp = int(l)
1831 resp = int(l)
1832 except ValueError:
1832 except ValueError:
1833 raise error.ResponseError(
1833 raise error.ResponseError(
1834 _('Unexpected response from remote server:'), l)
1834 _('Unexpected response from remote server:'), l)
1835 if resp == 1:
1835 if resp == 1:
1836 raise util.Abort(_('operation forbidden by server'))
1836 raise util.Abort(_('operation forbidden by server'))
1837 elif resp == 2:
1837 elif resp == 2:
1838 raise util.Abort(_('locking the remote repository failed'))
1838 raise util.Abort(_('locking the remote repository failed'))
1839 elif resp != 0:
1839 elif resp != 0:
1840 raise util.Abort(_('the server sent an unknown error code'))
1840 raise util.Abort(_('the server sent an unknown error code'))
1841 self.ui.status(_('streaming all changes\n'))
1841 self.ui.status(_('streaming all changes\n'))
1842 l = fp.readline()
1842 l = fp.readline()
1843 try:
1843 try:
1844 total_files, total_bytes = map(int, l.split(' ', 1))
1844 total_files, total_bytes = map(int, l.split(' ', 1))
1845 except (ValueError, TypeError):
1845 except (ValueError, TypeError):
1846 raise error.ResponseError(
1846 raise error.ResponseError(
1847 _('Unexpected response from remote server:'), l)
1847 _('Unexpected response from remote server:'), l)
1848 self.ui.status(_('%d files to transfer, %s of data\n') %
1848 self.ui.status(_('%d files to transfer, %s of data\n') %
1849 (total_files, util.bytecount(total_bytes)))
1849 (total_files, util.bytecount(total_bytes)))
1850 start = time.time()
1850 start = time.time()
1851 for i in xrange(total_files):
1851 for i in xrange(total_files):
1852 # XXX doesn't support '\n' or '\r' in filenames
1852 # XXX doesn't support '\n' or '\r' in filenames
1853 l = fp.readline()
1853 l = fp.readline()
1854 try:
1854 try:
1855 name, size = l.split('\0', 1)
1855 name, size = l.split('\0', 1)
1856 size = int(size)
1856 size = int(size)
1857 except (ValueError, TypeError):
1857 except (ValueError, TypeError):
1858 raise error.ResponseError(
1858 raise error.ResponseError(
1859 _('Unexpected response from remote server:'), l)
1859 _('Unexpected response from remote server:'), l)
1860 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1860 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1861 # for backwards compat, name was partially encoded
1861 # for backwards compat, name was partially encoded
1862 ofp = self.sopener(store.decodedir(name), 'w')
1862 ofp = self.sopener(store.decodedir(name), 'w')
1863 for chunk in util.filechunkiter(fp, limit=size):
1863 for chunk in util.filechunkiter(fp, limit=size):
1864 ofp.write(chunk)
1864 ofp.write(chunk)
1865 ofp.close()
1865 ofp.close()
1866 elapsed = time.time() - start
1866 elapsed = time.time() - start
1867 if elapsed <= 0:
1867 if elapsed <= 0:
1868 elapsed = 0.001
1868 elapsed = 0.001
1869 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1869 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1870 (util.bytecount(total_bytes), elapsed,
1870 (util.bytecount(total_bytes), elapsed,
1871 util.bytecount(total_bytes / elapsed)))
1871 util.bytecount(total_bytes / elapsed)))
1872
1872
1873 # new requirements = old non-format requirements + new format-related
1873 # new requirements = old non-format requirements + new format-related
1874 # requirements from the streamed-in repository
1874 # requirements from the streamed-in repository
1875 requirements.update(set(self.requirements) - self.supportedformats)
1875 requirements.update(set(self.requirements) - self.supportedformats)
1876 self._applyrequirements(requirements)
1876 self._applyrequirements(requirements)
1877 self._writerequirements()
1877 self._writerequirements()
1878
1878
1879 self.invalidate()
1879 self.invalidate()
1880 return len(self.heads()) + 1
1880 return len(self.heads()) + 1
1881 finally:
1881 finally:
1882 lock.release()
1882 lock.release()
1883
1883
1884 def clone(self, remote, heads=[], stream=False):
1884 def clone(self, remote, heads=[], stream=False):
1885 '''clone remote repository.
1885 '''clone remote repository.
1886
1886
1887 keyword arguments:
1887 keyword arguments:
1888 heads: list of revs to clone (forces use of pull)
1888 heads: list of revs to clone (forces use of pull)
1889 stream: use streaming clone if possible'''
1889 stream: use streaming clone if possible'''
1890
1890
1891 # now, all clients that can request uncompressed clones can
1891 # now, all clients that can request uncompressed clones can
1892 # read repo formats supported by all servers that can serve
1892 # read repo formats supported by all servers that can serve
1893 # them.
1893 # them.
1894
1894
1895 # if revlog format changes, client will have to check version
1895 # if revlog format changes, client will have to check version
1896 # and format flags on "stream" capability, and use
1896 # and format flags on "stream" capability, and use
1897 # uncompressed only if compatible.
1897 # uncompressed only if compatible.
1898
1898
1899 if stream and not heads:
1899 if stream and not heads:
1900 # 'stream' means remote revlog format is revlogv1 only
1900 # 'stream' means remote revlog format is revlogv1 only
1901 if remote.capable('stream'):
1901 if remote.capable('stream'):
1902 return self.stream_in(remote, set(('revlogv1',)))
1902 return self.stream_in(remote, set(('revlogv1',)))
1903 # otherwise, 'streamreqs' contains the remote revlog format
1903 # otherwise, 'streamreqs' contains the remote revlog format
1904 streamreqs = remote.capable('streamreqs')
1904 streamreqs = remote.capable('streamreqs')
1905 if streamreqs:
1905 if streamreqs:
1906 streamreqs = set(streamreqs.split(','))
1906 streamreqs = set(streamreqs.split(','))
1907 # if we support it, stream in and adjust our requirements
1907 # if we support it, stream in and adjust our requirements
1908 if not streamreqs - self.supportedformats:
1908 if not streamreqs - self.supportedformats:
1909 return self.stream_in(remote, streamreqs)
1909 return self.stream_in(remote, streamreqs)
1910 return self.pull(remote, heads)
1910 return self.pull(remote, heads)
1911
1911
1912 def pushkey(self, namespace, key, old, new):
1912 def pushkey(self, namespace, key, old, new):
1913 return pushkey.push(self, namespace, key, old, new)
1913 return pushkey.push(self, namespace, key, old, new)
1914
1914
1915 def listkeys(self, namespace):
1915 def listkeys(self, namespace):
1916 return pushkey.list(self, namespace)
1916 return pushkey.list(self, namespace)
1917
1917
1918 def debugwireargs(self, one, two, three=None, four=None):
1918 def debugwireargs(self, one, two, three=None, four=None):
1919 '''used to test argument passing over the wire'''
1919 '''used to test argument passing over the wire'''
1920 return "%s %s %s %s" % (one, two, three, four)
1920 return "%s %s %s %s" % (one, two, three, four)
1921
1921
1922 # used to avoid circular references so destructors work
1922 # used to avoid circular references so destructors work
1923 def aftertrans(files):
1923 def aftertrans(files):
1924 renamefiles = [tuple(t) for t in files]
1924 renamefiles = [tuple(t) for t in files]
1925 def a():
1925 def a():
1926 for src, dest in renamefiles:
1926 for src, dest in renamefiles:
1927 util.rename(src, dest)
1927 util.rename(src, dest)
1928 return a
1928 return a
1929
1929
1930 def instance(ui, path, create):
1930 def instance(ui, path, create):
1931 return localrepository(ui, util.drop_scheme('file', path), create)
1931 return localrepository(ui, urlmod.localpath(path), create)
1932
1932
1933 def islocal(path):
1933 def islocal(path):
1934 return True
1934 return True
@@ -1,937 +1,960 b''
1 # url.py - HTTP handling for mercurial
1 # url.py - HTTP handling for mercurial
2 #
2 #
3 # Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
4 # Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 import urllib, urllib2, httplib, os, socket, cStringIO
10 import urllib, urllib2, httplib, os, socket, cStringIO
11 import __builtin__
11 import __builtin__
12 from i18n import _
12 from i18n import _
13 import keepalive, util
13 import keepalive, util
14
14
15 class url(object):
15 class url(object):
16 """Reliable URL parser.
16 """Reliable URL parser.
17
17
18 This parses URLs and provides attributes for the following
18 This parses URLs and provides attributes for the following
19 components:
19 components:
20
20
21 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
21 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
22
22
23 Missing components are set to None. The only exception is
23 Missing components are set to None. The only exception is
24 fragment, which is set to '' if present but empty.
24 fragment, which is set to '' if present but empty.
25
25
26 If parse_fragment is False, fragment is included in query. If
26 If parse_fragment is False, fragment is included in query. If
27 parse_query is False, query is included in path. If both are
27 parse_query is False, query is included in path. If both are
28 False, both fragment and query are included in path.
28 False, both fragment and query are included in path.
29
29
30 See http://www.ietf.org/rfc/rfc2396.txt for more information.
30 See http://www.ietf.org/rfc/rfc2396.txt for more information.
31
31
32 Note that for backward compatibility reasons, bundle URLs do not
32 Note that for backward compatibility reasons, bundle URLs do not
33 take host names. That means 'bundle://../' has a path of '../'.
33 take host names. That means 'bundle://../' has a path of '../'.
34
34
35 Examples:
35 Examples:
36
36
37 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
37 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
38 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
38 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
39 >>> url('ssh://[::1]:2200//home/joe/repo')
39 >>> url('ssh://[::1]:2200//home/joe/repo')
40 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
40 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
41 >>> url('file:///home/joe/repo')
41 >>> url('file:///home/joe/repo')
42 <url scheme: 'file', path: '/home/joe/repo'>
42 <url scheme: 'file', path: '/home/joe/repo'>
43 >>> url('bundle:foo')
43 >>> url('bundle:foo')
44 <url scheme: 'bundle', path: 'foo'>
44 <url scheme: 'bundle', path: 'foo'>
45 >>> url('bundle://../foo')
45 >>> url('bundle://../foo')
46 <url scheme: 'bundle', path: '../foo'>
46 <url scheme: 'bundle', path: '../foo'>
47 >>> url('c:\\\\foo\\\\bar')
47 >>> url('c:\\\\foo\\\\bar')
48 <url path: 'c:\\\\foo\\\\bar'>
48 <url path: 'c:\\\\foo\\\\bar'>
49
49
50 Authentication credentials:
50 Authentication credentials:
51
51
52 >>> url('ssh://joe:xyz@x/repo')
52 >>> url('ssh://joe:xyz@x/repo')
53 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
53 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
54 >>> url('ssh://joe@x/repo')
54 >>> url('ssh://joe@x/repo')
55 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
55 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
56
56
57 Query strings and fragments:
57 Query strings and fragments:
58
58
59 >>> url('http://host/a?b#c')
59 >>> url('http://host/a?b#c')
60 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
60 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
61 >>> url('http://host/a?b#c', parse_query=False, parse_fragment=False)
61 >>> url('http://host/a?b#c', parse_query=False, parse_fragment=False)
62 <url scheme: 'http', host: 'host', path: 'a?b#c'>
62 <url scheme: 'http', host: 'host', path: 'a?b#c'>
63 """
63 """
64
64
65 _safechars = "!~*'()+"
65 _safechars = "!~*'()+"
66 _safepchars = "/!~*'()+"
66 _safepchars = "/!~*'()+"
67
67
68 def __init__(self, path, parse_query=True, parse_fragment=True):
68 def __init__(self, path, parse_query=True, parse_fragment=True):
69 # We slowly chomp away at path until we have only the path left
69 # We slowly chomp away at path until we have only the path left
70 self.scheme = self.user = self.passwd = self.host = None
70 self.scheme = self.user = self.passwd = self.host = None
71 self.port = self.path = self.query = self.fragment = None
71 self.port = self.path = self.query = self.fragment = None
72 self._localpath = True
72 self._localpath = True
73 self._hostport = ''
74 self._origpath = path
73
75
74 # special case for Windows drive letters
76 # special case for Windows drive letters
75 if has_drive_letter(path):
77 if has_drive_letter(path):
76 self.path = path
78 self.path = path
77 return
79 return
78
80
79 # For compatibility reasons, we can't handle bundle paths as
81 # For compatibility reasons, we can't handle bundle paths as
80 # normal URLS
82 # normal URLS
81 if path.startswith('bundle:'):
83 if path.startswith('bundle:'):
82 self.scheme = 'bundle'
84 self.scheme = 'bundle'
83 path = path[7:]
85 path = path[7:]
84 if path.startswith('//'):
86 if path.startswith('//'):
85 path = path[2:]
87 path = path[2:]
86 self.path = path
88 self.path = path
87 return
89 return
88
90
89 if not path.startswith('/') and ':' in path:
91 if not path.startswith('/') and ':' in path:
90 parts = path.split(':', 1)
92 parts = path.split(':', 1)
91 if parts[0]:
93 if parts[0]:
92 self.scheme, path = parts
94 self.scheme, path = parts
93 self._localpath = False
95 self._localpath = False
94
96
95 if not path:
97 if not path:
96 path = None
98 path = None
97 if self._localpath:
99 if self._localpath:
98 self.path = ''
100 self.path = ''
99 return
101 return
100 else:
102 else:
101 if parse_fragment and '#' in path:
103 if parse_fragment and '#' in path:
102 path, self.fragment = path.split('#', 1)
104 path, self.fragment = path.split('#', 1)
103 if not path:
105 if not path:
104 path = None
106 path = None
105 if self._localpath:
107 if self._localpath:
106 self.path = path
108 self.path = path
107 return
109 return
108
110
109 if parse_query and '?' in path:
111 if parse_query and '?' in path:
110 path, self.query = path.split('?', 1)
112 path, self.query = path.split('?', 1)
111 if not path:
113 if not path:
112 path = None
114 path = None
113 if not self.query:
115 if not self.query:
114 self.query = None
116 self.query = None
115
117
116 # // is required to specify a host/authority
118 # // is required to specify a host/authority
117 if path and path.startswith('//'):
119 if path and path.startswith('//'):
118 parts = path[2:].split('/', 1)
120 parts = path[2:].split('/', 1)
119 if len(parts) > 1:
121 if len(parts) > 1:
120 self.host, path = parts
122 self.host, path = parts
121 path = path
123 path = path
122 else:
124 else:
123 self.host = parts[0]
125 self.host = parts[0]
124 path = None
126 path = None
125 if not self.host:
127 if not self.host:
126 self.host = None
128 self.host = None
127 if path:
129 if path:
128 path = '/' + path
130 path = '/' + path
129
131
130 if self.host and '@' in self.host:
132 if self.host and '@' in self.host:
131 self.user, self.host = self.host.rsplit('@', 1)
133 self.user, self.host = self.host.rsplit('@', 1)
132 if ':' in self.user:
134 if ':' in self.user:
133 self.user, self.passwd = self.user.split(':', 1)
135 self.user, self.passwd = self.user.split(':', 1)
134 if not self.host:
136 if not self.host:
135 self.host = None
137 self.host = None
136
138
137 # Don't split on colons in IPv6 addresses without ports
139 # Don't split on colons in IPv6 addresses without ports
138 if (self.host and ':' in self.host and
140 if (self.host and ':' in self.host and
139 not (self.host.startswith('[') and self.host.endswith(']'))):
141 not (self.host.startswith('[') and self.host.endswith(']'))):
142 self._hostport = self.host
140 self.host, self.port = self.host.rsplit(':', 1)
143 self.host, self.port = self.host.rsplit(':', 1)
141 if not self.host:
144 if not self.host:
142 self.host = None
145 self.host = None
143
146
144 if (self.host and self.scheme == 'file' and
147 if (self.host and self.scheme == 'file' and
145 self.host not in ('localhost', '127.0.0.1', '[::1]')):
148 self.host not in ('localhost', '127.0.0.1', '[::1]')):
146 raise util.Abort(_('file:// URLs can only refer to localhost'))
149 raise util.Abort(_('file:// URLs can only refer to localhost'))
147
150
148 self.path = path
151 self.path = path
149
152
150 for a in ('user', 'passwd', 'host', 'port',
153 for a in ('user', 'passwd', 'host', 'port',
151 'path', 'query', 'fragment'):
154 'path', 'query', 'fragment'):
152 v = getattr(self, a)
155 v = getattr(self, a)
153 if v is not None:
156 if v is not None:
154 setattr(self, a, urllib.unquote(v))
157 setattr(self, a, urllib.unquote(v))
155
158
156 def __repr__(self):
159 def __repr__(self):
157 attrs = []
160 attrs = []
158 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
161 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
159 'query', 'fragment'):
162 'query', 'fragment'):
160 v = getattr(self, a)
163 v = getattr(self, a)
161 if v is not None:
164 if v is not None:
162 attrs.append('%s: %r' % (a, v))
165 attrs.append('%s: %r' % (a, v))
163 return '<url %s>' % ', '.join(attrs)
166 return '<url %s>' % ', '.join(attrs)
164
167
165 def __str__(self):
168 def __str__(self):
166 """Join the URL's components back into a URL string.
169 """Join the URL's components back into a URL string.
167
170
168 Examples:
171 Examples:
169
172
170 >>> str(url('http://user:pw@host:80/?foo#bar'))
173 >>> str(url('http://user:pw@host:80/?foo#bar'))
171 'http://user:pw@host:80/?foo#bar'
174 'http://user:pw@host:80/?foo#bar'
172 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
175 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
173 'ssh://user:pw@[::1]:2200//home/joe#'
176 'ssh://user:pw@[::1]:2200//home/joe#'
174 >>> str(url('http://localhost:80//'))
177 >>> str(url('http://localhost:80//'))
175 'http://localhost:80//'
178 'http://localhost:80//'
176 >>> str(url('http://localhost:80/'))
179 >>> str(url('http://localhost:80/'))
177 'http://localhost:80/'
180 'http://localhost:80/'
178 >>> str(url('http://localhost:80'))
181 >>> str(url('http://localhost:80'))
179 'http://localhost:80/'
182 'http://localhost:80/'
180 >>> str(url('bundle:foo'))
183 >>> str(url('bundle:foo'))
181 'bundle:foo'
184 'bundle:foo'
182 >>> str(url('bundle://../foo'))
185 >>> str(url('bundle://../foo'))
183 'bundle:../foo'
186 'bundle:../foo'
184 >>> str(url('path'))
187 >>> str(url('path'))
185 'path'
188 'path'
186 """
189 """
187 if self._localpath:
190 if self._localpath:
188 s = self.path
191 s = self.path
189 if self.scheme == 'bundle':
192 if self.scheme == 'bundle':
190 s = 'bundle:' + s
193 s = 'bundle:' + s
191 if self.fragment:
194 if self.fragment:
192 s += '#' + self.fragment
195 s += '#' + self.fragment
193 return s
196 return s
194
197
195 s = self.scheme + ':'
198 s = self.scheme + ':'
196 if (self.user or self.passwd or self.host or
199 if (self.user or self.passwd or self.host or
197 self.scheme and not self.path):
200 self.scheme and not self.path):
198 s += '//'
201 s += '//'
199 if self.user:
202 if self.user:
200 s += urllib.quote(self.user, safe=self._safechars)
203 s += urllib.quote(self.user, safe=self._safechars)
201 if self.passwd:
204 if self.passwd:
202 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
205 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
203 if self.user or self.passwd:
206 if self.user or self.passwd:
204 s += '@'
207 s += '@'
205 if self.host:
208 if self.host:
206 if not (self.host.startswith('[') and self.host.endswith(']')):
209 if not (self.host.startswith('[') and self.host.endswith(']')):
207 s += urllib.quote(self.host)
210 s += urllib.quote(self.host)
208 else:
211 else:
209 s += self.host
212 s += self.host
210 if self.port:
213 if self.port:
211 s += ':' + urllib.quote(self.port)
214 s += ':' + urllib.quote(self.port)
212 if self.host:
215 if self.host:
213 s += '/'
216 s += '/'
214 if self.path:
217 if self.path:
215 s += urllib.quote(self.path, safe=self._safepchars)
218 s += urllib.quote(self.path, safe=self._safepchars)
216 if self.query:
219 if self.query:
217 s += '?' + urllib.quote(self.query, safe=self._safepchars)
220 s += '?' + urllib.quote(self.query, safe=self._safepchars)
218 if self.fragment is not None:
221 if self.fragment is not None:
219 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
222 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
220 return s
223 return s
221
224
222 def authinfo(self):
225 def authinfo(self):
223 user, passwd = self.user, self.passwd
226 user, passwd = self.user, self.passwd
224 try:
227 try:
225 self.user, self.passwd = None, None
228 self.user, self.passwd = None, None
226 s = str(self)
229 s = str(self)
227 finally:
230 finally:
228 self.user, self.passwd = user, passwd
231 self.user, self.passwd = user, passwd
229 if not self.user:
232 if not self.user:
230 return (s, None)
233 return (s, None)
231 return (s, (None, (str(self), self.host),
234 return (s, (None, (str(self), self.host),
232 self.user, self.passwd or ''))
235 self.user, self.passwd or ''))
233
236
237 def localpath(self):
238 if self.scheme == 'file' or self.scheme == 'bundle':
239 path = self.path or '/'
240 # For Windows, we need to promote hosts containing drive
241 # letters to paths with drive letters.
242 if has_drive_letter(self._hostport):
243 path = self._hostport + '/' + self.path
244 elif self.host is not None and self.path:
245 path = '/' + path
246 # We also need to handle the case of file:///C:/, which
247 # should return C:/, not /C:/.
248 elif has_drive_letter(path):
249 # Strip leading slash from paths with drive names
250 return path[1:]
251 return path
252 return self._origpath
253
234 def has_scheme(path):
254 def has_scheme(path):
235 return bool(url(path).scheme)
255 return bool(url(path).scheme)
236
256
237 def has_drive_letter(path):
257 def has_drive_letter(path):
238 return path[1:2] == ':' and path[0:1].isalpha()
258 return path[1:2] == ':' and path[0:1].isalpha()
239
259
def localpath(path):
    '''Convert a file:// or bundle:// URL (or plain path) to a local path.'''
    u = url(path, parse_query=False, parse_fragment=False)
    return u.localpath()
262
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
246
269
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = parsed.passwd = None
    return str(parsed)
252
275
def netlocsplit(netloc):
    '''split [user[:passwd]@]host[:port] into 4-tuple.

    Missing components come back as None.  Credentials are
    percent-decoded; host and port are returned verbatim.
    '''
    user = passwd = None
    if '@' in netloc:
        # Everything before the first '@' is percent-encoded credentials.
        userpass, netloc = netloc.split('@', 1)
        if ':' in userpass:
            user, passwd = userpass.split(':', 1)
            user = urllib.unquote(user)
            passwd = urllib.unquote(passwd)
        else:
            user = urllib.unquote(userpass)
    if ':' in netloc:
        host, port = netloc.split(':', 1)
    else:
        host, port = netloc, None
    return host, port, user, passwd
273
296
def netlocunsplit(host, port, user=None, passwd=None):
    '''turn host, port, user, passwd into [user[:passwd]@]host[:port].

    Inverse of netlocsplit: credentials are percent-encoded, host and
    port are used verbatim.
    '''
    if port:
        hostport = '%s:%s' % (host, port)
    else:
        hostport = host
    if not user:
        return hostport
    quote = lambda s: urllib.quote(s, safe='')
    if passwd:
        creds = quote(user) + ':' + quote(passwd)
    else:
        creds = quote(user)
    return creds + '@' + hostport
288
311
def readauthforuri(ui, uri):
    '''Return the best-matching (group, settings) pair from the [auth]
    config section for uri, or None when nothing matches.'''
    # Gather [auth] settings into {group: {setting: value}}.
    config = {}
    for key, val in ui.configitems('auth'):
        if '.' not in key:
            ui.warn(_("ignoring invalid [auth] key '%s'\n") % key)
            continue
        group, setting = key.rsplit('.', 1)
        gdict = config.setdefault(group, {})
        if setting in ('username', 'cert', 'key'):
            # These hold user names or filesystem paths; expand ~ and
            # environment variables.
            val = util.expandpath(val)
        gdict[setting] = val

    # Pick the group whose prefix matches the longest portion of uri.
    scheme, hostpath = uri.split('://', 1)
    bestlen = 0
    bestauth = None
    for group, auth in config.iteritems():
        prefix = auth.get('prefix')
        if not prefix:
            continue
        parts = prefix.split('://', 1)
        if len(parts) > 1:
            # The prefix carries its own scheme, overriding auth.schemes.
            schemes, prefix = [parts[0]], parts[1]
        else:
            schemes = (auth.get('schemes') or 'https').split()
        if ((prefix == '*' or hostpath.startswith(prefix)) and
            len(prefix) > bestlen and scheme in schemes):
            bestlen = len(prefix)
            bestauth = group, auth
    return bestauth
320
343
_safe = ('abcdefghijklmnopqrstuvwxyz'
         'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
         '0123456789' '_.-/')
# Lazily-built lookup sets; populated on first quotepath() call.
_safeset = None
_hex = None
def quotepath(path):
    '''quote the path part of a URL

    This is similar to urllib.quote, but it also tries to avoid
    quoting things twice (inspired by wget):

    >>> quotepath('abc def')
    'abc%20def'
    >>> quotepath('abc%20def')
    'abc%20def'
    >>> quotepath('abc%20 def')
    'abc%20%20def'
    >>> quotepath('abc def%20')
    'abc%20def%20'
    >>> quotepath('abc def%2')
    'abc%20def%252'
    >>> quotepath('abc def%')
    'abc%20def%25'
    '''
    global _safeset, _hex
    if _safeset is None:
        _safeset = set(_safe)
        _hex = set('abcdefABCDEF0123456789')
    chars = list(path)
    for i, ch in enumerate(chars):
        if (ch == '%' and i + 2 < len(chars) and
            chars[i + 1] in _hex and chars[i + 2] in _hex):
            # Already a well-formed %XX escape; leave it untouched.
            continue
        if ch not in _safeset:
            chars[i] = '%%%02X' % ord(ch)
    return ''.join(chars)
358
381
class passwordmgr(urllib2.HTTPPasswordMgrWithDefaultRealm):
    '''Password manager that consults previously stored credentials,
    then the [auth] config section, then prompts interactively.'''

    def __init__(self, ui):
        urllib2.HTTPPasswordMgrWithDefaultRealm.__init__(self)
        self.ui = ui

    def find_user_password(self, realm, authuri):
        # First, anything already registered with urllib2.
        user, passwd = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
            self, realm, authuri)
        if user and passwd:
            self._writedebug(user, passwd)
            return (user, passwd)

        # Next, the [auth] configuration section.
        if not user:
            res = readauthforuri(self.ui, authuri)
            if res:
                group, auth = res
                user, passwd = auth.get('username'), auth.get('password')
                self.ui.debug("using auth.%s.* for authentication\n" % group)

        # Finally, ask the user -- but only on an interactive terminal.
        if not user or not passwd:
            if not self.ui.interactive():
                raise util.Abort(_('http authorization required'))

            self.ui.write(_("http authorization required\n"))
            self.ui.write(_("realm: %s\n") % realm)
            if user:
                self.ui.write(_("user: %s\n") % user)
            else:
                user = self.ui.prompt(_("user:"), default=None)

            if not passwd:
                passwd = self.ui.getpass()

        self.add_password(realm, authuri, user, passwd)
        self._writedebug(user, passwd)
        return (user, passwd)

    def _writedebug(self, user, passwd):
        # Mask the password but reveal its length for debugging.
        msg = _('http auth: user %s, password %s\n')
        self.ui.debug(msg % (user, passwd and '*' * len(passwd) or 'not set'))
399
422
class proxyhandler(urllib2.ProxyHandler):
    '''ProxyHandler configured from Mercurial's [http_proxy] section
    and the http_proxy/no_proxy environment variables.'''

    def __init__(self, ui):
        proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
        # XXX proxyauthinfo = None

        if not proxyurl:
            proxies = {}
        else:
            # proxy can be proper url or host[:port]
            if not (proxyurl.startswith('http:') or
                    proxyurl.startswith('https:')):
                proxyurl = 'http://' + proxyurl + '/'
            proxy = url(proxyurl)
            if not proxy.user:
                proxy.user = ui.config("http_proxy", "user")
                proxy.passwd = ui.config("http_proxy", "passwd")

            # Hosts that bypass the proxy: localhost plus anything listed
            # in http_proxy.no or the no_proxy environment variable.
            exceptions = ["localhost", "127.0.0.1"]
            exceptions.extend(p.lower()
                              for p in ui.configlist("http_proxy", "no"))
            exceptions.extend(p.strip().lower()
                              for p in os.getenv("no_proxy", '').split(',')
                              if p.strip())
            # "http_proxy.always" config is for running tests on localhost
            if ui.configbool("http_proxy", "always"):
                self.no_list = []
            else:
                self.no_list = exceptions

            proxyurl = str(proxy)
            proxies = {'http': proxyurl, 'https': proxyurl}
            ui.debug('proxying through http://%s:%s\n' %
                     (proxy.host, proxy.port))

        # urllib2 takes proxy values from the environment and those
        # will take precedence if found, so drop them
        for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
            try:
                if env in os.environ:
                    del os.environ[env]
            except OSError:
                pass

        urllib2.ProxyHandler.__init__(self, proxies)
        self.ui = ui

    def proxy_open(self, req, proxy, type_):
        host = req.get_host().split(':')[0]
        if host in self.no_list:
            # Connect directly; let the other handlers take over.
            return None

        # work around a bug in Python < 2.4.2
        # (it leaves a "\n" at the end of Proxy-authorization headers)
        baseclass = req.__class__
        class _request(baseclass):
            def add_header(self, key, val):
                if key.lower() == 'proxy-authorization':
                    val = val.strip()
                return baseclass.add_header(self, key, val)
        req.__class__ = _request

        return urllib2.ProxyHandler.proxy_open(self, req, proxy, type_)
463
486
class httpsendfile(object):
    """This is a wrapper around the objects returned by python's "open".

    Its purpose is to send file-like objects via HTTP and, to do so, it
    defines a __len__ attribute to feed the Content-Length header.
    """

    def __init__(self, ui, *args, **kwargs):
        # We can't just "self._data = open(*args, **kwargs)" here because there
        # is an "open" function defined in this module that shadows the global
        # one
        self.ui = ui
        self._data = __builtin__.open(*args, **kwargs)
        self.seek = self._data.seek
        self.close = self._data.close
        self.write = self._data.write
        self._len = os.fstat(self._data.fileno()).st_size
        self._pos = 0
        # We pass double the max for total because we currently have
        # to send the bundle twice in the case of a server that
        # requires authentication. Since we can't know until we try
        # once whether authentication will be required, just lie to
        # the user and maybe the push succeeds suddenly at 50%.
        self._total = len(self) / 1024 * 2

    def read(self, *args, **kwargs):
        try:
            ret = self._data.read(*args, **kwargs)
        except EOFError:
            # Clear the progress bar, then propagate.  (Previously this
            # branch fell through and hit a NameError on the unbound
            # local 'ret' instead of reporting the real error.)
            self.ui.progress(_('sending'), None)
            raise
        self._pos += len(ret)
        self.ui.progress(_('sending'), self._pos / 1024,
                         unit=_('kb'), total=self._total)
        return ret

    def __len__(self):
        return self._len
501
524
def _gen_sendfile(orgsend):
    '''Wrap a connection's send() so httpsendfile objects are streamed
    in chunks instead of being read wholesale into memory.'''
    def _sendfile(self, data):
        if isinstance(data, httpsendfile):
            # if auth required, some data sent twice, so rewind here
            data.seek(0)
            for chunk in util.filechunkiter(data):
                orgsend(self, chunk)
        else:
            orgsend(self, data)
    return _sendfile
513
536
514 has_https = hasattr(urllib2, 'HTTPSHandler')
537 has_https = hasattr(urllib2, 'HTTPSHandler')
515 if has_https:
538 if has_https:
516 try:
539 try:
517 # avoid using deprecated/broken FakeSocket in python 2.6
540 # avoid using deprecated/broken FakeSocket in python 2.6
518 import ssl
541 import ssl
519 _ssl_wrap_socket = ssl.wrap_socket
542 _ssl_wrap_socket = ssl.wrap_socket
520 CERT_REQUIRED = ssl.CERT_REQUIRED
543 CERT_REQUIRED = ssl.CERT_REQUIRED
521 except ImportError:
544 except ImportError:
522 CERT_REQUIRED = 2
545 CERT_REQUIRED = 2
523
546
524 def _ssl_wrap_socket(sock, key_file, cert_file,
547 def _ssl_wrap_socket(sock, key_file, cert_file,
525 cert_reqs=CERT_REQUIRED, ca_certs=None):
548 cert_reqs=CERT_REQUIRED, ca_certs=None):
526 if ca_certs:
549 if ca_certs:
527 raise util.Abort(_(
550 raise util.Abort(_(
528 'certificate checking requires Python 2.6'))
551 'certificate checking requires Python 2.6'))
529
552
530 ssl = socket.ssl(sock, key_file, cert_file)
553 ssl = socket.ssl(sock, key_file, cert_file)
531 return httplib.FakeSocket(sock, ssl)
554 return httplib.FakeSocket(sock, ssl)
532
555
533 try:
556 try:
534 _create_connection = socket.create_connection
557 _create_connection = socket.create_connection
535 except AttributeError:
558 except AttributeError:
536 _GLOBAL_DEFAULT_TIMEOUT = object()
559 _GLOBAL_DEFAULT_TIMEOUT = object()
537
560
538 def _create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
561 def _create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
539 source_address=None):
562 source_address=None):
540 # lifted from Python 2.6
563 # lifted from Python 2.6
541
564
542 msg = "getaddrinfo returns an empty list"
565 msg = "getaddrinfo returns an empty list"
543 host, port = address
566 host, port = address
544 for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
567 for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
545 af, socktype, proto, canonname, sa = res
568 af, socktype, proto, canonname, sa = res
546 sock = None
569 sock = None
547 try:
570 try:
548 sock = socket.socket(af, socktype, proto)
571 sock = socket.socket(af, socktype, proto)
549 if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
572 if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
550 sock.settimeout(timeout)
573 sock.settimeout(timeout)
551 if source_address:
574 if source_address:
552 sock.bind(source_address)
575 sock.bind(source_address)
553 sock.connect(sa)
576 sock.connect(sa)
554 return sock
577 return sock
555
578
556 except socket.error, msg:
579 except socket.error, msg:
557 if sock is not None:
580 if sock is not None:
558 sock.close()
581 sock.close()
559
582
560 raise socket.error, msg
583 raise socket.error, msg
561
584
class httpconnection(keepalive.HTTPConnection):
    # must be able to send big bundle as stream.
    send = _gen_sendfile(keepalive.HTTPConnection.send)

    def connect(self):
        if has_https and self.realhostport:  # use CONNECT proxy
            # Tunnel through the proxy, then wrap the socket in SSL.
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect((self.host, self.port))
            if _generic_proxytunnel(self):
                # we do not support client x509 certificates
                self.sock = _ssl_wrap_socket(self.sock, None, None)
        else:
            keepalive.HTTPConnection.connect(self)

    def getresponse(self):
        # A response captured while establishing the proxy tunnel
        # (see _generic_proxytunnel) is served first.
        proxyres = getattr(self, 'proxyres', None)
        if proxyres:
            if proxyres.will_close:
                self.close()
            self.proxyres = None
            return proxyres
        return keepalive.HTTPConnection.getresponse(self)
584
607
585 # general transaction handler to support different ways to handle
608 # general transaction handler to support different ways to handle
586 # HTTPS proxying before and after Python 2.6.3.
609 # HTTPS proxying before and after Python 2.6.3.
def _generic_start_transaction(handler, h, req):
    '''Decide whether connection h must CONNECT-tunnel through a proxy
    and stash the tunnel endpoint and headers on it.

    Supports both pre- and post-2.6.3 urllib2 HTTPS proxying.
    '''
    if hasattr(req, '_tunnel_host') and req._tunnel_host:
        # Python >= 2.6.3 records the tunnel target on the request.
        tunnel_host = req._tunnel_host
        if tunnel_host[:7] not in ['http://', 'https:/']:
            tunnel_host = 'https://' + tunnel_host
        new_tunnel = True
    else:
        tunnel_host = req.get_selector()
        new_tunnel = False

    if new_tunnel or tunnel_host == req.get_full_url():  # has proxy
        u = url(tunnel_host)
        if new_tunnel or u.scheme == 'https':  # only use CONNECT for HTTPS
            h.realhostport = ':'.join([u.host, (u.port or '443')])
            h.headers = req.headers.copy()
            h.headers.update(handler.parent.addheaders)
            return

    h.realhostport = None
    h.headers = None
607
630
def _generic_proxytunnel(self):
    '''Issue a CONNECT request over self.sock and parse the proxy reply.

    Returns True when the tunnel is established (HTTP 200).  Otherwise
    the proxy's response is fully parsed, stored on self.proxyres for
    getresponse() to hand back, and False is returned.
    '''
    # Forward only the Proxy-* headers to the proxy itself.
    proxyheaders = dict(
        [(x, self.headers[x]) for x in self.headers
         if x.lower().startswith('proxy-')])
    self._set_hostport(self.host, self.port)
    self.send('CONNECT %s HTTP/1.0\r\n' % self.realhostport)
    for header in proxyheaders.iteritems():
        self.send('%s: %s\r\n' % header)
    self.send('\r\n')

    # majority of the following code is duplicated from
    # httplib.HTTPConnection as there are no adequate places to
    # override functions to provide the needed functionality
    res = self.response_class(self.sock,
                              strict=self.strict,
                              method=self._method)

    while True:
        version, status, reason = res._read_status()
        if status != httplib.CONTINUE:
            break
        # Discard the headers of a 100-continue interim response.
        while True:
            skip = res.fp.readline().strip()
            if not skip:
                break
    res.status = status
    res.reason = reason.strip()

    if res.status == 200:
        # Tunnel established; drain the remaining response headers.
        while True:
            line = res.fp.readline()
            if line == '\r\n':
                break
        return True

    if version == 'HTTP/1.0':
        res.version = 10
    elif version.startswith('HTTP/1.'):
        res.version = 11
    elif version == 'HTTP/0.9':
        res.version = 9
    else:
        raise httplib.UnknownProtocol(version)

    if res.version == 9:
        res.length = None
        res.chunked = 0
        res.will_close = 1
        res.msg = httplib.HTTPMessage(cStringIO.StringIO())
        return False

    res.msg = httplib.HTTPMessage(res.fp)
    res.msg.fp = None

    # are we using the chunked-style of transfer encoding?
    trenc = res.msg.getheader('transfer-encoding')
    if trenc and trenc.lower() == "chunked":
        res.chunked = 1
        res.chunk_left = None
    else:
        res.chunked = 0

    # will the connection close at the end of the response?
    res.will_close = res._check_close()

    # do we have a Content-Length?
    # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
    length = res.msg.getheader('content-length')
    if length and not res.chunked:
        try:
            res.length = int(length)
        except ValueError:
            res.length = None
        else:
            if res.length < 0:  # ignore nonsensical negative lengths
                res.length = None
    else:
        res.length = None

    # does the body have a fixed length? (of zero)
    if (status == httplib.NO_CONTENT or status == httplib.NOT_MODIFIED or
        100 <= status < 200 or  # 1xx codes
        res._method == 'HEAD'):
        res.length = 0

    # if the connection remains open, and we aren't using chunked, and
    # a content-length was not provided, then assume that the connection
    # WILL close.
    if (not res.will_close and
        not res.chunked and
        res.length is None):
        res.will_close = 1

    self.proxyres = res

    return False
704
727
class httphandler(keepalive.HTTPHandler):
    '''HTTP handler using our streaming, proxy-aware connection class.'''

    def http_open(self, req):
        return self.do_open(httpconnection, req)

    def _start_transaction(self, h, req):
        # Record CONNECT tunneling state before the base class sends.
        _generic_start_transaction(self, h, req)
        return keepalive.HTTPHandler._start_transaction(self, h, req)
712
735
713 def _verifycert(cert, hostname):
736 def _verifycert(cert, hostname):
714 '''Verify that cert (in socket.getpeercert() format) matches hostname.
737 '''Verify that cert (in socket.getpeercert() format) matches hostname.
715 CRLs is not handled.
738 CRLs is not handled.
716
739
717 Returns error message if any problems are found and None on success.
740 Returns error message if any problems are found and None on success.
718 '''
741 '''
719 if not cert:
742 if not cert:
720 return _('no certificate received')
743 return _('no certificate received')
721 dnsname = hostname.lower()
744 dnsname = hostname.lower()
722 def matchdnsname(certname):
745 def matchdnsname(certname):
723 return (certname == dnsname or
746 return (certname == dnsname or
724 '.' in dnsname and certname == '*.' + dnsname.split('.', 1)[1])
747 '.' in dnsname and certname == '*.' + dnsname.split('.', 1)[1])
725
748
726 san = cert.get('subjectAltName', [])
749 san = cert.get('subjectAltName', [])
727 if san:
750 if san:
728 certnames = [value.lower() for key, value in san if key == 'DNS']
751 certnames = [value.lower() for key, value in san if key == 'DNS']
729 for name in certnames:
752 for name in certnames:
730 if matchdnsname(name):
753 if matchdnsname(name):
731 return None
754 return None
732 return _('certificate is for %s') % ', '.join(certnames)
755 return _('certificate is for %s') % ', '.join(certnames)
733
756
734 # subject is only checked when subjectAltName is empty
757 # subject is only checked when subjectAltName is empty
735 for s in cert.get('subject', []):
758 for s in cert.get('subject', []):
736 key, value = s[0]
759 key, value = s[0]
737 if key == 'commonName':
760 if key == 'commonName':
738 try:
761 try:
739 # 'subject' entries are unicode
762 # 'subject' entries are unicode
740 certname = value.lower().encode('ascii')
763 certname = value.lower().encode('ascii')
741 except UnicodeEncodeError:
764 except UnicodeEncodeError:
742 return _('IDN in certificate not supported')
765 return _('IDN in certificate not supported')
743 if matchdnsname(certname):
766 if matchdnsname(certname):
744 return None
767 return None
745 return _('certificate is for %s') % certname
768 return _('certificate is for %s') % certname
746 return _('no commonName or subjectAltName found in certificate')
769 return _('no commonName or subjectAltName found in certificate')
747
770
748 if has_https:
771 if has_https:
749 class httpsconnection(httplib.HTTPSConnection):
772 class httpsconnection(httplib.HTTPSConnection):
750 response_class = keepalive.HTTPResponse
773 response_class = keepalive.HTTPResponse
751 # must be able to send big bundle as stream.
774 # must be able to send big bundle as stream.
752 send = _gen_sendfile(keepalive.safesend)
775 send = _gen_sendfile(keepalive.safesend)
753 getresponse = keepalive.wrapgetresponse(httplib.HTTPSConnection)
776 getresponse = keepalive.wrapgetresponse(httplib.HTTPSConnection)
754
777
755 def connect(self):
778 def connect(self):
756 self.sock = _create_connection((self.host, self.port))
779 self.sock = _create_connection((self.host, self.port))
757
780
758 host = self.host
781 host = self.host
759 if self.realhostport: # use CONNECT proxy
782 if self.realhostport: # use CONNECT proxy
760 something = _generic_proxytunnel(self)
783 something = _generic_proxytunnel(self)
761 host = self.realhostport.rsplit(':', 1)[0]
784 host = self.realhostport.rsplit(':', 1)[0]
762
785
763 cacerts = self.ui.config('web', 'cacerts')
786 cacerts = self.ui.config('web', 'cacerts')
764 hostfingerprint = self.ui.config('hostfingerprints', host)
787 hostfingerprint = self.ui.config('hostfingerprints', host)
765
788
766 if cacerts and not hostfingerprint:
789 if cacerts and not hostfingerprint:
767 cacerts = util.expandpath(cacerts)
790 cacerts = util.expandpath(cacerts)
768 if not os.path.exists(cacerts):
791 if not os.path.exists(cacerts):
769 raise util.Abort(_('could not find '
792 raise util.Abort(_('could not find '
770 'web.cacerts: %s') % cacerts)
793 'web.cacerts: %s') % cacerts)
771 self.sock = _ssl_wrap_socket(self.sock, self.key_file,
794 self.sock = _ssl_wrap_socket(self.sock, self.key_file,
772 self.cert_file, cert_reqs=CERT_REQUIRED,
795 self.cert_file, cert_reqs=CERT_REQUIRED,
773 ca_certs=cacerts)
796 ca_certs=cacerts)
774 msg = _verifycert(self.sock.getpeercert(), host)
797 msg = _verifycert(self.sock.getpeercert(), host)
775 if msg:
798 if msg:
776 raise util.Abort(_('%s certificate error: %s '
799 raise util.Abort(_('%s certificate error: %s '
777 '(use --insecure to connect '
800 '(use --insecure to connect '
778 'insecurely)') % (host, msg))
801 'insecurely)') % (host, msg))
779 self.ui.debug('%s certificate successfully verified\n' % host)
802 self.ui.debug('%s certificate successfully verified\n' % host)
780 else:
803 else:
781 self.sock = _ssl_wrap_socket(self.sock, self.key_file,
804 self.sock = _ssl_wrap_socket(self.sock, self.key_file,
782 self.cert_file)
805 self.cert_file)
783 if hasattr(self.sock, 'getpeercert'):
806 if hasattr(self.sock, 'getpeercert'):
784 peercert = self.sock.getpeercert(True)
807 peercert = self.sock.getpeercert(True)
785 peerfingerprint = util.sha1(peercert).hexdigest()
808 peerfingerprint = util.sha1(peercert).hexdigest()
786 nicefingerprint = ":".join([peerfingerprint[x:x + 2]
809 nicefingerprint = ":".join([peerfingerprint[x:x + 2]
787 for x in xrange(0, len(peerfingerprint), 2)])
810 for x in xrange(0, len(peerfingerprint), 2)])
788 if hostfingerprint:
811 if hostfingerprint:
789 if peerfingerprint.lower() != \
812 if peerfingerprint.lower() != \
790 hostfingerprint.replace(':', '').lower():
813 hostfingerprint.replace(':', '').lower():
791 raise util.Abort(_('invalid certificate for %s '
814 raise util.Abort(_('invalid certificate for %s '
792 'with fingerprint %s') %
815 'with fingerprint %s') %
793 (host, nicefingerprint))
816 (host, nicefingerprint))
794 self.ui.debug('%s certificate matched fingerprint %s\n' %
817 self.ui.debug('%s certificate matched fingerprint %s\n' %
795 (host, nicefingerprint))
818 (host, nicefingerprint))
796 else:
819 else:
797 self.ui.warn(_('warning: %s certificate '
820 self.ui.warn(_('warning: %s certificate '
798 'with fingerprint %s not verified '
821 'with fingerprint %s not verified '
799 '(check hostfingerprints or web.cacerts '
822 '(check hostfingerprints or web.cacerts '
800 'config setting)\n') %
823 'config setting)\n') %
801 (host, nicefingerprint))
824 (host, nicefingerprint))
802 else: # python 2.5 ?
825 else: # python 2.5 ?
803 if hostfingerprint:
826 if hostfingerprint:
804 raise util.Abort(_('no certificate for %s with '
827 raise util.Abort(_('no certificate for %s with '
805 'configured hostfingerprint') % host)
828 'configured hostfingerprint') % host)
806 self.ui.warn(_('warning: %s certificate not verified '
829 self.ui.warn(_('warning: %s certificate not verified '
807 '(check web.cacerts config setting)\n') %
830 '(check web.cacerts config setting)\n') %
808 host)
831 host)
809
832
810 class httpshandler(keepalive.KeepAliveHandler, urllib2.HTTPSHandler):
833 class httpshandler(keepalive.KeepAliveHandler, urllib2.HTTPSHandler):
811 def __init__(self, ui):
834 def __init__(self, ui):
812 keepalive.KeepAliveHandler.__init__(self)
835 keepalive.KeepAliveHandler.__init__(self)
813 urllib2.HTTPSHandler.__init__(self)
836 urllib2.HTTPSHandler.__init__(self)
814 self.ui = ui
837 self.ui = ui
815 self.pwmgr = passwordmgr(self.ui)
838 self.pwmgr = passwordmgr(self.ui)
816
839
817 def _start_transaction(self, h, req):
840 def _start_transaction(self, h, req):
818 _generic_start_transaction(self, h, req)
841 _generic_start_transaction(self, h, req)
819 return keepalive.KeepAliveHandler._start_transaction(self, h, req)
842 return keepalive.KeepAliveHandler._start_transaction(self, h, req)
820
843
821 def https_open(self, req):
844 def https_open(self, req):
822 res = readauthforuri(self.ui, req.get_full_url())
845 res = readauthforuri(self.ui, req.get_full_url())
823 if res:
846 if res:
824 group, auth = res
847 group, auth = res
825 self.auth = auth
848 self.auth = auth
826 self.ui.debug("using auth.%s.* for authentication\n" % group)
849 self.ui.debug("using auth.%s.* for authentication\n" % group)
827 else:
850 else:
828 self.auth = None
851 self.auth = None
829 return self.do_open(self._makeconnection, req)
852 return self.do_open(self._makeconnection, req)
830
853
831 def _makeconnection(self, host, port=None, *args, **kwargs):
854 def _makeconnection(self, host, port=None, *args, **kwargs):
832 keyfile = None
855 keyfile = None
833 certfile = None
856 certfile = None
834
857
835 if len(args) >= 1: # key_file
858 if len(args) >= 1: # key_file
836 keyfile = args[0]
859 keyfile = args[0]
837 if len(args) >= 2: # cert_file
860 if len(args) >= 2: # cert_file
838 certfile = args[1]
861 certfile = args[1]
839 args = args[2:]
862 args = args[2:]
840
863
841 # if the user has specified different key/cert files in
864 # if the user has specified different key/cert files in
842 # hgrc, we prefer these
865 # hgrc, we prefer these
843 if self.auth and 'key' in self.auth and 'cert' in self.auth:
866 if self.auth and 'key' in self.auth and 'cert' in self.auth:
844 keyfile = self.auth['key']
867 keyfile = self.auth['key']
845 certfile = self.auth['cert']
868 certfile = self.auth['cert']
846
869
847 conn = httpsconnection(host, port, keyfile, certfile, *args, **kwargs)
870 conn = httpsconnection(host, port, keyfile, certfile, *args, **kwargs)
848 conn.ui = self.ui
871 conn.ui = self.ui
849 return conn
872 return conn
850
873
851 class httpdigestauthhandler(urllib2.HTTPDigestAuthHandler):
874 class httpdigestauthhandler(urllib2.HTTPDigestAuthHandler):
852 def __init__(self, *args, **kwargs):
875 def __init__(self, *args, **kwargs):
853 urllib2.HTTPDigestAuthHandler.__init__(self, *args, **kwargs)
876 urllib2.HTTPDigestAuthHandler.__init__(self, *args, **kwargs)
854 self.retried_req = None
877 self.retried_req = None
855
878
856 def reset_retry_count(self):
879 def reset_retry_count(self):
857 # Python 2.6.5 will call this on 401 or 407 errors and thus loop
880 # Python 2.6.5 will call this on 401 or 407 errors and thus loop
858 # forever. We disable reset_retry_count completely and reset in
881 # forever. We disable reset_retry_count completely and reset in
859 # http_error_auth_reqed instead.
882 # http_error_auth_reqed instead.
860 pass
883 pass
861
884
862 def http_error_auth_reqed(self, auth_header, host, req, headers):
885 def http_error_auth_reqed(self, auth_header, host, req, headers):
863 # Reset the retry counter once for each request.
886 # Reset the retry counter once for each request.
864 if req is not self.retried_req:
887 if req is not self.retried_req:
865 self.retried_req = req
888 self.retried_req = req
866 self.retried = 0
889 self.retried = 0
867 # In python < 2.5 AbstractDigestAuthHandler raises a ValueError if
890 # In python < 2.5 AbstractDigestAuthHandler raises a ValueError if
868 # it doesn't know about the auth type requested. This can happen if
891 # it doesn't know about the auth type requested. This can happen if
869 # somebody is using BasicAuth and types a bad password.
892 # somebody is using BasicAuth and types a bad password.
870 try:
893 try:
871 return urllib2.HTTPDigestAuthHandler.http_error_auth_reqed(
894 return urllib2.HTTPDigestAuthHandler.http_error_auth_reqed(
872 self, auth_header, host, req, headers)
895 self, auth_header, host, req, headers)
873 except ValueError, inst:
896 except ValueError, inst:
874 arg = inst.args[0]
897 arg = inst.args[0]
875 if arg.startswith("AbstractDigestAuthHandler doesn't know "):
898 if arg.startswith("AbstractDigestAuthHandler doesn't know "):
876 return
899 return
877 raise
900 raise
878
901
879 class httpbasicauthhandler(urllib2.HTTPBasicAuthHandler):
902 class httpbasicauthhandler(urllib2.HTTPBasicAuthHandler):
880 def __init__(self, *args, **kwargs):
903 def __init__(self, *args, **kwargs):
881 urllib2.HTTPBasicAuthHandler.__init__(self, *args, **kwargs)
904 urllib2.HTTPBasicAuthHandler.__init__(self, *args, **kwargs)
882 self.retried_req = None
905 self.retried_req = None
883
906
884 def reset_retry_count(self):
907 def reset_retry_count(self):
885 # Python 2.6.5 will call this on 401 or 407 errors and thus loop
908 # Python 2.6.5 will call this on 401 or 407 errors and thus loop
886 # forever. We disable reset_retry_count completely and reset in
909 # forever. We disable reset_retry_count completely and reset in
887 # http_error_auth_reqed instead.
910 # http_error_auth_reqed instead.
888 pass
911 pass
889
912
890 def http_error_auth_reqed(self, auth_header, host, req, headers):
913 def http_error_auth_reqed(self, auth_header, host, req, headers):
891 # Reset the retry counter once for each request.
914 # Reset the retry counter once for each request.
892 if req is not self.retried_req:
915 if req is not self.retried_req:
893 self.retried_req = req
916 self.retried_req = req
894 self.retried = 0
917 self.retried = 0
895 return urllib2.HTTPBasicAuthHandler.http_error_auth_reqed(
918 return urllib2.HTTPBasicAuthHandler.http_error_auth_reqed(
896 self, auth_header, host, req, headers)
919 self, auth_header, host, req, headers)
897
920
898 handlerfuncs = []
921 handlerfuncs = []
899
922
900 def opener(ui, authinfo=None):
923 def opener(ui, authinfo=None):
901 '''
924 '''
902 construct an opener suitable for urllib2
925 construct an opener suitable for urllib2
903 authinfo will be added to the password manager
926 authinfo will be added to the password manager
904 '''
927 '''
905 handlers = [httphandler()]
928 handlers = [httphandler()]
906 if has_https:
929 if has_https:
907 handlers.append(httpshandler(ui))
930 handlers.append(httpshandler(ui))
908
931
909 handlers.append(proxyhandler(ui))
932 handlers.append(proxyhandler(ui))
910
933
911 passmgr = passwordmgr(ui)
934 passmgr = passwordmgr(ui)
912 if authinfo is not None:
935 if authinfo is not None:
913 passmgr.add_password(*authinfo)
936 passmgr.add_password(*authinfo)
914 user, passwd = authinfo[2:4]
937 user, passwd = authinfo[2:4]
915 ui.debug('http auth: user %s, password %s\n' %
938 ui.debug('http auth: user %s, password %s\n' %
916 (user, passwd and '*' * len(passwd) or 'not set'))
939 (user, passwd and '*' * len(passwd) or 'not set'))
917
940
918 handlers.extend((httpbasicauthhandler(passmgr),
941 handlers.extend((httpbasicauthhandler(passmgr),
919 httpdigestauthhandler(passmgr)))
942 httpdigestauthhandler(passmgr)))
920 handlers.extend([h(ui, passmgr) for h in handlerfuncs])
943 handlers.extend([h(ui, passmgr) for h in handlerfuncs])
921 opener = urllib2.build_opener(*handlers)
944 opener = urllib2.build_opener(*handlers)
922
945
923 # 1.0 here is the _protocol_ version
946 # 1.0 here is the _protocol_ version
924 opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
947 opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
925 opener.addheaders.append(('Accept', 'application/mercurial-0.1'))
948 opener.addheaders.append(('Accept', 'application/mercurial-0.1'))
926 return opener
949 return opener
927
950
928 def open(ui, url_, data=None):
951 def open(ui, url_, data=None):
929 u = url(url_)
952 u = url(url_)
930 if u.scheme:
953 if u.scheme:
931 u.scheme = u.scheme.lower()
954 u.scheme = u.scheme.lower()
932 url_, authinfo = u.authinfo()
955 url_, authinfo = u.authinfo()
933 else:
956 else:
934 path = util.normpath(os.path.abspath(url_))
957 path = util.normpath(os.path.abspath(url_))
935 url_ = 'file://' + urllib.pathname2url(path)
958 url_ = 'file://' + urllib.pathname2url(path)
936 authinfo = None
959 authinfo = None
937 return opener(ui, authinfo).open(url_, data)
960 return opener(ui, authinfo).open(url_, data)
@@ -1,1583 +1,1563 b''
1 # util.py - Mercurial utility functions and platform specfic implementations
1 # util.py - Mercurial utility functions and platform specfic implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specfic implementations.
10 """Mercurial utility functions and platform specfic implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from i18n import _
16 from i18n import _
17 import error, osutil, encoding
17 import error, osutil, encoding
18 import errno, re, shutil, sys, tempfile, traceback
18 import errno, re, shutil, sys, tempfile, traceback
19 import os, stat, time, calendar, textwrap, unicodedata, signal
19 import os, stat, time, calendar, textwrap, unicodedata, signal
20 import imp, socket
20 import imp, socket
21
21
22 # Python compatibility
22 # Python compatibility
23
23
24 def sha1(s):
24 def sha1(s):
25 return _fastsha1(s)
25 return _fastsha1(s)
26
26
27 def _fastsha1(s):
27 def _fastsha1(s):
28 # This function will import sha1 from hashlib or sha (whichever is
28 # This function will import sha1 from hashlib or sha (whichever is
29 # available) and overwrite itself with it on the first call.
29 # available) and overwrite itself with it on the first call.
30 # Subsequent calls will go directly to the imported function.
30 # Subsequent calls will go directly to the imported function.
31 if sys.version_info >= (2, 5):
31 if sys.version_info >= (2, 5):
32 from hashlib import sha1 as _sha1
32 from hashlib import sha1 as _sha1
33 else:
33 else:
34 from sha import sha as _sha1
34 from sha import sha as _sha1
35 global _fastsha1, sha1
35 global _fastsha1, sha1
36 _fastsha1 = sha1 = _sha1
36 _fastsha1 = sha1 = _sha1
37 return _sha1(s)
37 return _sha1(s)
38
38
39 import __builtin__
39 import __builtin__
40
40
41 if sys.version_info[0] < 3:
41 if sys.version_info[0] < 3:
42 def fakebuffer(sliceable, offset=0):
42 def fakebuffer(sliceable, offset=0):
43 return sliceable[offset:]
43 return sliceable[offset:]
44 else:
44 else:
45 def fakebuffer(sliceable, offset=0):
45 def fakebuffer(sliceable, offset=0):
46 return memoryview(sliceable)[offset:]
46 return memoryview(sliceable)[offset:]
47 try:
47 try:
48 buffer
48 buffer
49 except NameError:
49 except NameError:
50 __builtin__.buffer = fakebuffer
50 __builtin__.buffer = fakebuffer
51
51
52 import subprocess
52 import subprocess
53 closefds = os.name == 'posix'
53 closefds = os.name == 'posix'
54
54
55 def popen2(cmd, env=None, newlines=False):
55 def popen2(cmd, env=None, newlines=False):
56 # Setting bufsize to -1 lets the system decide the buffer size.
56 # Setting bufsize to -1 lets the system decide the buffer size.
57 # The default for bufsize is 0, meaning unbuffered. This leads to
57 # The default for bufsize is 0, meaning unbuffered. This leads to
58 # poor performance on Mac OS X: http://bugs.python.org/issue4194
58 # poor performance on Mac OS X: http://bugs.python.org/issue4194
59 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
59 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
60 close_fds=closefds,
60 close_fds=closefds,
61 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
61 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
62 universal_newlines=newlines,
62 universal_newlines=newlines,
63 env=env)
63 env=env)
64 return p.stdin, p.stdout
64 return p.stdin, p.stdout
65
65
66 def popen3(cmd, env=None, newlines=False):
66 def popen3(cmd, env=None, newlines=False):
67 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
67 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
68 close_fds=closefds,
68 close_fds=closefds,
69 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
69 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
70 stderr=subprocess.PIPE,
70 stderr=subprocess.PIPE,
71 universal_newlines=newlines,
71 universal_newlines=newlines,
72 env=env)
72 env=env)
73 return p.stdin, p.stdout, p.stderr
73 return p.stdin, p.stdout, p.stderr
74
74
75 def version():
75 def version():
76 """Return version information if available."""
76 """Return version information if available."""
77 try:
77 try:
78 import __version__
78 import __version__
79 return __version__.version
79 return __version__.version
80 except ImportError:
80 except ImportError:
81 return 'unknown'
81 return 'unknown'
82
82
83 # used by parsedate
83 # used by parsedate
84 defaultdateformats = (
84 defaultdateformats = (
85 '%Y-%m-%d %H:%M:%S',
85 '%Y-%m-%d %H:%M:%S',
86 '%Y-%m-%d %I:%M:%S%p',
86 '%Y-%m-%d %I:%M:%S%p',
87 '%Y-%m-%d %H:%M',
87 '%Y-%m-%d %H:%M',
88 '%Y-%m-%d %I:%M%p',
88 '%Y-%m-%d %I:%M%p',
89 '%Y-%m-%d',
89 '%Y-%m-%d',
90 '%m-%d',
90 '%m-%d',
91 '%m/%d',
91 '%m/%d',
92 '%m/%d/%y',
92 '%m/%d/%y',
93 '%m/%d/%Y',
93 '%m/%d/%Y',
94 '%a %b %d %H:%M:%S %Y',
94 '%a %b %d %H:%M:%S %Y',
95 '%a %b %d %I:%M:%S%p %Y',
95 '%a %b %d %I:%M:%S%p %Y',
96 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
96 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
97 '%b %d %H:%M:%S %Y',
97 '%b %d %H:%M:%S %Y',
98 '%b %d %I:%M:%S%p %Y',
98 '%b %d %I:%M:%S%p %Y',
99 '%b %d %H:%M:%S',
99 '%b %d %H:%M:%S',
100 '%b %d %I:%M:%S%p',
100 '%b %d %I:%M:%S%p',
101 '%b %d %H:%M',
101 '%b %d %H:%M',
102 '%b %d %I:%M%p',
102 '%b %d %I:%M%p',
103 '%b %d %Y',
103 '%b %d %Y',
104 '%b %d',
104 '%b %d',
105 '%H:%M:%S',
105 '%H:%M:%S',
106 '%I:%M:%S%p',
106 '%I:%M:%S%p',
107 '%H:%M',
107 '%H:%M',
108 '%I:%M%p',
108 '%I:%M%p',
109 )
109 )
110
110
111 extendeddateformats = defaultdateformats + (
111 extendeddateformats = defaultdateformats + (
112 "%Y",
112 "%Y",
113 "%Y-%m",
113 "%Y-%m",
114 "%b",
114 "%b",
115 "%b %Y",
115 "%b %Y",
116 )
116 )
117
117
118 def cachefunc(func):
118 def cachefunc(func):
119 '''cache the result of function calls'''
119 '''cache the result of function calls'''
120 # XXX doesn't handle keywords args
120 # XXX doesn't handle keywords args
121 cache = {}
121 cache = {}
122 if func.func_code.co_argcount == 1:
122 if func.func_code.co_argcount == 1:
123 # we gain a small amount of time because
123 # we gain a small amount of time because
124 # we don't need to pack/unpack the list
124 # we don't need to pack/unpack the list
125 def f(arg):
125 def f(arg):
126 if arg not in cache:
126 if arg not in cache:
127 cache[arg] = func(arg)
127 cache[arg] = func(arg)
128 return cache[arg]
128 return cache[arg]
129 else:
129 else:
130 def f(*args):
130 def f(*args):
131 if args not in cache:
131 if args not in cache:
132 cache[args] = func(*args)
132 cache[args] = func(*args)
133 return cache[args]
133 return cache[args]
134
134
135 return f
135 return f
136
136
137 def lrucachefunc(func):
137 def lrucachefunc(func):
138 '''cache most recent results of function calls'''
138 '''cache most recent results of function calls'''
139 cache = {}
139 cache = {}
140 order = []
140 order = []
141 if func.func_code.co_argcount == 1:
141 if func.func_code.co_argcount == 1:
142 def f(arg):
142 def f(arg):
143 if arg not in cache:
143 if arg not in cache:
144 if len(cache) > 20:
144 if len(cache) > 20:
145 del cache[order.pop(0)]
145 del cache[order.pop(0)]
146 cache[arg] = func(arg)
146 cache[arg] = func(arg)
147 else:
147 else:
148 order.remove(arg)
148 order.remove(arg)
149 order.append(arg)
149 order.append(arg)
150 return cache[arg]
150 return cache[arg]
151 else:
151 else:
152 def f(*args):
152 def f(*args):
153 if args not in cache:
153 if args not in cache:
154 if len(cache) > 20:
154 if len(cache) > 20:
155 del cache[order.pop(0)]
155 del cache[order.pop(0)]
156 cache[args] = func(*args)
156 cache[args] = func(*args)
157 else:
157 else:
158 order.remove(args)
158 order.remove(args)
159 order.append(args)
159 order.append(args)
160 return cache[args]
160 return cache[args]
161
161
162 return f
162 return f
163
163
164 class propertycache(object):
164 class propertycache(object):
165 def __init__(self, func):
165 def __init__(self, func):
166 self.func = func
166 self.func = func
167 self.name = func.__name__
167 self.name = func.__name__
168 def __get__(self, obj, type=None):
168 def __get__(self, obj, type=None):
169 result = self.func(obj)
169 result = self.func(obj)
170 setattr(obj, self.name, result)
170 setattr(obj, self.name, result)
171 return result
171 return result
172
172
173 def pipefilter(s, cmd):
173 def pipefilter(s, cmd):
174 '''filter string S through command CMD, returning its output'''
174 '''filter string S through command CMD, returning its output'''
175 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
175 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
176 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
176 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
177 pout, perr = p.communicate(s)
177 pout, perr = p.communicate(s)
178 return pout
178 return pout
179
179
180 def tempfilter(s, cmd):
180 def tempfilter(s, cmd):
181 '''filter string S through a pair of temporary files with CMD.
181 '''filter string S through a pair of temporary files with CMD.
182 CMD is used as a template to create the real command to be run,
182 CMD is used as a template to create the real command to be run,
183 with the strings INFILE and OUTFILE replaced by the real names of
183 with the strings INFILE and OUTFILE replaced by the real names of
184 the temporary files generated.'''
184 the temporary files generated.'''
185 inname, outname = None, None
185 inname, outname = None, None
186 try:
186 try:
187 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
187 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
188 fp = os.fdopen(infd, 'wb')
188 fp = os.fdopen(infd, 'wb')
189 fp.write(s)
189 fp.write(s)
190 fp.close()
190 fp.close()
191 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
191 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
192 os.close(outfd)
192 os.close(outfd)
193 cmd = cmd.replace('INFILE', inname)
193 cmd = cmd.replace('INFILE', inname)
194 cmd = cmd.replace('OUTFILE', outname)
194 cmd = cmd.replace('OUTFILE', outname)
195 code = os.system(cmd)
195 code = os.system(cmd)
196 if sys.platform == 'OpenVMS' and code & 1:
196 if sys.platform == 'OpenVMS' and code & 1:
197 code = 0
197 code = 0
198 if code:
198 if code:
199 raise Abort(_("command '%s' failed: %s") %
199 raise Abort(_("command '%s' failed: %s") %
200 (cmd, explain_exit(code)))
200 (cmd, explain_exit(code)))
201 fp = open(outname, 'rb')
201 fp = open(outname, 'rb')
202 r = fp.read()
202 r = fp.read()
203 fp.close()
203 fp.close()
204 return r
204 return r
205 finally:
205 finally:
206 try:
206 try:
207 if inname:
207 if inname:
208 os.unlink(inname)
208 os.unlink(inname)
209 except:
209 except:
210 pass
210 pass
211 try:
211 try:
212 if outname:
212 if outname:
213 os.unlink(outname)
213 os.unlink(outname)
214 except:
214 except:
215 pass
215 pass
216
216
217 filtertable = {
217 filtertable = {
218 'tempfile:': tempfilter,
218 'tempfile:': tempfilter,
219 'pipe:': pipefilter,
219 'pipe:': pipefilter,
220 }
220 }
221
221
222 def filter(s, cmd):
222 def filter(s, cmd):
223 "filter a string through a command that transforms its input to its output"
223 "filter a string through a command that transforms its input to its output"
224 for name, fn in filtertable.iteritems():
224 for name, fn in filtertable.iteritems():
225 if cmd.startswith(name):
225 if cmd.startswith(name):
226 return fn(s, cmd[len(name):].lstrip())
226 return fn(s, cmd[len(name):].lstrip())
227 return pipefilter(s, cmd)
227 return pipefilter(s, cmd)
228
228
229 def binary(s):
229 def binary(s):
230 """return true if a string is binary data"""
230 """return true if a string is binary data"""
231 return bool(s and '\0' in s)
231 return bool(s and '\0' in s)
232
232
233 def increasingchunks(source, min=1024, max=65536):
233 def increasingchunks(source, min=1024, max=65536):
234 '''return no less than min bytes per chunk while data remains,
234 '''return no less than min bytes per chunk while data remains,
235 doubling min after each chunk until it reaches max'''
235 doubling min after each chunk until it reaches max'''
236 def log2(x):
236 def log2(x):
237 if not x:
237 if not x:
238 return 0
238 return 0
239 i = 0
239 i = 0
240 while x:
240 while x:
241 x >>= 1
241 x >>= 1
242 i += 1
242 i += 1
243 return i - 1
243 return i - 1
244
244
245 buf = []
245 buf = []
246 blen = 0
246 blen = 0
247 for chunk in source:
247 for chunk in source:
248 buf.append(chunk)
248 buf.append(chunk)
249 blen += len(chunk)
249 blen += len(chunk)
250 if blen >= min:
250 if blen >= min:
251 if min < max:
251 if min < max:
252 min = min << 1
252 min = min << 1
253 nmin = 1 << log2(blen)
253 nmin = 1 << log2(blen)
254 if nmin > min:
254 if nmin > min:
255 min = nmin
255 min = nmin
256 if min > max:
256 if min > max:
257 min = max
257 min = max
258 yield ''.join(buf)
258 yield ''.join(buf)
259 blen = 0
259 blen = 0
260 buf = []
260 buf = []
261 if buf:
261 if buf:
262 yield ''.join(buf)
262 yield ''.join(buf)
263
263
264 Abort = error.Abort
264 Abort = error.Abort
265
265
266 def always(fn):
266 def always(fn):
267 return True
267 return True
268
268
269 def never(fn):
269 def never(fn):
270 return False
270 return False
271
271
272 def pathto(root, n1, n2):
272 def pathto(root, n1, n2):
273 '''return the relative path from one place to another.
273 '''return the relative path from one place to another.
274 root should use os.sep to separate directories
274 root should use os.sep to separate directories
275 n1 should use os.sep to separate directories
275 n1 should use os.sep to separate directories
276 n2 should use "/" to separate directories
276 n2 should use "/" to separate directories
277 returns an os.sep-separated path.
277 returns an os.sep-separated path.
278
278
279 If n1 is a relative path, it's assumed it's
279 If n1 is a relative path, it's assumed it's
280 relative to root.
280 relative to root.
281 n2 should always be relative to root.
281 n2 should always be relative to root.
282 '''
282 '''
283 if not n1:
283 if not n1:
284 return localpath(n2)
284 return localpath(n2)
285 if os.path.isabs(n1):
285 if os.path.isabs(n1):
286 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
286 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
287 return os.path.join(root, localpath(n2))
287 return os.path.join(root, localpath(n2))
288 n2 = '/'.join((pconvert(root), n2))
288 n2 = '/'.join((pconvert(root), n2))
289 a, b = splitpath(n1), n2.split('/')
289 a, b = splitpath(n1), n2.split('/')
290 a.reverse()
290 a.reverse()
291 b.reverse()
291 b.reverse()
292 while a and b and a[-1] == b[-1]:
292 while a and b and a[-1] == b[-1]:
293 a.pop()
293 a.pop()
294 b.pop()
294 b.pop()
295 b.reverse()
295 b.reverse()
296 return os.sep.join((['..'] * len(a)) + b) or '.'
296 return os.sep.join((['..'] * len(a)) + b) or '.'
297
297
298 def canonpath(root, cwd, myname, auditor=None):
298 def canonpath(root, cwd, myname, auditor=None):
299 """return the canonical path of myname, given cwd and root"""
299 """return the canonical path of myname, given cwd and root"""
300 if endswithsep(root):
300 if endswithsep(root):
301 rootsep = root
301 rootsep = root
302 else:
302 else:
303 rootsep = root + os.sep
303 rootsep = root + os.sep
304 name = myname
304 name = myname
305 if not os.path.isabs(name):
305 if not os.path.isabs(name):
306 name = os.path.join(root, cwd, name)
306 name = os.path.join(root, cwd, name)
307 name = os.path.normpath(name)
307 name = os.path.normpath(name)
308 if auditor is None:
308 if auditor is None:
309 auditor = path_auditor(root)
309 auditor = path_auditor(root)
310 if name != rootsep and name.startswith(rootsep):
310 if name != rootsep and name.startswith(rootsep):
311 name = name[len(rootsep):]
311 name = name[len(rootsep):]
312 auditor(name)
312 auditor(name)
313 return pconvert(name)
313 return pconvert(name)
314 elif name == root:
314 elif name == root:
315 return ''
315 return ''
316 else:
316 else:
317 # Determine whether `name' is in the hierarchy at or beneath `root',
317 # Determine whether `name' is in the hierarchy at or beneath `root',
318 # by iterating name=dirname(name) until that causes no change (can't
318 # by iterating name=dirname(name) until that causes no change (can't
319 # check name == '/', because that doesn't work on windows). For each
319 # check name == '/', because that doesn't work on windows). For each
320 # `name', compare dev/inode numbers. If they match, the list `rel'
320 # `name', compare dev/inode numbers. If they match, the list `rel'
321 # holds the reversed list of components making up the relative file
321 # holds the reversed list of components making up the relative file
322 # name we want.
322 # name we want.
323 root_st = os.stat(root)
323 root_st = os.stat(root)
324 rel = []
324 rel = []
325 while True:
325 while True:
326 try:
326 try:
327 name_st = os.stat(name)
327 name_st = os.stat(name)
328 except OSError:
328 except OSError:
329 break
329 break
330 if samestat(name_st, root_st):
330 if samestat(name_st, root_st):
331 if not rel:
331 if not rel:
332 # name was actually the same as root (maybe a symlink)
332 # name was actually the same as root (maybe a symlink)
333 return ''
333 return ''
334 rel.reverse()
334 rel.reverse()
335 name = os.path.join(*rel)
335 name = os.path.join(*rel)
336 auditor(name)
336 auditor(name)
337 return pconvert(name)
337 return pconvert(name)
338 dirname, basename = os.path.split(name)
338 dirname, basename = os.path.split(name)
339 rel.append(basename)
339 rel.append(basename)
340 if dirname == name:
340 if dirname == name:
341 break
341 break
342 name = dirname
342 name = dirname
343
343
344 raise Abort('%s not under root' % myname)
344 raise Abort('%s not under root' % myname)
345
345
346 _hgexecutable = None
346 _hgexecutable = None
347
347
348 def main_is_frozen():
348 def main_is_frozen():
349 """return True if we are a frozen executable.
349 """return True if we are a frozen executable.
350
350
351 The code supports py2exe (most common, Windows only) and tools/freeze
351 The code supports py2exe (most common, Windows only) and tools/freeze
352 (portable, not much used).
352 (portable, not much used).
353 """
353 """
354 return (hasattr(sys, "frozen") or # new py2exe
354 return (hasattr(sys, "frozen") or # new py2exe
355 hasattr(sys, "importers") or # old py2exe
355 hasattr(sys, "importers") or # old py2exe
356 imp.is_frozen("__main__")) # tools/freeze
356 imp.is_frozen("__main__")) # tools/freeze
357
357
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.

    The answer is computed once and cached in the module-level
    _hgexecutable; later calls return the cached value.
    """
    if _hgexecutable is None:
        envhg = os.environ.get('HG')
        if envhg:
            set_hgexecutable(envhg)
        elif main_is_frozen():
            # frozen binary: the interpreter *is* the hg executable
            set_hgexecutable(sys.executable)
        else:
            set_hgexecutable(find_exe('hg') or os.path.basename(sys.argv[0]))
    return _hgexecutable
373
373
def set_hgexecutable(path):
    """set location of the 'hg' executable"""
    # stored in the module-level cache consulted by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
378
378
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        # flush our pending output so it is not interleaved after the
        # child's output
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    # keep the unquoted command around for error reporting below
    origcmd = cmd
    cmd = quotecommand(cmd)
    env = dict(os.environ)
    env.update((k, py2shell(v)) for k, v in environ.iteritems())
    env['HG'] = hgexecutable()
    if out is None:
        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                             env=env, cwd=cwd)
    else:
        # caller wants the output: merge stderr into stdout and copy the
        # child's output into `out` line by line
        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                env=env, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in proc.stdout:
            out.write(line)
        proc.wait()
        rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # on OpenVMS an odd status value means success
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explain_exit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        try:
            # a ui-like object has warn(); otherwise treat onerr as an
            # exception to raise
            onerr.warn(errmsg + '\n')
        except AttributeError:
            raise onerr(errmsg)
    return rc
428
428
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback of depth 1 means the TypeError came from the
            # call itself (bad signature), not from inside func
            tb = traceback.extract_tb(sys.exc_info()[2])
            if len(tb) == 1:
                raise error.SignatureError
            raise
    return check
440
440
def makedir(path, notindexed):
    # POSIX fallback: `notindexed` is only meaningful to the Windows
    # implementation (this def is overridden by `from windows import *`
    # below on os.name == 'nt'), so it is deliberately ignored here.
    os.mkdir(path)
443
443
def unlinkpath(f):
    """unlink and remove the directory if it is empty"""
    os.unlink(f)
    parent = os.path.dirname(f)
    # prune any parent directories the unlink left empty; removedirs
    # raises OSError as soon as it hits a non-empty (or missing) one
    try:
        os.removedirs(parent)
    except OSError:
        pass
452
452
def copyfile(src, dest):
    """copy a file, preserving its permission mode

    A symbolic link is recreated as a link to the same target instead of
    being copied as a regular file. Raises Abort if shutil reports a
    copy error.
    """
    if os.path.islink(src):
        # replace any existing dest; a missing dest is fine, and any
        # real problem will be reported by os.symlink below
        try:
            os.unlink(dest)
        except OSError:
            pass
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            # copies permission bits only (not owner or timestamps)
            shutil.copymode(src, dest)
        except shutil.Error as inst:
            raise Abort(str(inst))
467
467
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible"""

    if hardlink is None:
        # default: hardlink only when src and dst live on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    num = 0
    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            # recurse; the child may discover that hardlinking fails and
            # turn it off for the rest of the walk
            hardlink, n = copyfiles(os.path.join(src, name),
                                    os.path.join(dst, name), hardlink)
            num += n
    else:
        if hardlink:
            try:
                os_link(src, dst)
            except (IOError, OSError):
                # linking failed; fall back to plain copies from here on
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1

    return hardlink, num
495
495
class path_auditor(object):
    '''ensure that a filesystem path contains no banned components.
    the following properties of a path are checked:

    - ends with a directory separator
    - under top-level .hg
    - starts at the root of a windows drive
    - contains ".."
    - traverses a symlink (e.g. a/symlink_here/b)
    - inside a nested repository (a callback can be used to approve
      some nested repositories, e.g., subrepositories)
    '''

    def __init__(self, root, callback=None):
        # caches of paths/directories that already passed the audit, so
        # repeated checks of the same path are cheap
        self.audited = set()
        self.auditeddir = set()
        self.root = root
        # optional hook called with a nested-repo path; returning True
        # approves it (used for subrepositories)
        self.callback = callback

    def __call__(self, path):
        # raises Abort on the first banned property found; returns None
        # (and caches the path) if everything checks out
        if path in self.audited:
            return
        # AIX ignores "/" at end of path, others raise EISDIR.
        if endswithsep(path):
            raise Abort(_("path ends in directory separator: %s") % path)
        normpath = os.path.normcase(path)
        parts = splitpath(normpath)
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise Abort(_("path contains illegal component: %s") % path)
        if '.hg' in path.lower():
            # a .hg/.hg. component anywhere past the first position means
            # the path reaches inside a nested repository
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise Abort(_('path %r is inside repo %r') % (path, base))
        def check(prefix):
            # filesystem-level check of one ancestor directory of 'path'
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError, err:
                # EINVAL can be raised as invalid path syntax under win32.
                # They must be ignored for patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise Abort(_('path %r traverses symbolic link %r') %
                                (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    if not self.callback or not self.callback(curpath):
                        raise Abort(_('path %r is inside repo %r') %
                                    (path, prefix))
        # walk the ancestors of 'path', deepest first, stopping early at
        # the first one already known to be good
        parts.pop()
        prefixes = []
        while parts:
            prefix = os.sep.join(parts)
            if prefix in self.auditeddir:
                break
            check(prefix)
            prefixes.append(prefix)
            parts.pop()

        self.audited.add(path)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)
566
566
def lookup_reg(key, name=None, scope=None):
    # platform registry lookup stub; this default always returns None
    # (a real implementation comes from `from windows import *` below --
    # presumably querying the Windows registry)
    return None
569
569
def hidewindow():
    """Hide current shell window.

    Used to hide the window opened when starting asynchronous
    child process under Windows, unneeded on other systems.
    """
    # no-op here; the Windows platform module imported below supplies a
    # real implementation
    pass
577
577
578 if os.name == 'nt':
578 if os.name == 'nt':
579 from windows import *
579 from windows import *
580 else:
580 else:
581 from posix import *
581 from posix import *
582
582
def makelock(info, pathname):
    # Create a lock file at 'pathname' recording 'info'.  First choice is
    # a symlink whose target is 'info': creation is atomic and the info
    # can be read back without opening anything.
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            # lock already held -- report it to the caller
            raise
        # any other symlink failure: fall through to the plain-file form
    except AttributeError: # no symlink in os
        pass

    # fallback: exclusively create a regular file containing 'info'
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
595
595
def readlock(pathname):
    # Read the info stored in a lock file created by makelock(): the
    # symlink target if it is a symlink, else the file contents.
    try:
        return os.readlink(pathname)
    except OSError, why:
        # EINVAL: not a symlink (file-based lock); ENOSYS: readlink not
        # supported -- in both cases read the file instead
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
608
608
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # not a real file object; fall back to stat'ing it by name
        return os.stat(fp.name)
    return os.fstat(fd)
615
615
616 # File system features
616 # File system features
617
617
def checkcase(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.  Returns True when the case-flipped sibling
    does not resolve to the same file (or does not exist at all).
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    p2 = os.path.join(d, b.upper())
    if path == p2:
        # final component was already all-uppercase; flip the other way
        p2 = os.path.join(d, b.lower())
    try:
        s2 = os.stat(p2)
        if s2 == s1:
            # the flipped name reaches the same file: case-insensitive
            return False
        return True
    except OSError:
        # the flipped name does not exist: case-sensitive.  Only stat
        # failure is expected here -- a bare except would also swallow
        # KeyboardInterrupt/SystemExit.
        return True
637
637
# cache of os.listdir() results keyed by case-normalized directory path,
# shared across fspath() calls
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name is either relative to root, or it is an absolute path starting
    with root. Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    Returns None if the path does not exist.
    '''
    # If name is absolute, make it relative
    if name.lower().startswith(root.lower()):
        l = len(root)
        # guard l < len(name): name may equal root exactly
        if l < len(name) and (name[l] == os.sep or name[l] == os.altsep):
            l = l + 1
        name = name[l:]

    if not os.path.lexists(os.path.join(root, name)):
        return None

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # (strings are immutable: the result of replace() must be kept,
    # otherwise the escaping is silently lost)
    seps = seps.replace('\\', '\\\\')
    pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normcase(os.path.normpath(root))
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = os.listdir(dir)
        contents = _fspathcache[dir]

        # find the directory entry matching this component, ignoring case
        lpart = part.lower()
        lenp = len(part)
        for n in contents:
            if lenp == len(n) and n.lower() == lpart:
                result.append(n)
                break
        else:
            # Cannot happen, as the file exists!
            result.append(part)
        dir = os.path.join(dir, lpart)

    return ''.join(result)
685
685
def checkexec(path):
    """
    Check whether the given path is on a filesystem with UNIX-like exec flags

    Requires a directory (like /foo/.hg)
    """

    # VFAT on some Linux versions can flip mode but it doesn't persist
    # a FS remount. Frequently we can detect it if files are created
    # with exec bit on.

    try:
        EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-')
        try:
            os.close(fh)
            # mode of a freshly created file on this filesystem
            m = os.stat(fn).st_mode & 0777
            new_file_has_exec = m & EXECFLAGS
            # toggle the exec bits and see whether the change sticks
            os.chmod(fn, m ^ EXECFLAGS)
            exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
        finally:
            os.unlink(fn)
    except (IOError, OSError):
        # we don't care, the user probably won't be able to commit anyway
        return False
    return not (new_file_has_exec or exec_flags_cannot_flip)
712
712
def checklink(path):
    """check whether the given path is on a symlink-capable filesystem"""
    # mktemp is not racy because symlink creation will fail if the
    # file already exists
    target = tempfile.mktemp(dir=path, prefix='hg-checklink-')
    try:
        os.symlink(".", target)
        os.unlink(target)
    except (OSError, AttributeError):
        # OSError: the filesystem refused; AttributeError: no os.symlink
        return False
    return True
724
724
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        try:
            os_link(f1, f2)
        except OSError:
            # hardlinks are not supported here at all
            return False

        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    finally:
        # always clean up both probe files
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass

    # unreachable: every path through the try block above returns
    return False
760
760
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # os.altsep is None on platforms without an alternate separator, in
    # which case this evaluates to a falsy value
    return os.altsep and path.endswith(os.altsep)
764
764
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    return path.split(os.sep)
772
772
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        # Windows always counts as a GUI; elsewhere require $DISPLAY
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
787
787
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    try:
        st_mode = os.lstat(name).st_mode & 0777
    except OSError, inst:
        if inst.errno != errno.ENOENT:
            raise
        # no original: derive the mode from createmode or the umask
        st_mode = createmode
        if st_mode is None:
            st_mode = ~umask
        st_mode &= 0666
    os.chmod(temp, st_mode)
    if emptyok:
        # caller will truncate it anyway; skip copying the contents
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                # original vanished between lstat and open: the empty
                # temp file is the right result
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:
        # best effort: do not leave the temp file behind on any failure
        try: os.unlink(temp)
        except: pass
        raise
    return temp
835
835
class atomictempfile(object):
    """file-like object that atomically updates a file

    All writes will be redirected to a temporary copy of the original
    file. When rename is called, the copy is renamed to the original
    name, making the changes visible.
    """
    def __init__(self, name, mode='w+b', createmode=None):
        # double underscore name-mangles __name so delegation through
        # __getattr__ below cannot accidentally shadow it
        self.__name = name
        self._fp = None
        self.temp = mktempcopy(name, emptyok=('w' in mode),
                               createmode=createmode)
        self._fp = posixfile(self.temp, mode)

    def __getattr__(self, name):
        # delegate everything else (write, tell, ...) to the real file
        return getattr(self._fp, name)

    def rename(self):
        # commit: move the temp copy over the target name
        if not self._fp.closed:
            self._fp.close()
        rename(self.temp, localpath(self.__name))

    def close(self):
        # abort: discard the temp copy without touching the target
        if not self._fp:
            return
        if not self._fp.closed:
            try:
                os.unlink(self.temp)
            except: pass
            self._fp.close()

    def __del__(self):
        self.close()
869
869
def makedirs(name, mode=None):
    """recursive directory creation with parent mode inheritance"""
    parent = os.path.abspath(os.path.dirname(name))
    try:
        os.mkdir(name)
        if mode is not None:
            os.chmod(name, mode)
        return
    except OSError, err:
        if err.errno == errno.EEXIST:
            # already there -- nothing to do
            return
        if not name or parent == name or err.errno != errno.ENOENT:
            raise
    # the parent was missing: create it (recursively), then retry name
    makedirs(parent, mode)
    makedirs(name, mode)
885
885
886 class opener(object):
886 class opener(object):
887 """Open files relative to a base directory
887 """Open files relative to a base directory
888
888
889 This class is used to hide the details of COW semantics and
889 This class is used to hide the details of COW semantics and
890 remote file access from higher level code.
890 remote file access from higher level code.
891 """
891 """
892 def __init__(self, base, audit=True):
892 def __init__(self, base, audit=True):
893 self.base = base
893 self.base = base
894 if audit:
894 if audit:
895 self.auditor = path_auditor(base)
895 self.auditor = path_auditor(base)
896 else:
896 else:
897 self.auditor = always
897 self.auditor = always
898 self.createmode = None
898 self.createmode = None
899 self._trustnlink = None
899 self._trustnlink = None
900
900
901 @propertycache
901 @propertycache
902 def _can_symlink(self):
902 def _can_symlink(self):
903 return checklink(self.base)
903 return checklink(self.base)
904
904
905 def _fixfilemode(self, name):
905 def _fixfilemode(self, name):
906 if self.createmode is None:
906 if self.createmode is None:
907 return
907 return
908 os.chmod(name, self.createmode & 0666)
908 os.chmod(name, self.createmode & 0666)
909
909
    def __call__(self, path, mode="r", text=False, atomictemp=False):
        """Open ``path`` relative to ``self.base`` after auditing it.

        mode       -- standard open() mode; "b" is forced unless text=True
        atomictemp -- write to a temp file renamed into place on close
                      (see atomictempfile)

        For write modes, hardlinks to the target are broken first so that
        shared (cloned) files are not clobbered in place.
        """
        self.auditor(path)
        f = os.path.join(self.base, path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        dirname, basename = os.path.split(f)
        # If basename is empty, then the path is malformed because it points
        # to a directory. Let the posixfile() call below raise IOError.
        if basename and mode not in ('r', 'rb'):
            if atomictemp:
                if not os.path.isdir(dirname):
                    makedirs(dirname, self.createmode)
                return atomictempfile(f, mode, self.createmode)
            try:
                if 'w' in mode:
                    # Truncating write: remove the file first so any
                    # hardlinked copies keep their old contents.
                    unlink(f)
                    nlink = 0
                else:
                    # nlinks() may behave differently for files on Windows
                    # shares if the file is open.
                    fd = posixfile(f)
                    nlink = nlinks(f)
                    if nlink < 1:
                        nlink = 2 # force mktempcopy (issue1922)
                    fd.close()
            except (OSError, IOError), e:
                if e.errno != errno.ENOENT:
                    raise
                # File does not exist yet: it will be created below.
                nlink = 0
            if not os.path.isdir(dirname):
                makedirs(dirname, self.createmode)
            if nlink > 0:
                # Decide once whether this filesystem's link counts can
                # be trusted (checknlink probes with a temporary link).
                if self._trustnlink is None:
                    self._trustnlink = nlink > 1 or checknlink(f)
                if nlink > 1 or not self._trustnlink:
                    # Break the hardlink: replace with a private copy.
                    rename(mktempcopy(f), f)
        fp = posixfile(f, mode)
        if nlink == 0:
            # Brand-new file: apply the configured creation mode.
            self._fixfilemode(f)
        return fp
953
953
    def symlink(self, src, dst):
        # Create a symlink at dst (relative to self.base) pointing at src.
        # On platforms without symlink support, fall back to writing a
        # regular file whose contents are the link target.
        self.auditor(dst)
        linkname = os.path.join(self.base, dst)
        try:
            # Replace whatever currently exists at the destination.
            os.unlink(linkname)
        except OSError:
            pass

        dirname = os.path.dirname(linkname)
        if not os.path.exists(dirname):
            makedirs(dirname, self.createmode)

        if self._can_symlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # No symlink support: store the target path as file content.
            f = self(dst, "w")
            f.write(src)
            f.close()
            self._fixfilemode(dst)
977
977
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # Split chunks bigger than 1MB into 256kB pieces so a single
            # huge input chunk cannot force oversized copies in read().
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = []

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        left = l
        # Accumulate pieces in a list and join once at the end; the
        # previous "buf += chunk" string concatenation is quadratic in
        # the number of chunks in the worst case.
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # iterator exhausted: return a short read
                    break

            chunk = queue.pop(0)
            left -= len(chunk)
            if left < 0:
                # Last chunk overshot: keep its tail for the next read.
                queue.insert(0, chunk[left:])
                buf.append(chunk[:left])
            else:
                buf.append(chunk)

        return ''.join(buf)
1025
1025
def filechunkiter(f, size=65536, limit=None):
    """Generate blocks of data read from file object f.

    Each block holds at most ``size`` bytes (default 65536); iteration
    stops at end of file, or once ``limit`` bytes (default: no limit)
    have been produced.  Blocks may be shorter than ``size`` when the
    underlying file is a socket or similar short-reading object."""
    assert size >= 0
    assert limit is None or limit >= 0
    remaining = limit
    while True:
        if remaining is None:
            nbytes = size
        else:
            nbytes = min(remaining, size)
        if not nbytes:
            # limit exhausted
            break
        chunk = f.read(nbytes)
        if not chunk:
            # end of file
            break
        if remaining is not None:
            remaining -= len(chunk)
        yield chunk
1046
1046
def makedate():
    '''Return the current local time as a (unixtime, offset) tuple,
    where offset is this machine's distance from UTC in seconds.'''
    lt = time.localtime()
    # lt[8] is the DST flag; choose the matching zone offset.
    if lt[8] == 1 and time.daylight:
        delta = time.altzone
    else:
        delta = time.timezone
    stamp = time.mktime(lt)
    if stamp < 0:
        # A pre-epoch "now" means the system clock is bogus.
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % stamp, hint=hint)
    return stamp, delta
1058
1058
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """Render a (unixtime, offset) tuple as a localized time string.

    unixtime is seconds since the epoch and offset is the timezone's
    distance from UTC in seconds.  "%1" and "%2" in the format expand to
    the signed hour and minute parts of the offset.  When date is empty,
    the current time (makedate()) is used."""
    when, tzoff = date or makedate()
    if when < 0:
        # time.gmtime(lt) fails on Windows for lt < -43200
        when, tzoff = 0, 0
    if "%1" in format or "%2" in format:
        # offset is seconds west of UTC, hence the inverted sign
        if tzoff > 0:
            sign = "-"
        else:
            sign = "+"
        minutes = abs(tzoff) // 60
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    return time.strftime(format, time.gmtime(float(when) - tzoff))
1075
1075
def shortdate(date=None):
    """Render a (timestamp, tzoff) tuple as an ISO 8601 date (YYYY-MM-DD)."""
    return datestr(date, '%Y-%m-%d')
1079
1079
def strdate(string, format, defaults=None):
    """Parse a localized time string into a (unixtime, offset) tuple.

    ``defaults`` maps strptime field groups ("d", "mb", "yY", "HI", "M",
    "S") to (biased, today) fallback strings used to fill in fields that
    ``format`` does not mention (see parsedate()).  Raises ValueError if
    the string cannot be parsed.
    """
    # A mutable default argument would be shared across calls; the old
    # default ([]) was also the wrong type for the defaults[part] lookup.
    if defaults is None:
        defaults = {}

    def timezone(string):
        # UTC offset in seconds encoded by the trailing timezone token
        # ("+HHMM"/"-HHMM"/"GMT"/"UTC"), or None when absent.
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            sign = (tz[0] == "+") and 1 or -1
            hours = int(tz[1:3])
            minutes = int(tz[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tz == "GMT" or tz == "UTC":
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset is not None:
        # strip the timezone token before strptime
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%" + p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1120
1120
def parsedate(date, formats=None, bias={}):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    bias maps strptime field groups to preferred fallback strings; it is
    only read, never mutated, so the mutable default is harmless here.
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()
    try:
        # fast path: internal "unixtime offset" representation
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        nowmap = {}  # NOTE(review): appears unused — candidate for removal
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1175
1175
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round unspecified fields down (Jan 1st, 00:00:00)
        d = dict(mb="1", d="1")
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round unspecified fields up (Dec 31st, 23:59:59), probing for
        # the month's actual last day
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Exception:
                # day number invalid for this month; try a shorter one.
                # (was a bare "except:", which also swallowed
                # KeyboardInterrupt/SystemExit)
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # inclusive range between two dates
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # single date: match the whole span it denotes
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1244
1244
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # Drop the domain part of an email address.
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    # If a "Name <address" form remains, keep only the address part.
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    # Trim at the first space, then at the first dot.
    for sep in (' ', '.'):
        pos = user.find(sep)
        if pos >= 0:
            user = user[:pos]
    return user
1260
1260
def email(author):
    '''get email of author.'''
    # Everything between '<' and '>'; when there are no angle brackets,
    # find() returns -1, making the slice cover the whole string.
    end = author.find('>')
    if end < 0:
        end = None
    return author[author.find('<') + 1:end]
1267
1267
1268 def _ellipsis(text, maxlength):
1268 def _ellipsis(text, maxlength):
1269 if len(text) <= maxlength:
1269 if len(text) <= maxlength:
1270 return text, False
1270 return text, False
1271 else:
1271 else:
1272 return "%s..." % (text[:maxlength - 3]), True
1272 return "%s..." % (text[:maxlength - 3]), True
1273
1273
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    try:
        # Decode first so we never cut inside a multi-byte sequence.
        utext, truncated = _ellipsis(text.decode(encoding.encoding),
                                     maxlength)
        if truncated:
            return utext.encode(encoding.encoding)
        return text
    except (UnicodeDecodeError, UnicodeEncodeError):
        # Undecodable input: fall back to a plain byte-level trim.
        return _ellipsis(text, maxlength)[0]
1285
1285
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, recursively.

    followsym -- also descend into symlinked directories, using
                 seen_dirs to guard against symlink cycles
    recurse   -- keep searching below a repository once found
    '''
    def errhandler(err):
        # Only errors on the root path itself abort the walk.
        if err.filename == path:
            raise err
    if followsym and hasattr(os.path, 'samestat'):
        def _add_dir_if_not_there(dirlst, dirname):
            # Record dirname's stat in dirlst; return False when an
            # equivalent directory (same device/inode) was already seen.
            match = False
            samestat = os.path.samestat
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # Without samestat we cannot detect cycles, so never follow.
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        _add_dir_if_not_there(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if _add_dir_if_not_there(seen_dirs, fname):
                    if os.path.islink(fname):
                        # Walk the link target via a nested generator,
                        # sharing seen_dirs so targets are not revisited.
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
1332
1332
# Cached result of rcpath(); None until first computed.
_rcpath = None
1334
1334
def os_rcpath():
    '''return default os-specific hgrc search path'''
    # System-wide configuration files first, then per-user ones.
    rcs = system_rcpath()
    rcs.extend(user_rcpath())
    return [os.path.normpath(f) for f in rcs]
1341
1341
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    # Computed lazily and cached in the module-level _rcpath.
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = expandpath(p)
                if os.path.isdir(p):
                    # A directory entry means "every *.rc file inside".
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = os_rcpath()
    return _rcpath
1365
1365
def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    # (factor, unit, format): the first row where nbytes reaches
    # factor * unit wins, which yields three significant digits.
    units = (
        (100, 1 << 30, _('%.0f GB')),
        (10, 1 << 30, _('%.1f GB')),
        (1, 1 << 30, _('%.2f GB')),
        (100, 1 << 20, _('%.0f MB')),
        (10, 1 << 20, _('%.1f MB')),
        (1, 1 << 20, _('%.2f MB')),
        (100, 1 << 10, _('%.0f KB')),
        (10, 1 << 10, _('%.1f KB')),
        (1, 1 << 10, _('%.2f KB')),
        (1, 1, _('%.0f bytes')),
        )

    for factor, unit, fmt in units:
        if nbytes >= unit * factor:
            return fmt % (nbytes / float(unit))
    # Fallback for nbytes == 0 (no row matched).
    return units[-1][2] % nbytes
1386
1386
def drop_scheme(scheme, path):
    """Strip 'scheme:' (and any '//' authority part) from path.

    For file:// URLs the result is an absolute local path; for other
    schemes the scheme prefix and leading double slash are removed."""
    prefix = scheme + ':'
    if not path.startswith(prefix):
        return path
    path = path[len(prefix):]
    if not path.startswith('//'):
        return path
    if scheme == 'file':
        i = path.find('/', 2)
        if i == -1:
            # "file://host" with no path component
            return ''
        # On Windows, absolute paths are rooted at the current drive
        # root. On POSIX they are rooted at the file system root.
        if os.name == 'nt':
            droot = os.path.splitdrive(os.getcwd())[0] + '/'
            return os.path.join(droot, path[i + 1:])
        return path[i:]
    return path[2:]
1406
def uirepr(s):
    """repr() for user display: collapse doubled backslashes so that
    Windows paths stay readable."""
    r = repr(s)
    # Avoid double backslash in Windows path repr()
    return r.replace('\\\\', '\\')
1410
1390
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for double-width characters.

        Some Asian characters use two terminal columns instead of one.
        A good example of this behavior can be seen with u'\u65e5\u672c',
        the two Japanese characters for "Japan":
        len() returns 2, but when printed to a terminal, they eat 4 columns.

        (Note that this has nothing to do whatsoever with unicode
        representation, or encoding of the underlying string)
        """
        def __init__(self, **kwargs):
            textwrap.TextWrapper.__init__(self, **kwargs)

        def _cutdown(self, str, space_left):
            # Split str so the first piece fills at most space_left
            # terminal columns, counting East Asian Wide/Fullwidth/
            # Ambiguous characters ('WFA') as two columns each.
            l = 0
            ucstr = unicode(str, encoding.encoding)
            colwidth = unicodedata.east_asian_width
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i]) in 'WFA' and 2 or 1
                if space_left < l:
                    return (ucstr[:i].encode(encoding.encoding),
                            ucstr[i:].encode(encoding.encoding))
            return str, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                # Cut by display columns rather than character count.
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

    # Replace this factory with the class itself after the first call,
    # so the class body (and textwrap use) is only set up once.
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1453
1433
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap line to width columns, indenting the first line with
    initindent and subsequent lines with hangindent."""
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    return MBTextWrapper(width=width,
                         initial_indent=initindent,
                         subsequent_indent=hangindent).fill(line)
1463
1443
def iterlines(iterator):
    """Yield the individual lines of every chunk in iterator."""
    for piece in iterator:
        for ln in piece.splitlines():
            yield ln
1468
1448
def expandpath(path):
    """Expand environment variables and ~user constructs in path."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
1471
1451
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not main_is_frozen():
        return gethgcmd()
    # frozen binary: re-invoke the executable itself
    return [sys.executable]
1482
1462
1483 def rundetached(args, condfn):
1463 def rundetached(args, condfn):
1484 """Execute the argument list in a detached process.
1464 """Execute the argument list in a detached process.
1485
1465
1486 condfn is a callable which is called repeatedly and should return
1466 condfn is a callable which is called repeatedly and should return
1487 True once the child process is known to have started successfully.
1467 True once the child process is known to have started successfully.
1488 At this point, the child process PID is returned. If the child
1468 At this point, the child process PID is returned. If the child
1489 process fails to start or finishes before condfn() evaluates to
1469 process fails to start or finishes before condfn() evaluates to
1490 True, return -1.
1470 True, return -1.
1491 """
1471 """
1492 # Windows case is easier because the child process is either
1472 # Windows case is easier because the child process is either
1493 # successfully starting and validating the condition or exiting
1473 # successfully starting and validating the condition or exiting
1494 # on failure. We just poll on its PID. On Unix, if the child
1474 # on failure. We just poll on its PID. On Unix, if the child
1495 # process fails to start, it will be left in a zombie state until
1475 # process fails to start, it will be left in a zombie state until
1496 # the parent wait on it, which we cannot do since we expect a long
1476 # the parent wait on it, which we cannot do since we expect a long
1497 # running process on success. Instead we listen for SIGCHLD telling
1477 # running process on success. Instead we listen for SIGCHLD telling
1498 # us our child process terminated.
1478 # us our child process terminated.
1499 terminated = set()
1479 terminated = set()
1500 def handler(signum, frame):
1480 def handler(signum, frame):
1501 terminated.add(os.wait())
1481 terminated.add(os.wait())
1502 prevhandler = None
1482 prevhandler = None
1503 if hasattr(signal, 'SIGCHLD'):
1483 if hasattr(signal, 'SIGCHLD'):
1504 prevhandler = signal.signal(signal.SIGCHLD, handler)
1484 prevhandler = signal.signal(signal.SIGCHLD, handler)
1505 try:
1485 try:
1506 pid = spawndetached(args)
1486 pid = spawndetached(args)
1507 while not condfn():
1487 while not condfn():
1508 if ((pid in terminated or not testpid(pid))
1488 if ((pid in terminated or not testpid(pid))
1509 and not condfn()):
1489 and not condfn()):
1510 return -1
1490 return -1
1511 time.sleep(0.1)
1491 time.sleep(0.1)
1512 return pid
1492 return pid
1513 finally:
1493 finally:
1514 if prevhandler is not None:
1494 if prevhandler is not None:
1515 signal.signal(signal.SIGCHLD, prevhandler)
1495 signal.signal(signal.SIGCHLD, prevhandler)
1516
1496
1517 try:
1497 try:
1518 any, all = any, all
1498 any, all = any, all
1519 except NameError:
1499 except NameError:
1520 def any(iterable):
1500 def any(iterable):
1521 for i in iterable:
1501 for i in iterable:
1522 if i:
1502 if i:
1523 return True
1503 return True
1524 return False
1504 return False
1525
1505
1526 def all(iterable):
1506 def all(iterable):
1527 for i in iterable:
1507 for i in iterable:
1528 if not i:
1508 if not i:
1529 return False
1509 return False
1530 return True
1510 return True
1531
1511
1532 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1512 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1533 """Return the result of interpolating items in the mapping into string s.
1513 """Return the result of interpolating items in the mapping into string s.
1534
1514
1535 prefix is a single character string, or a two character string with
1515 prefix is a single character string, or a two character string with
1536 a backslash as the first character if the prefix needs to be escaped in
1516 a backslash as the first character if the prefix needs to be escaped in
1537 a regular expression.
1517 a regular expression.
1538
1518
1539 fn is an optional function that will be applied to the replacement text
1519 fn is an optional function that will be applied to the replacement text
1540 just before replacement.
1520 just before replacement.
1541
1521
1542 escape_prefix is an optional flag that allows using doubled prefix for
1522 escape_prefix is an optional flag that allows using doubled prefix for
1543 its escaping.
1523 its escaping.
1544 """
1524 """
1545 fn = fn or (lambda s: s)
1525 fn = fn or (lambda s: s)
1546 patterns = '|'.join(mapping.keys())
1526 patterns = '|'.join(mapping.keys())
1547 if escape_prefix:
1527 if escape_prefix:
1548 patterns += '|' + prefix
1528 patterns += '|' + prefix
1549 if len(prefix) > 1:
1529 if len(prefix) > 1:
1550 prefix_char = prefix[1:]
1530 prefix_char = prefix[1:]
1551 else:
1531 else:
1552 prefix_char = prefix
1532 prefix_char = prefix
1553 mapping[prefix_char] = prefix_char
1533 mapping[prefix_char] = prefix_char
1554 r = re.compile(r'%s(%s)' % (prefix, patterns))
1534 r = re.compile(r'%s(%s)' % (prefix, patterns))
1555 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1535 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1556
1536
1557 def getport(port):
1537 def getport(port):
1558 """Return the port for a given network service.
1538 """Return the port for a given network service.
1559
1539
1560 If port is an integer, it's returned as is. If it's a string, it's
1540 If port is an integer, it's returned as is. If it's a string, it's
1561 looked up using socket.getservbyname(). If there's no matching
1541 looked up using socket.getservbyname(). If there's no matching
1562 service, util.Abort is raised.
1542 service, util.Abort is raised.
1563 """
1543 """
1564 try:
1544 try:
1565 return int(port)
1545 return int(port)
1566 except ValueError:
1546 except ValueError:
1567 pass
1547 pass
1568
1548
1569 try:
1549 try:
1570 return socket.getservbyname(port)
1550 return socket.getservbyname(port)
1571 except socket.error:
1551 except socket.error:
1572 raise Abort(_("no port number associated with service '%s'") % port)
1552 raise Abort(_("no port number associated with service '%s'") % port)
1573
1553
1574 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1554 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1575 '0': False, 'no': False, 'false': False, 'off': False,
1555 '0': False, 'no': False, 'false': False, 'off': False,
1576 'never': False}
1556 'never': False}
1577
1557
1578 def parsebool(s):
1558 def parsebool(s):
1579 """Parse s into a boolean.
1559 """Parse s into a boolean.
1580
1560
1581 If s is not a valid boolean, returns None.
1561 If s is not a valid boolean, returns None.
1582 """
1562 """
1583 return _booleans.get(s.lower(), None)
1563 return _booleans.get(s.lower(), None)
@@ -1,560 +1,576 b''
1 Setting up test
1 Setting up test
2
2
3 $ hg init test
3 $ hg init test
4 $ cd test
4 $ cd test
5 $ echo 0 > afile
5 $ echo 0 > afile
6 $ hg add afile
6 $ hg add afile
7 $ hg commit -m "0.0"
7 $ hg commit -m "0.0"
8 $ echo 1 >> afile
8 $ echo 1 >> afile
9 $ hg commit -m "0.1"
9 $ hg commit -m "0.1"
10 $ echo 2 >> afile
10 $ echo 2 >> afile
11 $ hg commit -m "0.2"
11 $ hg commit -m "0.2"
12 $ echo 3 >> afile
12 $ echo 3 >> afile
13 $ hg commit -m "0.3"
13 $ hg commit -m "0.3"
14 $ hg update -C 0
14 $ hg update -C 0
15 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
15 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
16 $ echo 1 >> afile
16 $ echo 1 >> afile
17 $ hg commit -m "1.1"
17 $ hg commit -m "1.1"
18 created new head
18 created new head
19 $ echo 2 >> afile
19 $ echo 2 >> afile
20 $ hg commit -m "1.2"
20 $ hg commit -m "1.2"
21 $ echo "a line" > fred
21 $ echo "a line" > fred
22 $ echo 3 >> afile
22 $ echo 3 >> afile
23 $ hg add fred
23 $ hg add fred
24 $ hg commit -m "1.3"
24 $ hg commit -m "1.3"
25 $ hg mv afile adifferentfile
25 $ hg mv afile adifferentfile
26 $ hg commit -m "1.3m"
26 $ hg commit -m "1.3m"
27 $ hg update -C 3
27 $ hg update -C 3
28 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
28 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
29 $ hg mv afile anotherfile
29 $ hg mv afile anotherfile
30 $ hg commit -m "0.3m"
30 $ hg commit -m "0.3m"
31 $ hg verify
31 $ hg verify
32 checking changesets
32 checking changesets
33 checking manifests
33 checking manifests
34 crosschecking files in changesets and manifests
34 crosschecking files in changesets and manifests
35 checking files
35 checking files
36 4 files, 9 changesets, 7 total revisions
36 4 files, 9 changesets, 7 total revisions
37 $ cd ..
37 $ cd ..
38 $ hg init empty
38 $ hg init empty
39
39
40 Bundle --all
40 Bundle --all
41
41
42 $ hg -R test bundle --all all.hg
42 $ hg -R test bundle --all all.hg
43 9 changesets found
43 9 changesets found
44
44
45 Bundle test to full.hg
45 Bundle test to full.hg
46
46
47 $ hg -R test bundle full.hg empty
47 $ hg -R test bundle full.hg empty
48 searching for changes
48 searching for changes
49 9 changesets found
49 9 changesets found
50
50
51 Unbundle full.hg in test
51 Unbundle full.hg in test
52
52
53 $ hg -R test unbundle full.hg
53 $ hg -R test unbundle full.hg
54 adding changesets
54 adding changesets
55 adding manifests
55 adding manifests
56 adding file changes
56 adding file changes
57 added 0 changesets with 0 changes to 4 files
57 added 0 changesets with 0 changes to 4 files
58 (run 'hg update' to get a working copy)
58 (run 'hg update' to get a working copy)
59
59
60 Verify empty
60 Verify empty
61
61
62 $ hg -R empty heads
62 $ hg -R empty heads
63 [1]
63 [1]
64 $ hg -R empty verify
64 $ hg -R empty verify
65 checking changesets
65 checking changesets
66 checking manifests
66 checking manifests
67 crosschecking files in changesets and manifests
67 crosschecking files in changesets and manifests
68 checking files
68 checking files
69 0 files, 0 changesets, 0 total revisions
69 0 files, 0 changesets, 0 total revisions
70
70
71 Pull full.hg into test (using --cwd)
71 Pull full.hg into test (using --cwd)
72
72
73 $ hg --cwd test pull ../full.hg
73 $ hg --cwd test pull ../full.hg
74 pulling from ../full.hg
74 pulling from ../full.hg
75 searching for changes
75 searching for changes
76 no changes found
76 no changes found
77
77
78 Pull full.hg into empty (using --cwd)
78 Pull full.hg into empty (using --cwd)
79
79
80 $ hg --cwd empty pull ../full.hg
80 $ hg --cwd empty pull ../full.hg
81 pulling from ../full.hg
81 pulling from ../full.hg
82 requesting all changes
82 requesting all changes
83 adding changesets
83 adding changesets
84 adding manifests
84 adding manifests
85 adding file changes
85 adding file changes
86 added 9 changesets with 7 changes to 4 files (+1 heads)
86 added 9 changesets with 7 changes to 4 files (+1 heads)
87 (run 'hg heads' to see heads, 'hg merge' to merge)
87 (run 'hg heads' to see heads, 'hg merge' to merge)
88
88
89 Rollback empty
89 Rollback empty
90
90
91 $ hg -R empty rollback
91 $ hg -R empty rollback
92 repository tip rolled back to revision -1 (undo pull)
92 repository tip rolled back to revision -1 (undo pull)
93 working directory now based on revision -1
93 working directory now based on revision -1
94
94
95 Pull full.hg into empty again (using --cwd)
95 Pull full.hg into empty again (using --cwd)
96
96
97 $ hg --cwd empty pull ../full.hg
97 $ hg --cwd empty pull ../full.hg
98 pulling from ../full.hg
98 pulling from ../full.hg
99 requesting all changes
99 requesting all changes
100 adding changesets
100 adding changesets
101 adding manifests
101 adding manifests
102 adding file changes
102 adding file changes
103 added 9 changesets with 7 changes to 4 files (+1 heads)
103 added 9 changesets with 7 changes to 4 files (+1 heads)
104 (run 'hg heads' to see heads, 'hg merge' to merge)
104 (run 'hg heads' to see heads, 'hg merge' to merge)
105
105
106 Pull full.hg into test (using -R)
106 Pull full.hg into test (using -R)
107
107
108 $ hg -R test pull full.hg
108 $ hg -R test pull full.hg
109 pulling from full.hg
109 pulling from full.hg
110 searching for changes
110 searching for changes
111 no changes found
111 no changes found
112
112
113 Pull full.hg into empty (using -R)
113 Pull full.hg into empty (using -R)
114
114
115 $ hg -R empty pull full.hg
115 $ hg -R empty pull full.hg
116 pulling from full.hg
116 pulling from full.hg
117 searching for changes
117 searching for changes
118 no changes found
118 no changes found
119
119
120 Rollback empty
120 Rollback empty
121
121
122 $ hg -R empty rollback
122 $ hg -R empty rollback
123 repository tip rolled back to revision -1 (undo pull)
123 repository tip rolled back to revision -1 (undo pull)
124 working directory now based on revision -1
124 working directory now based on revision -1
125
125
126 Pull full.hg into empty again (using -R)
126 Pull full.hg into empty again (using -R)
127
127
128 $ hg -R empty pull full.hg
128 $ hg -R empty pull full.hg
129 pulling from full.hg
129 pulling from full.hg
130 requesting all changes
130 requesting all changes
131 adding changesets
131 adding changesets
132 adding manifests
132 adding manifests
133 adding file changes
133 adding file changes
134 added 9 changesets with 7 changes to 4 files (+1 heads)
134 added 9 changesets with 7 changes to 4 files (+1 heads)
135 (run 'hg heads' to see heads, 'hg merge' to merge)
135 (run 'hg heads' to see heads, 'hg merge' to merge)
136
136
137 Log -R full.hg in fresh empty
137 Log -R full.hg in fresh empty
138
138
139 $ rm -r empty
139 $ rm -r empty
140 $ hg init empty
140 $ hg init empty
141 $ cd empty
141 $ cd empty
142 $ hg -R bundle://../full.hg log
142 $ hg -R bundle://../full.hg log
143 changeset: 8:aa35859c02ea
143 changeset: 8:aa35859c02ea
144 tag: tip
144 tag: tip
145 parent: 3:eebf5a27f8ca
145 parent: 3:eebf5a27f8ca
146 user: test
146 user: test
147 date: Thu Jan 01 00:00:00 1970 +0000
147 date: Thu Jan 01 00:00:00 1970 +0000
148 summary: 0.3m
148 summary: 0.3m
149
149
150 changeset: 7:a6a34bfa0076
150 changeset: 7:a6a34bfa0076
151 user: test
151 user: test
152 date: Thu Jan 01 00:00:00 1970 +0000
152 date: Thu Jan 01 00:00:00 1970 +0000
153 summary: 1.3m
153 summary: 1.3m
154
154
155 changeset: 6:7373c1169842
155 changeset: 6:7373c1169842
156 user: test
156 user: test
157 date: Thu Jan 01 00:00:00 1970 +0000
157 date: Thu Jan 01 00:00:00 1970 +0000
158 summary: 1.3
158 summary: 1.3
159
159
160 changeset: 5:1bb50a9436a7
160 changeset: 5:1bb50a9436a7
161 user: test
161 user: test
162 date: Thu Jan 01 00:00:00 1970 +0000
162 date: Thu Jan 01 00:00:00 1970 +0000
163 summary: 1.2
163 summary: 1.2
164
164
165 changeset: 4:095197eb4973
165 changeset: 4:095197eb4973
166 parent: 0:f9ee2f85a263
166 parent: 0:f9ee2f85a263
167 user: test
167 user: test
168 date: Thu Jan 01 00:00:00 1970 +0000
168 date: Thu Jan 01 00:00:00 1970 +0000
169 summary: 1.1
169 summary: 1.1
170
170
171 changeset: 3:eebf5a27f8ca
171 changeset: 3:eebf5a27f8ca
172 user: test
172 user: test
173 date: Thu Jan 01 00:00:00 1970 +0000
173 date: Thu Jan 01 00:00:00 1970 +0000
174 summary: 0.3
174 summary: 0.3
175
175
176 changeset: 2:e38ba6f5b7e0
176 changeset: 2:e38ba6f5b7e0
177 user: test
177 user: test
178 date: Thu Jan 01 00:00:00 1970 +0000
178 date: Thu Jan 01 00:00:00 1970 +0000
179 summary: 0.2
179 summary: 0.2
180
180
181 changeset: 1:34c2bf6b0626
181 changeset: 1:34c2bf6b0626
182 user: test
182 user: test
183 date: Thu Jan 01 00:00:00 1970 +0000
183 date: Thu Jan 01 00:00:00 1970 +0000
184 summary: 0.1
184 summary: 0.1
185
185
186 changeset: 0:f9ee2f85a263
186 changeset: 0:f9ee2f85a263
187 user: test
187 user: test
188 date: Thu Jan 01 00:00:00 1970 +0000
188 date: Thu Jan 01 00:00:00 1970 +0000
189 summary: 0.0
189 summary: 0.0
190
190
191 Make sure bundlerepo doesn't leak tempfiles (issue2491)
191 Make sure bundlerepo doesn't leak tempfiles (issue2491)
192
192
193 $ ls .hg
193 $ ls .hg
194 00changelog.i
194 00changelog.i
195 cache
195 cache
196 requires
196 requires
197 store
197 store
198
198
199 Pull ../full.hg into empty (with hook)
199 Pull ../full.hg into empty (with hook)
200
200
201 $ echo '[hooks]' >> .hg/hgrc
201 $ echo '[hooks]' >> .hg/hgrc
202 $ echo 'changegroup = python "$TESTDIR"/printenv.py changegroup' >> .hg/hgrc
202 $ echo 'changegroup = python "$TESTDIR"/printenv.py changegroup' >> .hg/hgrc
203
203
204 doesn't work (yet ?)
204 doesn't work (yet ?)
205
205
206 hg -R bundle://../full.hg verify
206 hg -R bundle://../full.hg verify
207
207
208 $ hg pull bundle://../full.hg
208 $ hg pull bundle://../full.hg
209 pulling from bundle:../full.hg
209 pulling from bundle:../full.hg
210 requesting all changes
210 requesting all changes
211 adding changesets
211 adding changesets
212 adding manifests
212 adding manifests
213 adding file changes
213 adding file changes
214 added 9 changesets with 7 changes to 4 files (+1 heads)
214 added 9 changesets with 7 changes to 4 files (+1 heads)
215 changegroup hook: HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_SOURCE=pull HG_URL=bundle:../full.hg
215 changegroup hook: HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_SOURCE=pull HG_URL=bundle:../full.hg
216 (run 'hg heads' to see heads, 'hg merge' to merge)
216 (run 'hg heads' to see heads, 'hg merge' to merge)
217
217
218 Rollback empty
218 Rollback empty
219
219
220 $ hg rollback
220 $ hg rollback
221 repository tip rolled back to revision -1 (undo pull)
221 repository tip rolled back to revision -1 (undo pull)
222 working directory now based on revision -1
222 working directory now based on revision -1
223 $ cd ..
223 $ cd ..
224
224
225 Log -R bundle:empty+full.hg
225 Log -R bundle:empty+full.hg
226
226
227 $ hg -R bundle:empty+full.hg log --template="{rev} "; echo ""
227 $ hg -R bundle:empty+full.hg log --template="{rev} "; echo ""
228 8 7 6 5 4 3 2 1 0
228 8 7 6 5 4 3 2 1 0
229
229
230 Pull full.hg into empty again (using -R; with hook)
230 Pull full.hg into empty again (using -R; with hook)
231
231
232 $ hg -R empty pull full.hg
232 $ hg -R empty pull full.hg
233 pulling from full.hg
233 pulling from full.hg
234 requesting all changes
234 requesting all changes
235 adding changesets
235 adding changesets
236 adding manifests
236 adding manifests
237 adding file changes
237 adding file changes
238 added 9 changesets with 7 changes to 4 files (+1 heads)
238 added 9 changesets with 7 changes to 4 files (+1 heads)
239 changegroup hook: HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_SOURCE=pull HG_URL=bundle:empty+full.hg
239 changegroup hook: HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_SOURCE=pull HG_URL=bundle:empty+full.hg
240 (run 'hg heads' to see heads, 'hg merge' to merge)
240 (run 'hg heads' to see heads, 'hg merge' to merge)
241
241
242 Create partial clones
242 Create partial clones
243
243
244 $ rm -r empty
244 $ rm -r empty
245 $ hg init empty
245 $ hg init empty
246 $ hg clone -r 3 test partial
246 $ hg clone -r 3 test partial
247 adding changesets
247 adding changesets
248 adding manifests
248 adding manifests
249 adding file changes
249 adding file changes
250 added 4 changesets with 4 changes to 1 files
250 added 4 changesets with 4 changes to 1 files
251 updating to branch default
251 updating to branch default
252 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
252 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
253 $ hg clone partial partial2
253 $ hg clone partial partial2
254 updating to branch default
254 updating to branch default
255 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
255 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
256 $ cd partial
256 $ cd partial
257
257
258 Log -R full.hg in partial
258 Log -R full.hg in partial
259
259
260 $ hg -R bundle://../full.hg log
260 $ hg -R bundle://../full.hg log
261 changeset: 8:aa35859c02ea
261 changeset: 8:aa35859c02ea
262 tag: tip
262 tag: tip
263 parent: 3:eebf5a27f8ca
263 parent: 3:eebf5a27f8ca
264 user: test
264 user: test
265 date: Thu Jan 01 00:00:00 1970 +0000
265 date: Thu Jan 01 00:00:00 1970 +0000
266 summary: 0.3m
266 summary: 0.3m
267
267
268 changeset: 7:a6a34bfa0076
268 changeset: 7:a6a34bfa0076
269 user: test
269 user: test
270 date: Thu Jan 01 00:00:00 1970 +0000
270 date: Thu Jan 01 00:00:00 1970 +0000
271 summary: 1.3m
271 summary: 1.3m
272
272
273 changeset: 6:7373c1169842
273 changeset: 6:7373c1169842
274 user: test
274 user: test
275 date: Thu Jan 01 00:00:00 1970 +0000
275 date: Thu Jan 01 00:00:00 1970 +0000
276 summary: 1.3
276 summary: 1.3
277
277
278 changeset: 5:1bb50a9436a7
278 changeset: 5:1bb50a9436a7
279 user: test
279 user: test
280 date: Thu Jan 01 00:00:00 1970 +0000
280 date: Thu Jan 01 00:00:00 1970 +0000
281 summary: 1.2
281 summary: 1.2
282
282
283 changeset: 4:095197eb4973
283 changeset: 4:095197eb4973
284 parent: 0:f9ee2f85a263
284 parent: 0:f9ee2f85a263
285 user: test
285 user: test
286 date: Thu Jan 01 00:00:00 1970 +0000
286 date: Thu Jan 01 00:00:00 1970 +0000
287 summary: 1.1
287 summary: 1.1
288
288
289 changeset: 3:eebf5a27f8ca
289 changeset: 3:eebf5a27f8ca
290 user: test
290 user: test
291 date: Thu Jan 01 00:00:00 1970 +0000
291 date: Thu Jan 01 00:00:00 1970 +0000
292 summary: 0.3
292 summary: 0.3
293
293
294 changeset: 2:e38ba6f5b7e0
294 changeset: 2:e38ba6f5b7e0
295 user: test
295 user: test
296 date: Thu Jan 01 00:00:00 1970 +0000
296 date: Thu Jan 01 00:00:00 1970 +0000
297 summary: 0.2
297 summary: 0.2
298
298
299 changeset: 1:34c2bf6b0626
299 changeset: 1:34c2bf6b0626
300 user: test
300 user: test
301 date: Thu Jan 01 00:00:00 1970 +0000
301 date: Thu Jan 01 00:00:00 1970 +0000
302 summary: 0.1
302 summary: 0.1
303
303
304 changeset: 0:f9ee2f85a263
304 changeset: 0:f9ee2f85a263
305 user: test
305 user: test
306 date: Thu Jan 01 00:00:00 1970 +0000
306 date: Thu Jan 01 00:00:00 1970 +0000
307 summary: 0.0
307 summary: 0.0
308
308
309
309
310 Incoming full.hg in partial
310 Incoming full.hg in partial
311
311
312 $ hg incoming bundle://../full.hg
312 $ hg incoming bundle://../full.hg
313 comparing with bundle:../full.hg
313 comparing with bundle:../full.hg
314 searching for changes
314 searching for changes
315 changeset: 4:095197eb4973
315 changeset: 4:095197eb4973
316 parent: 0:f9ee2f85a263
316 parent: 0:f9ee2f85a263
317 user: test
317 user: test
318 date: Thu Jan 01 00:00:00 1970 +0000
318 date: Thu Jan 01 00:00:00 1970 +0000
319 summary: 1.1
319 summary: 1.1
320
320
321 changeset: 5:1bb50a9436a7
321 changeset: 5:1bb50a9436a7
322 user: test
322 user: test
323 date: Thu Jan 01 00:00:00 1970 +0000
323 date: Thu Jan 01 00:00:00 1970 +0000
324 summary: 1.2
324 summary: 1.2
325
325
326 changeset: 6:7373c1169842
326 changeset: 6:7373c1169842
327 user: test
327 user: test
328 date: Thu Jan 01 00:00:00 1970 +0000
328 date: Thu Jan 01 00:00:00 1970 +0000
329 summary: 1.3
329 summary: 1.3
330
330
331 changeset: 7:a6a34bfa0076
331 changeset: 7:a6a34bfa0076
332 user: test
332 user: test
333 date: Thu Jan 01 00:00:00 1970 +0000
333 date: Thu Jan 01 00:00:00 1970 +0000
334 summary: 1.3m
334 summary: 1.3m
335
335
336 changeset: 8:aa35859c02ea
336 changeset: 8:aa35859c02ea
337 tag: tip
337 tag: tip
338 parent: 3:eebf5a27f8ca
338 parent: 3:eebf5a27f8ca
339 user: test
339 user: test
340 date: Thu Jan 01 00:00:00 1970 +0000
340 date: Thu Jan 01 00:00:00 1970 +0000
341 summary: 0.3m
341 summary: 0.3m
342
342
343
343
344 Outgoing -R full.hg vs partial2 in partial
344 Outgoing -R full.hg vs partial2 in partial
345
345
346 $ hg -R bundle://../full.hg outgoing ../partial2
346 $ hg -R bundle://../full.hg outgoing ../partial2
347 comparing with ../partial2
347 comparing with ../partial2
348 searching for changes
348 searching for changes
349 changeset: 4:095197eb4973
349 changeset: 4:095197eb4973
350 parent: 0:f9ee2f85a263
350 parent: 0:f9ee2f85a263
351 user: test
351 user: test
352 date: Thu Jan 01 00:00:00 1970 +0000
352 date: Thu Jan 01 00:00:00 1970 +0000
353 summary: 1.1
353 summary: 1.1
354
354
355 changeset: 5:1bb50a9436a7
355 changeset: 5:1bb50a9436a7
356 user: test
356 user: test
357 date: Thu Jan 01 00:00:00 1970 +0000
357 date: Thu Jan 01 00:00:00 1970 +0000
358 summary: 1.2
358 summary: 1.2
359
359
360 changeset: 6:7373c1169842
360 changeset: 6:7373c1169842
361 user: test
361 user: test
362 date: Thu Jan 01 00:00:00 1970 +0000
362 date: Thu Jan 01 00:00:00 1970 +0000
363 summary: 1.3
363 summary: 1.3
364
364
365 changeset: 7:a6a34bfa0076
365 changeset: 7:a6a34bfa0076
366 user: test
366 user: test
367 date: Thu Jan 01 00:00:00 1970 +0000
367 date: Thu Jan 01 00:00:00 1970 +0000
368 summary: 1.3m
368 summary: 1.3m
369
369
370 changeset: 8:aa35859c02ea
370 changeset: 8:aa35859c02ea
371 tag: tip
371 tag: tip
372 parent: 3:eebf5a27f8ca
372 parent: 3:eebf5a27f8ca
373 user: test
373 user: test
374 date: Thu Jan 01 00:00:00 1970 +0000
374 date: Thu Jan 01 00:00:00 1970 +0000
375 summary: 0.3m
375 summary: 0.3m
376
376
377
377
378 Outgoing -R does-not-exist.hg vs partial2 in partial
378 Outgoing -R does-not-exist.hg vs partial2 in partial
379
379
380 $ hg -R bundle://../does-not-exist.hg outgoing ../partial2
380 $ hg -R bundle://../does-not-exist.hg outgoing ../partial2
381 abort: No such file or directory: ../does-not-exist.hg
381 abort: No such file or directory: ../does-not-exist.hg
382 [255]
382 [255]
383 $ cd ..
383 $ cd ..
384
384
385 Direct clone from bundle (all-history)
385 Direct clone from bundle (all-history)
386
386
387 $ hg clone full.hg full-clone
387 $ hg clone full.hg full-clone
388 requesting all changes
388 requesting all changes
389 adding changesets
389 adding changesets
390 adding manifests
390 adding manifests
391 adding file changes
391 adding file changes
392 added 9 changesets with 7 changes to 4 files (+1 heads)
392 added 9 changesets with 7 changes to 4 files (+1 heads)
393 updating to branch default
393 updating to branch default
394 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
394 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
395 $ hg -R full-clone heads
395 $ hg -R full-clone heads
396 changeset: 8:aa35859c02ea
396 changeset: 8:aa35859c02ea
397 tag: tip
397 tag: tip
398 parent: 3:eebf5a27f8ca
398 parent: 3:eebf5a27f8ca
399 user: test
399 user: test
400 date: Thu Jan 01 00:00:00 1970 +0000
400 date: Thu Jan 01 00:00:00 1970 +0000
401 summary: 0.3m
401 summary: 0.3m
402
402
403 changeset: 7:a6a34bfa0076
403 changeset: 7:a6a34bfa0076
404 user: test
404 user: test
405 date: Thu Jan 01 00:00:00 1970 +0000
405 date: Thu Jan 01 00:00:00 1970 +0000
406 summary: 1.3m
406 summary: 1.3m
407
407
408 $ rm -r full-clone
408 $ rm -r full-clone
409
409
410 When cloning from a non-copiable repository into '', do not
410 When cloning from a non-copiable repository into '', do not
411 recurse infinitely (issue 2528)
411 recurse infinitely (issue 2528)
412
412
413 $ hg clone full.hg ''
413 $ hg clone full.hg ''
414 abort: No such file or directory
414 abort: No such file or directory
415 [255]
415 [255]
416
416
417 test for http://mercurial.selenic.com/bts/issue216
417 test for http://mercurial.selenic.com/bts/issue216
418
418
419 Unbundle incremental bundles into fresh empty in one go
419 Unbundle incremental bundles into fresh empty in one go
420
420
421 $ rm -r empty
421 $ rm -r empty
422 $ hg init empty
422 $ hg init empty
423 $ hg -R test bundle --base null -r 0 ../0.hg
423 $ hg -R test bundle --base null -r 0 ../0.hg
424 1 changesets found
424 1 changesets found
425 $ hg -R test bundle --base 0 -r 1 ../1.hg
425 $ hg -R test bundle --base 0 -r 1 ../1.hg
426 1 changesets found
426 1 changesets found
427 $ hg -R empty unbundle -u ../0.hg ../1.hg
427 $ hg -R empty unbundle -u ../0.hg ../1.hg
428 adding changesets
428 adding changesets
429 adding manifests
429 adding manifests
430 adding file changes
430 adding file changes
431 added 1 changesets with 1 changes to 1 files
431 added 1 changesets with 1 changes to 1 files
432 adding changesets
432 adding changesets
433 adding manifests
433 adding manifests
434 adding file changes
434 adding file changes
435 added 1 changesets with 1 changes to 1 files
435 added 1 changesets with 1 changes to 1 files
436 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
436 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
437
437
438 test for 540d1059c802
438 test for 540d1059c802
439
439
440 test for 540d1059c802
440 test for 540d1059c802
441
441
442 $ hg init orig
442 $ hg init orig
443 $ cd orig
443 $ cd orig
444 $ echo foo > foo
444 $ echo foo > foo
445 $ hg add foo
445 $ hg add foo
446 $ hg ci -m 'add foo'
446 $ hg ci -m 'add foo'
447
447
448 $ hg clone . ../copy
448 $ hg clone . ../copy
449 updating to branch default
449 updating to branch default
450 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
450 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
451 $ hg tag foo
451 $ hg tag foo
452
452
453 $ cd ../copy
453 $ cd ../copy
454 $ echo >> foo
454 $ echo >> foo
455 $ hg ci -m 'change foo'
455 $ hg ci -m 'change foo'
456 $ hg bundle ../bundle.hg ../orig
456 $ hg bundle ../bundle.hg ../orig
457 searching for changes
457 searching for changes
458 1 changesets found
458 1 changesets found
459
459
460 $ cd ../orig
460 $ cd ../orig
461 $ hg incoming ../bundle.hg
461 $ hg incoming ../bundle.hg
462 comparing with ../bundle.hg
462 comparing with ../bundle.hg
463 searching for changes
463 searching for changes
464 changeset: 2:ed1b79f46b9a
464 changeset: 2:ed1b79f46b9a
465 tag: tip
465 tag: tip
466 parent: 0:bbd179dfa0a7
466 parent: 0:bbd179dfa0a7
467 user: test
467 user: test
468 date: Thu Jan 01 00:00:00 1970 +0000
468 date: Thu Jan 01 00:00:00 1970 +0000
469 summary: change foo
469 summary: change foo
470
470
471 $ cd ..
471 $ cd ..
472
472
473 test bundle with # in the filename (issue2154):
474
475 $ cp bundle.hg 'test#bundle.hg'
476 $ cd orig
477 $ hg incoming '../test#bundle.hg'
478 comparing with ../test
479 abort: unknown revision 'bundle.hg'!
480 [255]
481
482 note that percent encoding is not handled:
483
484 $ hg incoming ../test%23bundle.hg
485 abort: repository ../test%23bundle.hg not found!
486 [255]
487 $ cd ..
488
473 test for http://mercurial.selenic.com/bts/issue1144
489 test for http://mercurial.selenic.com/bts/issue1144
474
490
475 test that verify bundle does not traceback
491 test that verify bundle does not traceback
476
492
477 partial history bundle, fails w/ unkown parent
493 partial history bundle, fails w/ unkown parent
478
494
479 $ hg -R bundle.hg verify
495 $ hg -R bundle.hg verify
480 abort: 00changelog.i@bbd179dfa0a7: unknown parent!
496 abort: 00changelog.i@bbd179dfa0a7: unknown parent!
481 [255]
497 [255]
482
498
483 full history bundle, refuses to verify non-local repo
499 full history bundle, refuses to verify non-local repo
484
500
485 $ hg -R all.hg verify
501 $ hg -R all.hg verify
486 abort: cannot verify bundle or remote repos
502 abort: cannot verify bundle or remote repos
487 [255]
503 [255]
488
504
489 but, regular verify must continue to work
505 but, regular verify must continue to work
490
506
491 $ hg -R orig verify
507 $ hg -R orig verify
492 checking changesets
508 checking changesets
493 checking manifests
509 checking manifests
494 crosschecking files in changesets and manifests
510 crosschecking files in changesets and manifests
495 checking files
511 checking files
496 2 files, 2 changesets, 2 total revisions
512 2 files, 2 changesets, 2 total revisions
497
513
498 diff against bundle
514 diff against bundle
499
515
500 $ hg init b
516 $ hg init b
501 $ cd b
517 $ cd b
502 $ hg -R ../all.hg diff -r tip
518 $ hg -R ../all.hg diff -r tip
503 diff -r aa35859c02ea anotherfile
519 diff -r aa35859c02ea anotherfile
504 --- a/anotherfile Thu Jan 01 00:00:00 1970 +0000
520 --- a/anotherfile Thu Jan 01 00:00:00 1970 +0000
505 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
521 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
506 @@ -1,4 +0,0 @@
522 @@ -1,4 +0,0 @@
507 -0
523 -0
508 -1
524 -1
509 -2
525 -2
510 -3
526 -3
511 $ cd ..
527 $ cd ..
512
528
513 bundle single branch
529 bundle single branch
514
530
515 $ hg init branchy
531 $ hg init branchy
516 $ cd branchy
532 $ cd branchy
517 $ echo a >a
533 $ echo a >a
518 $ hg ci -Ama
534 $ hg ci -Ama
519 adding a
535 adding a
520 $ echo b >b
536 $ echo b >b
521 $ hg ci -Amb
537 $ hg ci -Amb
522 adding b
538 adding b
523 $ echo b1 >b1
539 $ echo b1 >b1
524 $ hg ci -Amb1
540 $ hg ci -Amb1
525 adding b1
541 adding b1
526 $ hg up 0
542 $ hg up 0
527 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
543 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
528 $ echo c >c
544 $ echo c >c
529 $ hg ci -Amc
545 $ hg ci -Amc
530 adding c
546 adding c
531 created new head
547 created new head
532 $ echo c1 >c1
548 $ echo c1 >c1
533 $ hg ci -Amc1
549 $ hg ci -Amc1
534 adding c1
550 adding c1
535 $ hg clone -q .#tip part
551 $ hg clone -q .#tip part
536
552
537 == bundling via incoming
553 == bundling via incoming
538
554
539 $ hg in -R part --bundle incoming.hg --template "{node}\n" .
555 $ hg in -R part --bundle incoming.hg --template "{node}\n" .
540 comparing with .
556 comparing with .
541 searching for changes
557 searching for changes
542 d2ae7f538514cd87c17547b0de4cea71fe1af9fb
558 d2ae7f538514cd87c17547b0de4cea71fe1af9fb
543 5ece8e77363e2b5269e27c66828b72da29e4341a
559 5ece8e77363e2b5269e27c66828b72da29e4341a
544
560
545 == bundling
561 == bundling
546
562
547 $ hg bundle bundle.hg part --debug
563 $ hg bundle bundle.hg part --debug
548 searching for changes
564 searching for changes
549 common changesets up to c0025332f9ed
565 common changesets up to c0025332f9ed
550 2 changesets found
566 2 changesets found
551 list of changesets:
567 list of changesets:
552 d2ae7f538514cd87c17547b0de4cea71fe1af9fb
568 d2ae7f538514cd87c17547b0de4cea71fe1af9fb
553 5ece8e77363e2b5269e27c66828b72da29e4341a
569 5ece8e77363e2b5269e27c66828b72da29e4341a
554 bundling: 1 changesets
570 bundling: 1 changesets
555 bundling: 2 changesets
571 bundling: 2 changesets
556 bundling: 1/2 manifests (50.00%)
572 bundling: 1/2 manifests (50.00%)
557 bundling: 2/2 manifests (100.00%)
573 bundling: 2/2 manifests (100.00%)
558 bundling: b 0/2 files (0.00%)
574 bundling: b 0/2 files (0.00%)
559 bundling: b1 1/2 files (50.00%)
575 bundling: b1 1/2 files (50.00%)
560
576
@@ -1,85 +1,89 b''
1 $ mkdir test
1 $ mkdir test
2 $ cd test
2 $ cd test
3
3
4 $ echo foo>foo
4 $ echo foo>foo
5 $ hg init
5 $ hg init
6 $ hg addremove
6 $ hg addremove
7 adding foo
7 adding foo
8 $ hg commit -m 1
8 $ hg commit -m 1
9
9
10 $ hg verify
10 $ hg verify
11 checking changesets
11 checking changesets
12 checking manifests
12 checking manifests
13 crosschecking files in changesets and manifests
13 crosschecking files in changesets and manifests
14 checking files
14 checking files
15 1 files, 1 changesets, 1 total revisions
15 1 files, 1 changesets, 1 total revisions
16
16
17 $ hg serve -p $HGPORT -d --pid-file=hg.pid
17 $ hg serve -p $HGPORT -d --pid-file=hg.pid
18 $ cat hg.pid >> $DAEMON_PIDS
18 $ cat hg.pid >> $DAEMON_PIDS
19 $ cd ..
19 $ cd ..
20
20
21 $ hg clone --pull http://foo:bar@localhost:$HGPORT/ copy
21 $ hg clone --pull http://foo:bar@localhost:$HGPORT/ copy
22 requesting all changes
22 requesting all changes
23 adding changesets
23 adding changesets
24 adding manifests
24 adding manifests
25 adding file changes
25 adding file changes
26 added 1 changesets with 1 changes to 1 files
26 added 1 changesets with 1 changes to 1 files
27 updating to branch default
27 updating to branch default
28 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
28 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29
29
30 $ cd copy
30 $ cd copy
31 $ hg verify
31 $ hg verify
32 checking changesets
32 checking changesets
33 checking manifests
33 checking manifests
34 crosschecking files in changesets and manifests
34 crosschecking files in changesets and manifests
35 checking files
35 checking files
36 1 files, 1 changesets, 1 total revisions
36 1 files, 1 changesets, 1 total revisions
37
37
38 $ hg co
38 $ hg co
39 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
39 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
40 $ cat foo
40 $ cat foo
41 foo
41 foo
42
42
43 $ hg manifest --debug
43 $ hg manifest --debug
44 2ed2a3912a0b24502043eae84ee4b279c18b90dd 644 foo
44 2ed2a3912a0b24502043eae84ee4b279c18b90dd 644 foo
45
45
46 $ hg pull
46 $ hg pull
47 pulling from http://foo:***@localhost:$HGPORT/
47 pulling from http://foo:***@localhost:$HGPORT/
48 searching for changes
48 searching for changes
49 no changes found
49 no changes found
50
50
51 $ hg rollback --dry-run --verbose
51 $ hg rollback --dry-run --verbose
52 repository tip rolled back to revision -1 (undo pull: http://foo:***@localhost:$HGPORT/)
52 repository tip rolled back to revision -1 (undo pull: http://foo:***@localhost:$HGPORT/)
53
53
54 Issue622: hg init && hg pull -u URL doesn't checkout default branch
54 Issue622: hg init && hg pull -u URL doesn't checkout default branch
55
55
56 $ cd ..
56 $ cd ..
57 $ hg init empty
57 $ hg init empty
58 $ cd empty
58 $ cd empty
59 $ hg pull -u ../test
59 $ hg pull -u ../test
60 pulling from ../test
60 pulling from ../test
61 requesting all changes
61 requesting all changes
62 adding changesets
62 adding changesets
63 adding manifests
63 adding manifests
64 adding file changes
64 adding file changes
65 added 1 changesets with 1 changes to 1 files
65 added 1 changesets with 1 changes to 1 files
66 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
66 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
67
67
68 Test 'file:' uri handling:
68 Test 'file:' uri handling:
69
69
70 $ hg pull -q file://../test-doesnt-exist
70 $ hg pull -q file://../test-doesnt-exist
71 abort: file:// URLs can only refer to localhost
71 abort: file:// URLs can only refer to localhost
72 [255]
72 [255]
73
73
74 $ hg pull -q file://../test
75 abort: file:// URLs can only refer to localhost
76 [255]
77
74 $ hg pull -q file:../test
78 $ hg pull -q file:../test
75
79
76 It's tricky to make file:// URLs working on every platform with
80 It's tricky to make file:// URLs working on every platform with
77 regular shell commands.
81 regular shell commands.
78
82
79 $ URL=`python -c "import os; print 'file://foobar' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test'"`
83 $ URL=`python -c "import os; print 'file://foobar' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test'"`
80 $ hg pull -q "$URL"
84 $ hg pull -q "$URL"
81 abort: file:// URLs can only refer to localhost
85 abort: file:// URLs can only refer to localhost
82 [255]
86 [255]
83
87
84 $ URL=`python -c "import os; print 'file://localhost' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test'"`
88 $ URL=`python -c "import os; print 'file://localhost' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test'"`
85 $ hg pull -q "$URL"
89 $ hg pull -q "$URL"
General Comments 0
You need to be logged in to leave comments. Login now