url: refactor util.drop_scheme() and hg.localpath() into url.localpath()...
Brodie Rao
r13826:e574207e default
@@ -1,323 +1,323 b''
1 1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 2 #
3 3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Repository class for viewing uncompressed bundles.
9 9
10 10 This provides a read-only repository interface to bundles as if they
11 11 were part of the actual repository.
12 12 """
13 13
14 14 from node import nullid
15 15 from i18n import _
16 16 import os, struct, tempfile, shutil
17 17 import changegroup, util, mdiff, discovery
18 import localrepo, changelog, manifest, filelog, revlog, error
18 import localrepo, changelog, manifest, filelog, revlog, error, url
19 19
20 20 class bundlerevlog(revlog.revlog):
21 21 def __init__(self, opener, indexfile, bundle,
22 22 linkmapper=None):
23 23 # How it works:
24 24 # to retrieve a revision, we need to know the offset of
25 25 # the revision in the bundle (an unbundle object).
26 26 #
27 27 # We store this offset in the index (start). To differentiate a
28 28 # rev in the bundle from a rev in the revlog, we check
29 29 # len(index[r]): if the tuple is bigger than 7, it is a bundle
30 30 # entry (bigger because we also store the node the delta applies to)
31 31 #
32 32 revlog.revlog.__init__(self, opener, indexfile)
33 33 self.bundle = bundle
34 34 self.basemap = {}
35 35 def chunkpositer():
36 36 while 1:
37 37 chunk = bundle.chunk()
38 38 if not chunk:
39 39 break
40 40 pos = bundle.tell()
41 41 yield chunk, pos - len(chunk)
42 42 n = len(self)
43 43 prev = None
44 44 for chunk, start in chunkpositer():
45 45 size = len(chunk)
46 46 if size < 80:
47 47 raise util.Abort(_("invalid changegroup"))
48 48 start += 80
49 49 size -= 80
50 50 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
51 51 if node in self.nodemap:
52 52 prev = node
53 53 continue
54 54 for p in (p1, p2):
55 55 if not p in self.nodemap:
56 56 raise error.LookupError(p, self.indexfile,
57 57 _("unknown parent"))
58 58 if linkmapper is None:
59 59 link = n
60 60 else:
61 61 link = linkmapper(cs)
62 62
63 63 if not prev:
64 64 prev = p1
65 65 # start, size, full unc. size, base (unused), link, p1, p2, node
66 66 e = (revlog.offset_type(start, 0), size, -1, -1, link,
67 67 self.rev(p1), self.rev(p2), node)
68 68 self.basemap[n] = prev
69 69 self.index.insert(-1, e)
70 70 self.nodemap[node] = n
71 71 prev = node
72 72 n += 1
73 73
74 74 def inbundle(self, rev):
75 75 """is rev from the bundle"""
76 76 if rev < 0:
77 77 return False
78 78 return rev in self.basemap
79 79 def bundlebase(self, rev):
80 80 return self.basemap[rev]
81 81 def _chunk(self, rev):
82 82 # Warning: for a rev that comes from the bundle, the diff is
83 83 # against bundlebase, not against rev - 1
84 84 # XXX: could use some caching
85 85 if not self.inbundle(rev):
86 86 return revlog.revlog._chunk(self, rev)
87 87 self.bundle.seek(self.start(rev))
88 88 return self.bundle.read(self.length(rev))
89 89
90 90 def revdiff(self, rev1, rev2):
91 91 """return or calculate a delta between two revisions"""
92 92 if self.inbundle(rev1) and self.inbundle(rev2):
93 93 # hot path for bundle
94 94 revb = self.rev(self.bundlebase(rev2))
95 95 if revb == rev1:
96 96 return self._chunk(rev2)
97 97 elif not self.inbundle(rev1) and not self.inbundle(rev2):
98 98 return revlog.revlog.revdiff(self, rev1, rev2)
99 99
100 100 return mdiff.textdiff(self.revision(self.node(rev1)),
101 101 self.revision(self.node(rev2)))
102 102
103 103 def revision(self, node):
104 104 """return an uncompressed revision of a given node"""
105 105 if node == nullid:
106 106 return ""
107 107
108 108 text = None
109 109 chain = []
110 110 iter_node = node
111 111 rev = self.rev(iter_node)
112 112 # reconstruct the revision if it is from a changegroup
113 113 while self.inbundle(rev):
114 114 if self._cache and self._cache[0] == iter_node:
115 115 text = self._cache[2]
116 116 break
117 117 chain.append(rev)
118 118 iter_node = self.bundlebase(rev)
119 119 rev = self.rev(iter_node)
120 120 if text is None:
121 121 text = revlog.revlog.revision(self, iter_node)
122 122
123 123 while chain:
124 124 delta = self._chunk(chain.pop())
125 125 text = mdiff.patches(text, [delta])
126 126
127 127 p1, p2 = self.parents(node)
128 128 if node != revlog.hash(text, p1, p2):
129 129 raise error.RevlogError(_("integrity check failed on %s:%d")
130 130 % (self.datafile, self.rev(node)))
131 131
132 132 self._cache = (node, self.rev(node), text)
133 133 return text
134 134
135 135 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
136 136 raise NotImplementedError
137 137 def addgroup(self, revs, linkmapper, transaction):
138 138 raise NotImplementedError
139 139 def strip(self, rev, minlink):
140 140 raise NotImplementedError
141 141 def checksize(self):
142 142 raise NotImplementedError
143 143
144 144 class bundlechangelog(bundlerevlog, changelog.changelog):
145 145 def __init__(self, opener, bundle):
146 146 changelog.changelog.__init__(self, opener)
147 147 bundlerevlog.__init__(self, opener, self.indexfile, bundle)
148 148
149 149 class bundlemanifest(bundlerevlog, manifest.manifest):
150 150 def __init__(self, opener, bundle, linkmapper):
151 151 manifest.manifest.__init__(self, opener)
152 152 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
153 153 linkmapper)
154 154
155 155 class bundlefilelog(bundlerevlog, filelog.filelog):
156 156 def __init__(self, opener, path, bundle, linkmapper):
157 157 filelog.filelog.__init__(self, opener, path)
158 158 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
159 159 linkmapper)
160 160
161 161 class bundlerepository(localrepo.localrepository):
162 162 def __init__(self, ui, path, bundlename):
163 163 self._tempparent = None
164 164 try:
165 165 localrepo.localrepository.__init__(self, ui, path)
166 166 except error.RepoError:
167 167 self._tempparent = tempfile.mkdtemp()
168 168 localrepo.instance(ui, self._tempparent, 1)
169 169 localrepo.localrepository.__init__(self, ui, self._tempparent)
170 170
171 171 if path:
172 172 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
173 173 else:
174 174 self._url = 'bundle:' + bundlename
175 175
176 176 self.tempfile = None
177 177 f = util.posixfile(bundlename, "rb")
178 178 self.bundle = changegroup.readbundle(f, bundlename)
179 179 if self.bundle.compressed():
180 180 fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
181 181 suffix=".hg10un", dir=self.path)
182 182 self.tempfile = temp
183 183 fptemp = os.fdopen(fdtemp, 'wb')
184 184
185 185 try:
186 186 fptemp.write("HG10UN")
187 187 while 1:
188 188 chunk = self.bundle.read(2**18)
189 189 if not chunk:
190 190 break
191 191 fptemp.write(chunk)
192 192 finally:
193 193 fptemp.close()
194 194
195 195 f = util.posixfile(self.tempfile, "rb")
196 196 self.bundle = changegroup.readbundle(f, bundlename)
197 197
198 198 # dict with the mapping 'filename' -> position in the bundle
199 199 self.bundlefilespos = {}
200 200
201 201 @util.propertycache
202 202 def changelog(self):
203 203 c = bundlechangelog(self.sopener, self.bundle)
204 204 self.manstart = self.bundle.tell()
205 205 return c
206 206
207 207 @util.propertycache
208 208 def manifest(self):
209 209 self.bundle.seek(self.manstart)
210 210 m = bundlemanifest(self.sopener, self.bundle, self.changelog.rev)
211 211 self.filestart = self.bundle.tell()
212 212 return m
213 213
214 214 @util.propertycache
215 215 def manstart(self):
216 216 self.changelog
217 217 return self.manstart
218 218
219 219 @util.propertycache
220 220 def filestart(self):
221 221 self.manifest
222 222 return self.filestart
223 223
224 224 def url(self):
225 225 return self._url
226 226
227 227 def file(self, f):
228 228 if not self.bundlefilespos:
229 229 self.bundle.seek(self.filestart)
230 230 while 1:
231 231 chunk = self.bundle.chunk()
232 232 if not chunk:
233 233 break
234 234 self.bundlefilespos[chunk] = self.bundle.tell()
235 235 while 1:
236 236 c = self.bundle.chunk()
237 237 if not c:
238 238 break
239 239
240 240 if f[0] == '/':
241 241 f = f[1:]
242 242 if f in self.bundlefilespos:
243 243 self.bundle.seek(self.bundlefilespos[f])
244 244 return bundlefilelog(self.sopener, f, self.bundle,
245 245 self.changelog.rev)
246 246 else:
247 247 return filelog.filelog(self.sopener, f)
248 248
249 249 def close(self):
250 250 """Close assigned bundle file immediately."""
251 251 self.bundle.close()
252 252 if self.tempfile is not None:
253 253 os.unlink(self.tempfile)
254 254 if self._tempparent:
255 255 shutil.rmtree(self._tempparent, True)
256 256
257 257 def cancopy(self):
258 258 return False
259 259
260 260 def getcwd(self):
261 261 return os.getcwd() # always outside the repo
262 262
263 263 def instance(ui, path, create):
264 264 if create:
265 265 raise util.Abort(_('cannot create new bundle repository'))
266 266 parentpath = ui.config("bundle", "mainreporoot", "")
267 267 if parentpath:
268 268 # Try to make the full path relative so we get a nice, short URL.
269 269 # In particular, we don't want temp dir names in test outputs.
270 270 cwd = os.getcwd()
271 271 if parentpath == cwd:
272 272 parentpath = ''
273 273 else:
274 274 cwd = os.path.join(cwd,'')
275 275 if parentpath.startswith(cwd):
276 276 parentpath = parentpath[len(cwd):]
277 path = util.drop_scheme('file', path)
278 if path.startswith('bundle:'):
279 path = util.drop_scheme('bundle', path)
277 u = url.url(path)
278 path = u.localpath()
279 if u.scheme == 'bundle':
280 280 s = path.split("+", 1)
281 281 if len(s) == 1:
282 282 repopath, bundlename = parentpath, s[0]
283 283 else:
284 284 repopath, bundlename = s
285 285 else:
286 286 repopath, bundlename = parentpath, path
287 287 return bundlerepository(ui, repopath, bundlename)
288 288
289 289 def getremotechanges(ui, repo, other, revs=None, bundlename=None,
290 290 force=False, usecommon=False):
291 291 tmp = discovery.findcommonincoming(repo, other, heads=revs, force=force,
292 292 commononly=usecommon)
293 293 common, incoming, rheads = tmp
294 294 if not incoming:
295 295 try:
296 296 os.unlink(bundlename)
297 297 except:
298 298 pass
299 299 return other, None, None, None
300 300
301 301 bundle = None
302 302 if bundlename or not other.local():
303 303 # create a bundle (uncompressed if other repo is not local)
304 304
305 305 if revs is None and other.capable('changegroupsubset'):
306 306 revs = rheads
307 307
308 308 if usecommon:
309 309 cg = other.getbundle('incoming', common=common, heads=revs)
310 310 elif revs is None:
311 311 cg = other.changegroup(incoming, "incoming")
312 312 else:
313 313 cg = other.changegroupsubset(incoming, revs, 'incoming')
314 314 bundletype = other.local() and "HG10BZ" or "HG10UN"
315 315 fname = bundle = changegroup.writebundle(cg, bundlename, bundletype)
316 316 # keep written bundle?
317 317 if bundlename:
318 318 bundle = None
319 319 if not other.local():
320 320 # use the created uncompressed bundlerepo
321 321 other = bundlerepository(ui, repo.root, fname)
322 322 return (other, common, incoming, bundle)
323 323
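
The functional change in this hunk is at the bottom of instance(): two hand-rolled util.drop_scheme() calls are replaced by a single url.url(path) parse followed by u.localpath(), with the bundle scheme detected by inspecting u.scheme instead of by string prefix. A minimal sketch of the new flow with concrete values (the sample path is invented for illustration, and the exact parsing behavior of url.url/u.localpath() is assumed rather than shown in this diff):

    # before: path = util.drop_scheme('file', path)
    #         if path.startswith('bundle:'):
    #             path = util.drop_scheme('bundle', path)
    # after:
    u = url.url('bundle:../repo+full.hg')   # hypothetical input
    path = u.localpath()                    # presumably '../repo+full.hg'
    if u.scheme == 'bundle':
        s = path.split("+", 1)              # ['../repo', 'full.hg']
        # a single element means "bundle file only", and repopath
        # falls back to parentpath, exactly as in the code above
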
@@ -1,573 +1,564 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from i18n import _
10 10 from lock import release
11 11 from node import hex, nullid, nullrev, short
12 12 import localrepo, bundlerepo, httprepo, sshrepo, statichttprepo, bookmarks
13 13 import lock, util, extensions, error, encoding, node
14 14 import cmdutil, discovery, url
15 15 import merge as mergemod
16 16 import verify as verifymod
17 17 import errno, os, shutil
18 18
19 19 def _local(path):
20 path = util.expandpath(util.drop_scheme('file', path))
20 path = util.expandpath(url.localpath(path))
21 21 return (os.path.isfile(path) and bundlerepo or localrepo)
22 22
23 23 def addbranchrevs(lrepo, repo, branches, revs):
24 24 hashbranch, branches = branches
25 25 if not hashbranch and not branches:
26 26 return revs or None, revs and revs[0] or None
27 27 revs = revs and list(revs) or []
28 28 if not repo.capable('branchmap'):
29 29 if branches:
30 30 raise util.Abort(_("remote branch lookup not supported"))
31 31 revs.append(hashbranch)
32 32 return revs, revs[0]
33 33 branchmap = repo.branchmap()
34 34
35 35 def primary(branch):
36 36 if branch == '.':
37 37 if not lrepo or not lrepo.local():
38 38 raise util.Abort(_("dirstate branch not accessible"))
39 39 branch = lrepo.dirstate.branch()
40 40 if branch in branchmap:
41 41 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
42 42 return True
43 43 else:
44 44 return False
45 45
46 46 for branch in branches:
47 47 if not primary(branch):
48 48 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
49 49 if hashbranch:
50 50 if not primary(hashbranch):
51 51 revs.append(hashbranch)
52 52 return revs, revs[0]
53 53
54 54 def parseurl(path, branches=None):
55 55 '''parse url#branch, returning (url, (branch, branches))'''
56 56
57 57 u = url.url(path)
58 58 if not u.fragment:
59 59 return path, (None, branches or [])
60 60 branch = u.fragment
61 61 u.fragment = None
62 62 return str(u), (branch, branches or [])
63 63
64 64 schemes = {
65 65 'bundle': bundlerepo,
66 66 'file': _local,
67 67 'http': httprepo,
68 68 'https': httprepo,
69 69 'ssh': sshrepo,
70 70 'static-http': statichttprepo,
71 71 }
72 72
73 73 def _lookup(path):
74 74 u = url.url(path)
75 75 scheme = u.scheme or 'file'
76 76 thing = schemes.get(scheme) or schemes['file']
77 77 try:
78 78 return thing(path)
79 79 except TypeError:
80 80 return thing
81 81
82 82 def islocal(repo):
83 83 '''return true if repo or path is local'''
84 84 if isinstance(repo, str):
85 85 try:
86 86 return _lookup(repo).islocal(repo)
87 87 except AttributeError:
88 88 return False
89 89 return repo.local()
90 90
91 91 def repository(ui, path='', create=False):
92 92 """return a repository object for the specified path"""
93 93 repo = _lookup(path).instance(ui, path, create)
94 94 ui = getattr(repo, "ui", ui)
95 95 for name, module in extensions.extensions():
96 96 hook = getattr(module, 'reposetup', None)
97 97 if hook:
98 98 hook(ui, repo)
99 99 return repo
100 100
101 101 def defaultdest(source):
102 102 '''return default destination of clone if none is given'''
103 103 return os.path.basename(os.path.normpath(source))
104 104
105 def localpath(path):
106 if path.startswith('file://localhost/'):
107 return path[16:]
108 if path.startswith('file://'):
109 return path[7:]
110 if path.startswith('file:'):
111 return path[5:]
112 return path
113
114 105 def share(ui, source, dest=None, update=True):
115 106 '''create a shared repository'''
116 107
117 108 if not islocal(source):
118 109 raise util.Abort(_('can only share local repositories'))
119 110
120 111 if not dest:
121 112 dest = defaultdest(source)
122 113 else:
123 114 dest = ui.expandpath(dest)
124 115
125 116 if isinstance(source, str):
126 117 origsource = ui.expandpath(source)
127 118 source, branches = parseurl(origsource)
128 119 srcrepo = repository(ui, source)
129 120 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
130 121 else:
131 122 srcrepo = source
132 123 origsource = source = srcrepo.url()
133 124 checkout = None
134 125
135 126 sharedpath = srcrepo.sharedpath # if our source is already sharing
136 127
137 128 root = os.path.realpath(dest)
138 129 roothg = os.path.join(root, '.hg')
139 130
140 131 if os.path.exists(roothg):
141 132 raise util.Abort(_('destination already exists'))
142 133
143 134 if not os.path.isdir(root):
144 135 os.mkdir(root)
145 136 util.makedir(roothg, notindexed=True)
146 137
147 138 requirements = ''
148 139 try:
149 140 requirements = srcrepo.opener('requires').read()
150 141 except IOError, inst:
151 142 if inst.errno != errno.ENOENT:
152 143 raise
153 144
154 145 requirements += 'shared\n'
155 146 file(os.path.join(roothg, 'requires'), 'w').write(requirements)
156 147 file(os.path.join(roothg, 'sharedpath'), 'w').write(sharedpath)
157 148
158 149 default = srcrepo.ui.config('paths', 'default')
159 150 if default:
160 151 f = file(os.path.join(roothg, 'hgrc'), 'w')
161 152 f.write('[paths]\ndefault = %s\n' % default)
162 153 f.close()
163 154
164 155 r = repository(ui, root)
165 156
166 157 if update:
167 158 r.ui.status(_("updating working directory\n"))
168 159 if update is not True:
169 160 checkout = update
170 161 for test in (checkout, 'default', 'tip'):
171 162 if test is None:
172 163 continue
173 164 try:
174 165 uprev = r.lookup(test)
175 166 break
176 167 except error.RepoLookupError:
177 168 continue
178 169 _update(r, uprev)
179 170
180 171 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
181 172 stream=False, branch=None):
182 173 """Make a copy of an existing repository.
183 174
184 175 Create a copy of an existing repository in a new directory. The
185 176 source and destination are URLs, as passed to the repository
186 177 function. Returns a pair of repository objects, the source and
187 178 newly created destination.
188 179
189 180 The location of the source is added to the new repository's
190 181 .hg/hgrc file, as the default to be used for future pulls and
191 182 pushes.
192 183
193 184 If an exception is raised, the partly cloned/updated destination
194 185 repository will be deleted.
195 186
196 187 Arguments:
197 188
198 189 source: repository object or URL
199 190
200 191 dest: URL of destination repository to create (defaults to base
201 192 name of source repository)
202 193
203 194 pull: always pull from source repository, even in local case
204 195
205 196 stream: stream raw data uncompressed from repository (fast over
206 197 LAN, slow over WAN)
207 198
208 199 rev: revision to clone up to (implies pull=True)
209 200
210 201 update: update working directory after clone completes, if
211 202 destination is local repository (True means update to default rev,
212 203 anything else is treated as a revision)
213 204
214 205 branch: branches to clone
215 206 """
216 207
217 208 if isinstance(source, str):
218 209 origsource = ui.expandpath(source)
219 210 source, branch = parseurl(origsource, branch)
220 211 src_repo = repository(ui, source)
221 212 else:
222 213 src_repo = source
223 214 branch = (None, branch or [])
224 215 origsource = source = src_repo.url()
225 216 rev, checkout = addbranchrevs(src_repo, src_repo, branch, rev)
226 217
227 218 if dest is None:
228 219 dest = defaultdest(source)
229 220 ui.status(_("destination directory: %s\n") % dest)
230 221 else:
231 222 dest = ui.expandpath(dest)
232 223
233 dest = localpath(dest)
234 source = localpath(source)
224 dest = url.localpath(dest)
225 source = url.localpath(source)
235 226
236 227 if os.path.exists(dest):
237 228 if not os.path.isdir(dest):
238 229 raise util.Abort(_("destination '%s' already exists") % dest)
239 230 elif os.listdir(dest):
240 231 raise util.Abort(_("destination '%s' is not empty") % dest)
241 232
242 233 class DirCleanup(object):
243 234 def __init__(self, dir_):
244 235 self.rmtree = shutil.rmtree
245 236 self.dir_ = dir_
246 237 def close(self):
247 238 self.dir_ = None
248 239 def cleanup(self):
249 240 if self.dir_:
250 241 self.rmtree(self.dir_, True)
251 242
252 243 src_lock = dest_lock = dir_cleanup = None
253 244 try:
254 245 if islocal(dest):
255 246 dir_cleanup = DirCleanup(dest)
256 247
257 248 abspath = origsource
258 249 copy = False
259 250 if src_repo.cancopy() and islocal(dest):
260 abspath = os.path.abspath(util.drop_scheme('file', origsource))
251 abspath = os.path.abspath(url.localpath(origsource))
261 252 copy = not pull and not rev
262 253
263 254 if copy:
264 255 try:
265 256 # we use a lock here because if we race with commit, we
266 257 # can end up with extra data in the cloned revlogs that's
267 258 # not pointed to by changesets, thus causing verify to
268 259 # fail
269 260 src_lock = src_repo.lock(wait=False)
270 261 except error.LockError:
271 262 copy = False
272 263
273 264 if copy:
274 265 src_repo.hook('preoutgoing', throw=True, source='clone')
275 266 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
276 267 if not os.path.exists(dest):
277 268 os.mkdir(dest)
278 269 else:
279 270 # only clean up directories we create ourselves
280 271 dir_cleanup.dir_ = hgdir
281 272 try:
282 273 dest_path = hgdir
283 274 util.makedir(dest_path, notindexed=True)
284 275 except OSError, inst:
285 276 if inst.errno == errno.EEXIST:
286 277 dir_cleanup.close()
287 278 raise util.Abort(_("destination '%s' already exists")
288 279 % dest)
289 280 raise
290 281
291 282 hardlink = None
292 283 num = 0
293 284 for f in src_repo.store.copylist():
294 285 src = os.path.join(src_repo.sharedpath, f)
295 286 dst = os.path.join(dest_path, f)
296 287 dstbase = os.path.dirname(dst)
297 288 if dstbase and not os.path.exists(dstbase):
298 289 os.mkdir(dstbase)
299 290 if os.path.exists(src):
300 291 if dst.endswith('data'):
301 292 # lock to avoid premature writing to the target
302 293 dest_lock = lock.lock(os.path.join(dstbase, "lock"))
303 294 hardlink, n = util.copyfiles(src, dst, hardlink)
304 295 num += n
305 296 if hardlink:
306 297 ui.debug("linked %d files\n" % num)
307 298 else:
308 299 ui.debug("copied %d files\n" % num)
309 300
310 301 # we need to re-init the repo after manually copying the data
311 302 # into it
312 303 dest_repo = repository(ui, dest)
313 304 src_repo.hook('outgoing', source='clone',
314 305 node=node.hex(node.nullid))
315 306 else:
316 307 try:
317 308 dest_repo = repository(ui, dest, create=True)
318 309 except OSError, inst:
319 310 if inst.errno == errno.EEXIST:
320 311 dir_cleanup.close()
321 312 raise util.Abort(_("destination '%s' already exists")
322 313 % dest)
323 314 raise
324 315
325 316 revs = None
326 317 if rev:
327 318 if 'lookup' not in src_repo.capabilities:
328 319 raise util.Abort(_("src repository does not support "
329 320 "revision lookup and so doesn't "
330 321 "support clone by revision"))
331 322 revs = [src_repo.lookup(r) for r in rev]
332 323 checkout = revs[0]
333 324 if dest_repo.local():
334 325 dest_repo.clone(src_repo, heads=revs, stream=stream)
335 326 elif src_repo.local():
336 327 src_repo.push(dest_repo, revs=revs)
337 328 else:
338 329 raise util.Abort(_("clone from remote to remote not supported"))
339 330
340 331 if dir_cleanup:
341 332 dir_cleanup.close()
342 333
343 334 if dest_repo.local():
344 335 fp = dest_repo.opener("hgrc", "w", text=True)
345 336 fp.write("[paths]\n")
346 337 fp.write("default = %s\n" % abspath)
347 338 fp.close()
348 339
349 340 dest_repo.ui.setconfig('paths', 'default', abspath)
350 341
351 342 if update:
352 343 if update is not True:
353 344 checkout = update
354 345 if src_repo.local():
355 346 checkout = src_repo.lookup(update)
356 347 for test in (checkout, 'default', 'tip'):
357 348 if test is None:
358 349 continue
359 350 try:
360 351 uprev = dest_repo.lookup(test)
361 352 break
362 353 except error.RepoLookupError:
363 354 continue
364 355 bn = dest_repo[uprev].branch()
365 356 dest_repo.ui.status(_("updating to branch %s\n") % bn)
366 357 _update(dest_repo, uprev)
367 358
368 359 # clone all bookmarks
369 360 if dest_repo.local() and src_repo.capable("pushkey"):
370 361 rb = src_repo.listkeys('bookmarks')
371 362 for k, n in rb.iteritems():
372 363 try:
373 364 m = dest_repo.lookup(n)
374 365 dest_repo._bookmarks[k] = m
375 366 except:
376 367 pass
377 368 if rb:
378 369 bookmarks.write(dest_repo)
379 370 elif src_repo.local() and dest_repo.capable("pushkey"):
380 371 for k, n in src_repo._bookmarks.iteritems():
381 372 dest_repo.pushkey('bookmarks', k, '', hex(n))
382 373
383 374 return src_repo, dest_repo
384 375 finally:
385 376 release(src_lock, dest_lock)
386 377 if dir_cleanup is not None:
387 378 dir_cleanup.cleanup()
388 379
389 380 def _showstats(repo, stats):
390 381 repo.ui.status(_("%d files updated, %d files merged, "
391 382 "%d files removed, %d files unresolved\n") % stats)
392 383
393 384 def update(repo, node):
394 385 """update the working directory to node, merging linear changes"""
395 386 stats = mergemod.update(repo, node, False, False, None)
396 387 _showstats(repo, stats)
397 388 if stats[3]:
398 389 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
399 390 return stats[3] > 0
400 391
401 392 # naming conflict in clone()
402 393 _update = update
403 394
404 395 def clean(repo, node, show_stats=True):
405 396 """forcibly switch the working directory to node, clobbering changes"""
406 397 stats = mergemod.update(repo, node, False, True, None)
407 398 if show_stats:
408 399 _showstats(repo, stats)
409 400 return stats[3] > 0
410 401
411 402 def merge(repo, node, force=None, remind=True):
412 403 """Branch merge with node, resolving changes. Return true if any
413 404 unresolved conflicts."""
414 405 stats = mergemod.update(repo, node, True, force, False)
415 406 _showstats(repo, stats)
416 407 if stats[3]:
417 408 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
418 409 "or 'hg update -C .' to abandon\n"))
419 410 elif remind:
420 411 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
421 412 return stats[3] > 0
422 413
423 414 def _incoming(displaychlist, subreporecurse, ui, repo, source,
424 415 opts, buffered=False):
425 416 """
426 417 Helper for incoming / gincoming.
427 418 displaychlist gets called with
428 419 (remoterepo, incomingchangesetlist, displayer) parameters,
429 420 and is supposed to contain only code that can't be unified.
430 421 """
431 422 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
432 423 other = repository(remoteui(repo, opts), source)
433 424 ui.status(_('comparing with %s\n') % url.hidepassword(source))
434 425 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
435 426
436 427 if revs:
437 428 revs = [other.lookup(rev) for rev in revs]
438 429 usecommon = other.capable('getbundle')
439 430 other, common, incoming, bundle = bundlerepo.getremotechanges(ui, repo, other,
440 431 revs, opts["bundle"], opts["force"],
441 432 usecommon=usecommon)
442 433 if not incoming:
443 434 ui.status(_("no changes found\n"))
444 435 return subreporecurse()
445 436
446 437 try:
447 438 if usecommon:
448 439 chlist = other.changelog.findmissing(common, revs)
449 440 else:
450 441 chlist = other.changelog.nodesbetween(incoming, revs)[0]
451 442 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
452 443
453 444 # XXX once graphlog extension makes it into core,
454 445 # should be replaced by a if graph/else
455 446 displaychlist(other, chlist, displayer)
456 447
457 448 displayer.close()
458 449 finally:
459 450 if hasattr(other, 'close'):
460 451 other.close()
461 452 if bundle:
462 453 os.unlink(bundle)
463 454 subreporecurse()
464 455 return 0 # exit code is zero since we found incoming changes
465 456
466 457 def incoming(ui, repo, source, opts):
467 458 def subreporecurse():
468 459 ret = 1
469 460 if opts.get('subrepos'):
470 461 ctx = repo[None]
471 462 for subpath in sorted(ctx.substate):
472 463 sub = ctx.sub(subpath)
473 464 ret = min(ret, sub.incoming(ui, source, opts))
474 465 return ret
475 466
476 467 def display(other, chlist, displayer):
477 468 limit = cmdutil.loglimit(opts)
478 469 if opts.get('newest_first'):
479 470 chlist.reverse()
480 471 count = 0
481 472 for n in chlist:
482 473 if limit is not None and count >= limit:
483 474 break
484 475 parents = [p for p in other.changelog.parents(n) if p != nullid]
485 476 if opts.get('no_merges') and len(parents) == 2:
486 477 continue
487 478 count += 1
488 479 displayer.show(other[n])
489 480 return _incoming(display, subreporecurse, ui, repo, source, opts)
490 481
491 482 def _outgoing(ui, repo, dest, opts):
492 483 dest = ui.expandpath(dest or 'default-push', dest or 'default')
493 484 dest, branches = parseurl(dest, opts.get('branch'))
494 485 ui.status(_('comparing with %s\n') % url.hidepassword(dest))
495 486 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
496 487 if revs:
497 488 revs = [repo.lookup(rev) for rev in revs]
498 489
499 490 other = repository(remoteui(repo, opts), dest)
500 491 o = discovery.findoutgoing(repo, other, force=opts.get('force'))
501 492 if not o:
502 493 ui.status(_("no changes found\n"))
503 494 return None
504 495
505 496 return repo.changelog.nodesbetween(o, revs)[0]
506 497
507 498 def outgoing(ui, repo, dest, opts):
508 499 def recurse():
509 500 ret = 1
510 501 if opts.get('subrepos'):
511 502 ctx = repo[None]
512 503 for subpath in sorted(ctx.substate):
513 504 sub = ctx.sub(subpath)
514 505 ret = min(ret, sub.outgoing(ui, dest, opts))
515 506 return ret
516 507
517 508 limit = cmdutil.loglimit(opts)
518 509 o = _outgoing(ui, repo, dest, opts)
519 510 if o is None:
520 511 return recurse()
521 512
522 513 if opts.get('newest_first'):
523 514 o.reverse()
524 515 displayer = cmdutil.show_changeset(ui, repo, opts)
525 516 count = 0
526 517 for n in o:
527 518 if limit is not None and count >= limit:
528 519 break
529 520 parents = [p for p in repo.changelog.parents(n) if p != nullid]
530 521 if opts.get('no_merges') and len(parents) == 2:
531 522 continue
532 523 count += 1
533 524 displayer.show(repo[n])
534 525 displayer.close()
535 526 recurse()
536 527 return 0 # exit code is zero since we found outgoing changes
537 528
538 529 def revert(repo, node, choose):
539 530 """revert changes to revision in node without updating dirstate"""
540 531 return mergemod.update(repo, node, False, True, choose)[3] > 0
541 532
542 533 def verify(repo):
543 534 """verify the consistency of a repository"""
544 535 return verifymod.verify(repo)
545 536
546 537 def remoteui(src, opts):
547 538 'build a remote ui from ui or repo and opts'
548 539 if hasattr(src, 'baseui'): # looks like a repository
549 540 dst = src.baseui.copy() # drop repo-specific config
550 541 src = src.ui # copy target options from repo
551 542 else: # assume it's a global ui object
552 543 dst = src.copy() # keep all global options
553 544
554 545 # copy ssh-specific options
555 546 for o in 'ssh', 'remotecmd':
556 547 v = opts.get(o) or src.config('ui', o)
557 548 if v:
558 549 dst.setconfig("ui", o, v)
559 550
560 551 # copy bundle-specific options
561 552 r = src.config('bundle', 'mainreporoot')
562 553 if r:
563 554 dst.setconfig('bundle', 'mainreporoot', r)
564 555
565 556 # copy selected local settings to the remote ui
566 557 for sect in ('auth', 'hostfingerprints', 'http_proxy'):
567 558 for key, val in src.configitems(sect):
568 559 dst.setconfig(sect, key, val)
569 560 v = src.config('web', 'cacerts')
570 561 if v:
571 562 dst.setconfig('web', 'cacerts', util.expandpath(v))
572 563
573 564 return dst
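
The hg.py side of the refactor deletes the module-level localpath() helper shown above and routes its three call sites (_local(), the dest/source normalization in clone(), and clone()'s cancopy fast path) through the new url.localpath(). Below is a self-contained stand-in that mirrors the removed helper's behavior, handy for checking the expected normalization; it sketches the old hg.py logic, not the new url.py implementation, which may handle additional cases:

    def localpath(path):
        # peel off a file: scheme wrapper, longest prefix first
        for prefix, start in (('file://localhost/', 16),
                              ('file://', 7),
                              ('file:', 5)):
            if path.startswith(prefix):
                return path[start:]
        return path

    assert localpath('file:///tmp/repo') == '/tmp/repo'
    assert localpath('file://localhost/tmp/repo') == '/tmp/repo'
    assert localpath('file:/tmp/repo') == '/tmp/repo'
    assert localpath('/tmp/repo') == '/tmp/repo'
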
@@ -1,1934 +1,1934 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 import url as urlmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 propertycache = util.propertycache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'parentdelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=0):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = util.path_auditor(self.root, self._checknested)
35 35 self.opener = util.opener(self.path)
36 36 self.wopener = util.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39
40 40 try:
41 41 self.ui.readconfig(self.join("hgrc"), self.root)
42 42 extensions.loadall(self.ui)
43 43 except IOError:
44 44 pass
45 45
46 46 if not os.path.isdir(self.path):
47 47 if create:
48 48 if not os.path.exists(path):
49 49 util.makedirs(path)
50 50 util.makedir(self.path, notindexed=True)
51 51 requirements = ["revlogv1"]
52 52 if self.ui.configbool('format', 'usestore', True):
53 53 os.mkdir(os.path.join(self.path, "store"))
54 54 requirements.append("store")
55 55 if self.ui.configbool('format', 'usefncache', True):
56 56 requirements.append("fncache")
57 57 if self.ui.configbool('format', 'dotencode', True):
58 58 requirements.append('dotencode')
59 59 # create an invalid changelog
60 60 self.opener("00changelog.i", "a").write(
61 61 '\0\0\0\2' # represents revlogv2
62 62 ' dummy changelog to prevent using the old repo layout'
63 63 )
64 64 if self.ui.configbool('format', 'parentdelta', False):
65 65 requirements.append("parentdelta")
66 66 else:
67 67 raise error.RepoError(_("repository %s not found") % path)
68 68 elif create:
69 69 raise error.RepoError(_("repository %s already exists") % path)
70 70 else:
71 71 # find requirements
72 72 requirements = set()
73 73 try:
74 74 requirements = set(self.opener("requires").read().splitlines())
75 75 except IOError, inst:
76 76 if inst.errno != errno.ENOENT:
77 77 raise
78 78 for r in requirements - self.supported:
79 79 raise error.RequirementError(
80 80 _("requirement '%s' not supported") % r)
81 81
82 82 self.sharedpath = self.path
83 83 try:
84 84 s = os.path.realpath(self.opener("sharedpath").read())
85 85 if not os.path.exists(s):
86 86 raise error.RepoError(
87 87 _('.hg/sharedpath points to nonexistent directory %s') % s)
88 88 self.sharedpath = s
89 89 except IOError, inst:
90 90 if inst.errno != errno.ENOENT:
91 91 raise
92 92
93 93 self.store = store.store(requirements, self.sharedpath, util.opener)
94 94 self.spath = self.store.path
95 95 self.sopener = self.store.opener
96 96 self.sjoin = self.store.join
97 97 self.opener.createmode = self.store.createmode
98 98 self._applyrequirements(requirements)
99 99 if create:
100 100 self._writerequirements()
101 101
102 102 # These two define the set of tags for this repository. _tags
103 103 # maps tag name to node; _tagtypes maps tag name to 'global' or
104 104 # 'local'. (Global tags are defined by .hgtags across all
105 105 # heads, and local tags are defined in .hg/localtags.) They
106 106 # constitute the in-memory cache of tags.
107 107 self._tags = None
108 108 self._tagtypes = None
109 109
110 110 self._branchcache = None
111 111 self._branchcachetip = None
112 112 self.nodetagscache = None
113 113 self.filterpats = {}
114 114 self._datafilters = {}
115 115 self._transref = self._lockref = self._wlockref = None
116 116
117 117 def _applyrequirements(self, requirements):
118 118 self.requirements = requirements
119 119 self.sopener.options = {}
120 120 if 'parentdelta' in requirements:
121 121 self.sopener.options['parentdelta'] = 1
122 122
123 123 def _writerequirements(self):
124 124 reqfile = self.opener("requires", "w")
125 125 for r in self.requirements:
126 126 reqfile.write("%s\n" % r)
127 127 reqfile.close()
128 128
129 129 def _checknested(self, path):
130 130 """Determine if path is a legal nested repository."""
131 131 if not path.startswith(self.root):
132 132 return False
133 133 subpath = path[len(self.root) + 1:]
134 134
135 135 # XXX: Checking against the current working copy is wrong in
136 136 # the sense that it can reject things like
137 137 #
138 138 # $ hg cat -r 10 sub/x.txt
139 139 #
140 140 # if sub/ is no longer a subrepository in the working copy
141 141 # parent revision.
142 142 #
143 143 # However, it can of course also allow things that would have
144 144 # been rejected before, such as the above cat command if sub/
145 145 # is a subrepository now, but was a normal directory before.
146 146 # The old path auditor would have rejected by mistake since it
147 147 # panics when it sees sub/.hg/.
148 148 #
149 149 # All in all, checking against the working copy seems sensible
150 150 # since we want to prevent access to nested repositories on
151 151 # the filesystem *now*.
152 152 ctx = self[None]
153 153 parts = util.splitpath(subpath)
154 154 while parts:
155 155 prefix = os.sep.join(parts)
156 156 if prefix in ctx.substate:
157 157 if prefix == subpath:
158 158 return True
159 159 else:
160 160 sub = ctx.sub(prefix)
161 161 return sub.checknested(subpath[len(prefix) + 1:])
162 162 else:
163 163 parts.pop()
164 164 return False
165 165
166 166 @util.propertycache
167 167 def _bookmarks(self):
168 168 return bookmarks.read(self)
169 169
170 170 @util.propertycache
171 171 def _bookmarkcurrent(self):
172 172 return bookmarks.readcurrent(self)
173 173
174 174 @propertycache
175 175 def changelog(self):
176 176 c = changelog.changelog(self.sopener)
177 177 if 'HG_PENDING' in os.environ:
178 178 p = os.environ['HG_PENDING']
179 179 if p.startswith(self.root):
180 180 c.readpending('00changelog.i.a')
181 181 self.sopener.options['defversion'] = c.version
182 182 return c
183 183
184 184 @propertycache
185 185 def manifest(self):
186 186 return manifest.manifest(self.sopener)
187 187
188 188 @propertycache
189 189 def dirstate(self):
190 190 warned = [0]
191 191 def validate(node):
192 192 try:
193 193 r = self.changelog.rev(node)
194 194 return node
195 195 except error.LookupError:
196 196 if not warned[0]:
197 197 warned[0] = True
198 198 self.ui.warn(_("warning: ignoring unknown"
199 199 " working parent %s!\n") % short(node))
200 200 return nullid
201 201
202 202 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
203 203
204 204 def __getitem__(self, changeid):
205 205 if changeid is None:
206 206 return context.workingctx(self)
207 207 return context.changectx(self, changeid)
208 208
209 209 def __contains__(self, changeid):
210 210 try:
211 211 return bool(self.lookup(changeid))
212 212 except error.RepoLookupError:
213 213 return False
214 214
215 215 def __nonzero__(self):
216 216 return True
217 217
218 218 def __len__(self):
219 219 return len(self.changelog)
220 220
221 221 def __iter__(self):
222 222 for i in xrange(len(self)):
223 223 yield i
224 224
225 225 def url(self):
226 226 return 'file:' + self.root
227 227
228 228 def hook(self, name, throw=False, **args):
229 229 return hook.hook(self.ui, self, name, throw, **args)
230 230
231 231 tag_disallowed = ':\r\n'
232 232
233 233 def _tag(self, names, node, message, local, user, date, extra={}):
234 234 if isinstance(names, str):
235 235 allchars = names
236 236 names = (names,)
237 237 else:
238 238 allchars = ''.join(names)
239 239 for c in self.tag_disallowed:
240 240 if c in allchars:
241 241 raise util.Abort(_('%r cannot be used in a tag name') % c)
242 242
243 243 branches = self.branchmap()
244 244 for name in names:
245 245 self.hook('pretag', throw=True, node=hex(node), tag=name,
246 246 local=local)
247 247 if name in branches:
248 248 self.ui.warn(_("warning: tag %s conflicts with existing"
249 249 " branch name\n") % name)
250 250
251 251 def writetags(fp, names, munge, prevtags):
252 252 fp.seek(0, 2)
253 253 if prevtags and prevtags[-1] != '\n':
254 254 fp.write('\n')
255 255 for name in names:
256 256 m = munge and munge(name) or name
257 257 if self._tagtypes and name in self._tagtypes:
258 258 old = self._tags.get(name, nullid)
259 259 fp.write('%s %s\n' % (hex(old), m))
260 260 fp.write('%s %s\n' % (hex(node), m))
261 261 fp.close()
262 262
263 263 prevtags = ''
264 264 if local:
265 265 try:
266 266 fp = self.opener('localtags', 'r+')
267 267 except IOError:
268 268 fp = self.opener('localtags', 'a')
269 269 else:
270 270 prevtags = fp.read()
271 271
272 272 # local tags are stored in the current charset
273 273 writetags(fp, names, None, prevtags)
274 274 for name in names:
275 275 self.hook('tag', node=hex(node), tag=name, local=local)
276 276 return
277 277
278 278 try:
279 279 fp = self.wfile('.hgtags', 'rb+')
280 280 except IOError:
281 281 fp = self.wfile('.hgtags', 'ab')
282 282 else:
283 283 prevtags = fp.read()
284 284
285 285 # committed tags are stored in UTF-8
286 286 writetags(fp, names, encoding.fromlocal, prevtags)
287 287
288 288 fp.close()
289 289
290 290 if '.hgtags' not in self.dirstate:
291 291 self[None].add(['.hgtags'])
292 292
293 293 m = matchmod.exact(self.root, '', ['.hgtags'])
294 294 tagnode = self.commit(message, user, date, extra=extra, match=m)
295 295
296 296 for name in names:
297 297 self.hook('tag', node=hex(node), tag=name, local=local)
298 298
299 299 return tagnode
300 300
301 301 def tag(self, names, node, message, local, user, date):
302 302 '''tag a revision with one or more symbolic names.
303 303
304 304 names is a list of strings or, when adding a single tag, names may be a
305 305 string.
306 306
307 307 if local is True, the tags are stored in a per-repository file.
308 308 otherwise, they are stored in the .hgtags file, and a new
309 309 changeset is committed with the change.
310 310
311 311 keyword arguments:
312 312
313 313 local: whether to store tags in non-version-controlled file
314 314 (default False)
315 315
316 316 message: commit message to use if committing
317 317
318 318 user: name of user to use if committing
319 319
320 320 date: date tuple to use if committing'''
321 321
322 322 if not local:
323 323 for x in self.status()[:5]:
324 324 if '.hgtags' in x:
325 325 raise util.Abort(_('working copy of .hgtags is changed '
326 326 '(please commit .hgtags manually)'))
327 327
328 328 self.tags() # instantiate the cache
329 329 self._tag(names, node, message, local, user, date)
330 330
331 331 def tags(self):
332 332 '''return a mapping of tag to node'''
333 333 if self._tags is None:
334 334 (self._tags, self._tagtypes) = self._findtags()
335 335
336 336 return self._tags
337 337
338 338 def _findtags(self):
339 339 '''Do the hard work of finding tags. Return a pair of dicts
340 340 (tags, tagtypes) where tags maps tag name to node, and tagtypes
341 341 maps tag name to a string like \'global\' or \'local\'.
342 342 Subclasses or extensions are free to add their own tags, but
343 343 should be aware that the returned dicts will be retained for the
344 344 duration of the localrepo object.'''
345 345
346 346 # XXX what tagtype should subclasses/extensions use? Currently
347 347 # mq and bookmarks add tags, but do not set the tagtype at all.
348 348 # Should each extension invent its own tag type? Should there
349 349 # be one tagtype for all such "virtual" tags? Or is the status
350 350 # quo fine?
351 351
352 352 alltags = {} # map tag name to (node, hist)
353 353 tagtypes = {}
354 354
355 355 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
356 356 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
357 357
358 358 # Build the return dicts. Have to re-encode tag names because
359 359 # the tags module always uses UTF-8 (in order not to lose info
360 360 # writing to the cache), but the rest of Mercurial wants them in
361 361 # local encoding.
362 362 tags = {}
363 363 for (name, (node, hist)) in alltags.iteritems():
364 364 if node != nullid:
365 365 tags[encoding.tolocal(name)] = node
366 366 tags['tip'] = self.changelog.tip()
367 367 tagtypes = dict([(encoding.tolocal(name), value)
368 368 for (name, value) in tagtypes.iteritems()])
369 369 return (tags, tagtypes)
370 370
371 371 def tagtype(self, tagname):
372 372 '''
373 373 return the type of the given tag. result can be:
374 374
375 375 'local' : a local tag
376 376 'global' : a global tag
377 377 None : tag does not exist
378 378 '''
379 379
380 380 self.tags()
381 381
382 382 return self._tagtypes.get(tagname)
383 383
384 384 def tagslist(self):
385 385 '''return a list of tags ordered by revision'''
386 386 l = []
387 387 for t, n in self.tags().iteritems():
388 388 try:
389 389 r = self.changelog.rev(n)
390 390 except:
391 391 r = -2 # sort to the beginning of the list if unknown
392 392 l.append((r, t, n))
393 393 return [(t, n) for r, t, n in sorted(l)]
394 394
395 395 def nodetags(self, node):
396 396 '''return the tags associated with a node'''
397 397 if not self.nodetagscache:
398 398 self.nodetagscache = {}
399 399 for t, n in self.tags().iteritems():
400 400 self.nodetagscache.setdefault(n, []).append(t)
401 401 for tags in self.nodetagscache.itervalues():
402 402 tags.sort()
403 403 return self.nodetagscache.get(node, [])
404 404
405 405 def nodebookmarks(self, node):
406 406 marks = []
407 407 for bookmark, n in self._bookmarks.iteritems():
408 408 if n == node:
409 409 marks.append(bookmark)
410 410 return sorted(marks)
411 411
412 412 def _branchtags(self, partial, lrev):
413 413 # TODO: rename this function?
414 414 tiprev = len(self) - 1
415 415 if lrev != tiprev:
416 416 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
417 417 self._updatebranchcache(partial, ctxgen)
418 418 self._writebranchcache(partial, self.changelog.tip(), tiprev)
419 419
420 420 return partial
421 421
422 422 def updatebranchcache(self):
423 423 tip = self.changelog.tip()
424 424 if self._branchcache is not None and self._branchcachetip == tip:
425 425 return self._branchcache
426 426
427 427 oldtip = self._branchcachetip
428 428 self._branchcachetip = tip
429 429 if oldtip is None or oldtip not in self.changelog.nodemap:
430 430 partial, last, lrev = self._readbranchcache()
431 431 else:
432 432 lrev = self.changelog.rev(oldtip)
433 433 partial = self._branchcache
434 434
435 435 self._branchtags(partial, lrev)
436 436 # this private cache holds all heads (not just tips)
437 437 self._branchcache = partial
438 438
439 439 def branchmap(self):
440 440 '''returns a dictionary {branch: [branchheads]}'''
441 441 self.updatebranchcache()
442 442 return self._branchcache
443 443
444 444 def branchtags(self):
445 445 '''return a dict where branch names map to the tipmost head of
446 446 the branch, open heads come before closed'''
447 447 bt = {}
448 448 for bn, heads in self.branchmap().iteritems():
449 449 tip = heads[-1]
450 450 for h in reversed(heads):
451 451 if 'close' not in self.changelog.read(h)[5]:
452 452 tip = h
453 453 break
454 454 bt[bn] = tip
455 455 return bt
456 456
457 457 def _readbranchcache(self):
458 458 partial = {}
459 459 try:
460 460 f = self.opener("cache/branchheads")
461 461 lines = f.read().split('\n')
462 462 f.close()
463 463 except (IOError, OSError):
464 464 return {}, nullid, nullrev
465 465
466 466 try:
467 467 last, lrev = lines.pop(0).split(" ", 1)
468 468 last, lrev = bin(last), int(lrev)
469 469 if lrev >= len(self) or self[lrev].node() != last:
470 470 # invalidate the cache
471 471 raise ValueError('invalidating branch cache (tip differs)')
472 472 for l in lines:
473 473 if not l:
474 474 continue
475 475 node, label = l.split(" ", 1)
476 476 label = encoding.tolocal(label.strip())
477 477 partial.setdefault(label, []).append(bin(node))
478 478 except KeyboardInterrupt:
479 479 raise
480 480 except Exception, inst:
481 481 if self.ui.debugflag:
482 482 self.ui.warn(str(inst), '\n')
483 483 partial, last, lrev = {}, nullid, nullrev
484 484 return partial, last, lrev
485 485
486 486 def _writebranchcache(self, branches, tip, tiprev):
487 487 try:
488 488 f = self.opener("cache/branchheads", "w", atomictemp=True)
489 489 f.write("%s %s\n" % (hex(tip), tiprev))
490 490 for label, nodes in branches.iteritems():
491 491 for node in nodes:
492 492 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
493 493 f.rename()
494 494 except (IOError, OSError):
495 495 pass
496 496
497 497 def _updatebranchcache(self, partial, ctxgen):
498 498 # collect new branch entries
499 499 newbranches = {}
500 500 for c in ctxgen:
501 501 newbranches.setdefault(c.branch(), []).append(c.node())
502 502 # if older branchheads are reachable from new ones, they aren't
503 503 # really branchheads. Note checking parents is insufficient:
504 504 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
505 505 for branch, newnodes in newbranches.iteritems():
506 506 bheads = partial.setdefault(branch, [])
507 507 bheads.extend(newnodes)
508 508 if len(bheads) <= 1:
509 509 continue
510 510 # starting from tip means fewer passes over reachable
511 511 while newnodes:
512 512 latest = newnodes.pop()
513 513 if latest not in bheads:
514 514 continue
515 515 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
516 516 reachable = self.changelog.reachable(latest, minbhrev)
517 517 reachable.remove(latest)
518 518 bheads = [b for b in bheads if b not in reachable]
519 519 partial[branch] = bheads
520 520
521 521 def lookup(self, key):
522 522 if isinstance(key, int):
523 523 return self.changelog.node(key)
524 524 elif key == '.':
525 525 return self.dirstate.parents()[0]
526 526 elif key == 'null':
527 527 return nullid
528 528 elif key == 'tip':
529 529 return self.changelog.tip()
530 530 n = self.changelog._match(key)
531 531 if n:
532 532 return n
533 533 if key in self._bookmarks:
534 534 return self._bookmarks[key]
535 535 if key in self.tags():
536 536 return self.tags()[key]
537 537 if key in self.branchtags():
538 538 return self.branchtags()[key]
539 539 n = self.changelog._partialmatch(key)
540 540 if n:
541 541 return n
542 542
543 543 # can't find key, check if it might have come from damaged dirstate
544 544 if key in self.dirstate.parents():
545 545 raise error.Abort(_("working directory has unknown parent '%s'!")
546 546 % short(key))
547 547 try:
548 548 if len(key) == 20:
549 549 key = hex(key)
550 550 except:
551 551 pass
552 552 raise error.RepoLookupError(_("unknown revision '%s'") % key)
553 553
554 554 def lookupbranch(self, key, remote=None):
555 555 repo = remote or self
556 556 if key in repo.branchmap():
557 557 return key
558 558
559 559 repo = (remote and remote.local()) and remote or self
560 560 return repo[key].branch()
561 561
562 562 def known(self, nodes):
563 563 nm = self.changelog.nodemap
564 564 return [(n in nm) for n in nodes]
565 565
566 566 def local(self):
567 567 return True
568 568
569 569 def join(self, f):
570 570 return os.path.join(self.path, f)
571 571
572 572 def wjoin(self, f):
573 573 return os.path.join(self.root, f)
574 574
575 575 def file(self, f):
576 576 if f[0] == '/':
577 577 f = f[1:]
578 578 return filelog.filelog(self.sopener, f)
579 579
580 580 def changectx(self, changeid):
581 581 return self[changeid]
582 582
583 583 def parents(self, changeid=None):
584 584 '''get list of changectxs for parents of changeid'''
585 585 return self[changeid].parents()
586 586
587 587 def filectx(self, path, changeid=None, fileid=None):
588 588 """changeid can be a changeset revision, node, or tag.
589 589 fileid can be a file revision or node."""
590 590 return context.filectx(self, path, changeid, fileid)
591 591
592 592 def getcwd(self):
593 593 return self.dirstate.getcwd()
594 594
595 595 def pathto(self, f, cwd=None):
596 596 return self.dirstate.pathto(f, cwd)
597 597
598 598 def wfile(self, f, mode='r'):
599 599 return self.wopener(f, mode)
600 600
601 601 def _link(self, f):
602 602 return os.path.islink(self.wjoin(f))
603 603
604 604 def _loadfilter(self, filter):
605 605 if filter not in self.filterpats:
606 606 l = []
607 607 for pat, cmd in self.ui.configitems(filter):
608 608 if cmd == '!':
609 609 continue
610 610 mf = matchmod.match(self.root, '', [pat])
611 611 fn = None
612 612 params = cmd
613 613 for name, filterfn in self._datafilters.iteritems():
614 614 if cmd.startswith(name):
615 615 fn = filterfn
616 616 params = cmd[len(name):].lstrip()
617 617 break
618 618 if not fn:
619 619 fn = lambda s, c, **kwargs: util.filter(s, c)
620 620 # Wrap old filters not supporting keyword arguments
621 621 if not inspect.getargspec(fn)[2]:
622 622 oldfn = fn
623 623 fn = lambda s, c, **kwargs: oldfn(s, c)
624 624 l.append((mf, fn, params))
625 625 self.filterpats[filter] = l
626 626 return self.filterpats[filter]
627 627
628 628 def _filter(self, filterpats, filename, data):
629 629 for mf, fn, cmd in filterpats:
630 630 if mf(filename):
631 631 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
632 632 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
633 633 break
634 634
635 635 return data
636 636
637 637 @propertycache
638 638 def _encodefilterpats(self):
639 639 return self._loadfilter('encode')
640 640
641 641 @propertycache
642 642 def _decodefilterpats(self):
643 643 return self._loadfilter('decode')
644 644
645 645 def adddatafilter(self, name, filter):
646 646 self._datafilters[name] = filter
647 647
648 648 def wread(self, filename):
649 649 if self._link(filename):
650 650 data = os.readlink(self.wjoin(filename))
651 651 else:
652 652 data = self.wopener(filename, 'r').read()
653 653 return self._filter(self._encodefilterpats, filename, data)
654 654
655 655 def wwrite(self, filename, data, flags):
656 656 data = self._filter(self._decodefilterpats, filename, data)
657 657 if 'l' in flags:
658 658 self.wopener.symlink(data, filename)
659 659 else:
660 660 self.wopener(filename, 'w').write(data)
661 661 if 'x' in flags:
662 662 util.set_flags(self.wjoin(filename), False, True)
663 663
664 664 def wwritedata(self, filename, data):
665 665 return self._filter(self._decodefilterpats, filename, data)
666 666
667 667 def transaction(self, desc):
668 668 tr = self._transref and self._transref() or None
669 669 if tr and tr.running():
670 670 return tr.nest()
671 671
672 672 # abort here if the journal already exists
673 673 if os.path.exists(self.sjoin("journal")):
674 674 raise error.RepoError(
675 675 _("abandoned transaction found - run hg recover"))
676 676
677 677 # save dirstate for rollback
678 678 try:
679 679 ds = self.opener("dirstate").read()
680 680 except IOError:
681 681 ds = ""
682 682 self.opener("journal.dirstate", "w").write(ds)
683 683 self.opener("journal.branch", "w").write(
684 684 encoding.fromlocal(self.dirstate.branch()))
685 685 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
686 686
687 687 renames = [(self.sjoin("journal"), self.sjoin("undo")),
688 688 (self.join("journal.dirstate"), self.join("undo.dirstate")),
689 689 (self.join("journal.branch"), self.join("undo.branch")),
690 690 (self.join("journal.desc"), self.join("undo.desc"))]
691 691 tr = transaction.transaction(self.ui.warn, self.sopener,
692 692 self.sjoin("journal"),
693 693 aftertrans(renames),
694 694 self.store.createmode)
695 695 self._transref = weakref.ref(tr)
696 696 return tr
697 697
698 698 def recover(self):
699 699 lock = self.lock()
700 700 try:
701 701 if os.path.exists(self.sjoin("journal")):
702 702 self.ui.status(_("rolling back interrupted transaction\n"))
703 703 transaction.rollback(self.sopener, self.sjoin("journal"),
704 704 self.ui.warn)
705 705 self.invalidate()
706 706 return True
707 707 else:
708 708 self.ui.warn(_("no interrupted transaction available\n"))
709 709 return False
710 710 finally:
711 711 lock.release()
712 712
713 713 def rollback(self, dryrun=False):
714 714 wlock = lock = None
715 715 try:
716 716 wlock = self.wlock()
717 717 lock = self.lock()
718 718 if os.path.exists(self.sjoin("undo")):
719 719 try:
720 720 args = self.opener("undo.desc", "r").read().splitlines()
721 721 if len(args) >= 3 and self.ui.verbose:
722 722 desc = _("repository tip rolled back to revision %s"
723 723 " (undo %s: %s)\n") % (
724 724 int(args[0]) - 1, args[1], args[2])
725 725 elif len(args) >= 2:
726 726 desc = _("repository tip rolled back to revision %s"
727 727 " (undo %s)\n") % (
728 728 int(args[0]) - 1, args[1])
729 729 except IOError:
730 730 desc = _("rolling back unknown transaction\n")
731 731 self.ui.status(desc)
732 732 if dryrun:
733 733 return
734 734 transaction.rollback(self.sopener, self.sjoin("undo"),
735 735 self.ui.warn)
736 736 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
737 737 if os.path.exists(self.join('undo.bookmarks')):
738 738 util.rename(self.join('undo.bookmarks'),
739 739 self.join('bookmarks'))
740 740 try:
741 741 branch = self.opener("undo.branch").read()
742 742 self.dirstate.setbranch(branch)
743 743 except IOError:
744 744 self.ui.warn(_("Named branch could not be reset, "
745 745 "current branch still is: %s\n")
746 746 % self.dirstate.branch())
747 747 self.invalidate()
748 748 self.dirstate.invalidate()
749 749 self.destroyed()
750 750 parents = tuple([p.rev() for p in self.parents()])
751 751 if len(parents) > 1:
752 752 self.ui.status(_("working directory now based on "
753 753 "revisions %d and %d\n") % parents)
754 754 else:
755 755 self.ui.status(_("working directory now based on "
756 756 "revision %d\n") % parents)
757 757 else:
758 758 self.ui.warn(_("no rollback information available\n"))
759 759 return 1
760 760 finally:
761 761 release(lock, wlock)
762 762
763 763 def invalidatecaches(self):
764 764 self._tags = None
765 765 self._tagtypes = None
766 766 self.nodetagscache = None
767 767 self._branchcache = None # in UTF-8
768 768 self._branchcachetip = None
769 769
770 770 def invalidate(self):
771 771 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
772 772 if a in self.__dict__:
773 773 delattr(self, a)
774 774 self.invalidatecaches()
775 775
776 776 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
777 777 try:
778 778 l = lock.lock(lockname, 0, releasefn, desc=desc)
779 779 except error.LockHeld, inst:
780 780 if not wait:
781 781 raise
782 782 self.ui.warn(_("waiting for lock on %s held by %r\n") %
783 783 (desc, inst.locker))
784 784 # default to 600 seconds timeout
785 785 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
786 786 releasefn, desc=desc)
787 787 if acquirefn:
788 788 acquirefn()
789 789 return l
790 790
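_lock() first attempts a non-blocking acquire (timeout 0); only when that fails and wait is True does it warn and retry with the configured ui.timeout (600 seconds by default). Callers are expected to pair it with try/finally, as recover() above does; a sketch, assuming an existing repo object:

    lock = repo.lock()      # blocks up to ui.timeout once contended
    try:
        pass                # ... modify .hg/store here ...
    finally:
        lock.release()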
791 791 def lock(self, wait=True):
792 792 '''Lock the repository store (.hg/store) and return the lock.
793 793 Use this before modifying the store (e.g. committing or
794 794 stripping). If you are opening a transaction, get a lock as well.'''
795 795 l = self._lockref and self._lockref()
796 796 if l is not None and l.held:
797 797 l.lock()
798 798 return l
799 799
800 800 l = self._lock(self.sjoin("lock"), wait, self.store.write,
801 801 self.invalidate, _('repository %s') % self.origroot)
802 802 self._lockref = weakref.ref(l)
803 803 return l
804 804
805 805 def wlock(self, wait=True):
806 806 '''Lock the non-store parts of the repository (everything under
807 807 .hg except .hg/store) and return the lock.
808 808 Use this before modifying files in .hg.'''
809 809 l = self._wlockref and self._wlockref()
810 810 if l is not None and l.held:
811 811 l.lock()
812 812 return l
813 813
814 814 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
815 815 self.dirstate.invalidate, _('working directory of %s') %
816 816 self.origroot)
817 817 self._wlockref = weakref.ref(l)
818 818 return l
819 819
820 820 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
821 821 """
822 822 commit an individual file as part of a larger transaction
823 823 """
824 824
825 825 fname = fctx.path()
826 826 text = fctx.data()
827 827 flog = self.file(fname)
828 828 fparent1 = manifest1.get(fname, nullid)
829 829 fparent2 = fparent2o = manifest2.get(fname, nullid)
830 830
831 831 meta = {}
832 832 copy = fctx.renamed()
833 833 if copy and copy[0] != fname:
834 834 # Mark the new revision of this file as a copy of another
835 835 # file. This copy data will effectively act as a parent
836 836 # of this new revision. If this is a merge, the first
837 837 # parent will be the nullid (meaning "look up the copy data")
838 838 # and the second one will be the other parent. For example:
839 839 #
840 840 # 0 --- 1 --- 3 rev1 changes file foo
841 841 # \ / rev2 renames foo to bar and changes it
842 842 # \- 2 -/ rev3 should have bar with all changes and
843 843 # should record that bar descends from
844 844 # bar in rev2 and foo in rev1
845 845 #
846 846 # this allows this merge to succeed:
847 847 #
848 848 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
849 849 # \ / merging rev3 and rev4 should use bar@rev2
850 850 # \- 2 --- 4 as the merge base
851 851 #
852 852
853 853 cfname = copy[0]
854 854 crev = manifest1.get(cfname)
855 855 newfparent = fparent2
856 856
857 857 if manifest2: # branch merge
858 858 if fparent2 == nullid or crev is None: # copied on remote side
859 859 if cfname in manifest2:
860 860 crev = manifest2[cfname]
861 861 newfparent = fparent1
862 862
863 863 # find source in nearest ancestor if we've lost track
864 864 if not crev:
865 865 self.ui.debug(" %s: searching for copy revision for %s\n" %
866 866 (fname, cfname))
867 867 for ancestor in self[None].ancestors():
868 868 if cfname in ancestor:
869 869 crev = ancestor[cfname].filenode()
870 870 break
871 871
872 872 if crev:
873 873 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
874 874 meta["copy"] = cfname
875 875 meta["copyrev"] = hex(crev)
876 876 fparent1, fparent2 = nullid, newfparent
877 877 else:
878 878 self.ui.warn(_("warning: can't find ancestor for '%s' "
879 879 "copied from '%s'!\n") % (fname, cfname))
880 880
881 881 elif fparent2 != nullid:
882 882 # is one parent an ancestor of the other?
883 883 fparentancestor = flog.ancestor(fparent1, fparent2)
884 884 if fparentancestor == fparent1:
885 885 fparent1, fparent2 = fparent2, nullid
886 886 elif fparentancestor == fparent2:
887 887 fparent2 = nullid
888 888
889 889 # is the file changed?
890 890 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
891 891 changelist.append(fname)
892 892 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
893 893
894 894 # are just the flags changed during merge?
895 895 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
896 896 changelist.append(fname)
897 897
898 898 return fparent1
899 899
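When a rename is recorded, the copy source travels in the filelog revision's metadata rather than in a parent pointer, and fparent1 is set to nullid as the "look up the copy data" marker. The metadata itself is just a small dict; its shape (values illustrative):

    # copyrev is the hex filenode of the source revision.
    meta = {
        'copy': 'foo',          # path the file was copied from
        'copyrev': 'a' * 40,    # hex filenode (placeholder value)
    }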
900 900 def commit(self, text="", user=None, date=None, match=None, force=False,
901 901 editor=False, extra={}):
902 902 """Add a new revision to current repository.
903 903
904 904 Revision information is gathered from the working directory,
905 905 match can be used to filter the committed files. If editor is
906 906 supplied, it is called to get a commit message.
907 907 """
908 908
909 909 def fail(f, msg):
910 910 raise util.Abort('%s: %s' % (f, msg))
911 911
912 912 if not match:
913 913 match = matchmod.always(self.root, '')
914 914
915 915 if not force:
916 916 vdirs = []
917 917 match.dir = vdirs.append
918 918 match.bad = fail
919 919
920 920 wlock = self.wlock()
921 921 try:
922 922 wctx = self[None]
923 923 merge = len(wctx.parents()) > 1
924 924
925 925 if (not force and merge and match and
926 926 (match.files() or match.anypats())):
927 927 raise util.Abort(_('cannot partially commit a merge '
928 928 '(do not specify files or patterns)'))
929 929
930 930 changes = self.status(match=match, clean=force)
931 931 if force:
932 932 changes[0].extend(changes[6]) # mq may commit unchanged files
933 933
934 934 # check subrepos
935 935 subs = []
936 936 removedsubs = set()
937 937 for p in wctx.parents():
938 938 removedsubs.update(s for s in p.substate if match(s))
939 939 for s in wctx.substate:
940 940 removedsubs.discard(s)
941 941 if match(s) and wctx.sub(s).dirty():
942 942 subs.append(s)
943 943 if (subs or removedsubs):
944 944 if (not match('.hgsub') and
945 945 '.hgsub' in (wctx.modified() + wctx.added())):
946 946 raise util.Abort(_("can't commit subrepos without .hgsub"))
947 947 if '.hgsubstate' not in changes[0]:
948 948 changes[0].insert(0, '.hgsubstate')
949 949
950 950 if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
951 951 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
952 952 if changedsubs:
953 953 raise util.Abort(_("uncommitted changes in subrepo %s")
954 954 % changedsubs[0])
955 955
956 956 # make sure all explicit patterns are matched
957 957 if not force and match.files():
958 958 matched = set(changes[0] + changes[1] + changes[2])
959 959
960 960 for f in match.files():
961 961 if f == '.' or f in matched or f in wctx.substate:
962 962 continue
963 963 if f in changes[3]: # missing
964 964 fail(f, _('file not found!'))
965 965 if f in vdirs: # visited directory
966 966 d = f + '/'
967 967 for mf in matched:
968 968 if mf.startswith(d):
969 969 break
970 970 else:
971 971 fail(f, _("no match under directory!"))
972 972 elif f not in self.dirstate:
973 973 fail(f, _("file not tracked!"))
974 974
975 975 if (not force and not extra.get("close") and not merge
976 976 and not (changes[0] or changes[1] or changes[2])
977 977 and wctx.branch() == wctx.p1().branch()):
978 978 return None
979 979
980 980 ms = mergemod.mergestate(self)
981 981 for f in changes[0]:
982 982 if f in ms and ms[f] == 'u':
983 983 raise util.Abort(_("unresolved merge conflicts "
984 984 "(see hg help resolve)"))
985 985
986 986 cctx = context.workingctx(self, text, user, date, extra, changes)
987 987 if editor:
988 988 cctx._text = editor(self, cctx, subs)
989 989 edited = (text != cctx._text)
990 990
991 991 # commit subs
992 992 if subs or removedsubs:
993 993 state = wctx.substate.copy()
994 994 for s in sorted(subs):
995 995 sub = wctx.sub(s)
996 996 self.ui.status(_('committing subrepository %s\n') %
997 997 subrepo.subrelpath(sub))
998 998 sr = sub.commit(cctx._text, user, date)
999 999 state[s] = (state[s][0], sr)
1000 1000 subrepo.writestate(self, state)
1001 1001
1002 1002 # Save commit message in case this transaction gets rolled back
1003 1003 # (e.g. by a pretxncommit hook). Leave the content alone on
1004 1004 # the assumption that the user will use the same editor again.
1005 1005 msgfile = self.opener('last-message.txt', 'wb')
1006 1006 msgfile.write(cctx._text)
1007 1007 msgfile.close()
1008 1008
1009 1009 p1, p2 = self.dirstate.parents()
1010 1010 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1011 1011 try:
1012 1012 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1013 1013 ret = self.commitctx(cctx, True)
1014 1014 except:
1015 1015 if edited:
1016 1016 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
1017 1017 self.ui.write(
1018 1018 _('note: commit message saved in %s\n') % msgfn)
1019 1019 raise
1020 1020
1021 1021 # update bookmarks, dirstate and mergestate
1022 1022 bookmarks.update(self, p1, ret)
1023 1023 for f in changes[0] + changes[1]:
1024 1024 self.dirstate.normal(f)
1025 1025 for f in changes[2]:
1026 1026 self.dirstate.forget(f)
1027 1027 self.dirstate.setparents(ret)
1028 1028 ms.reset()
1029 1029 finally:
1030 1030 wlock.release()
1031 1031
1032 1032 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1033 1033 return ret
1034 1034
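A typical caller of commit() builds a matcher and inspects the returned node; a sketch, assuming Mercurial is importable and repo is a localrepository:

    from mercurial import match as matchmod

    m = matchmod.always(repo.root, '')     # match every file
    node = repo.commit(text='fix parser bug', user='joe', match=m)
    if node is None:
        print 'nothing to commit'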
1035 1035 def commitctx(self, ctx, error=False):
1036 1036 """Add a new revision to current repository.
1037 1037 Revision information is passed via the context argument.
1038 1038 """
1039 1039
1040 1040 tr = lock = None
1041 1041 removed = list(ctx.removed())
1042 1042 p1, p2 = ctx.p1(), ctx.p2()
1043 1043 m1 = p1.manifest().copy()
1044 1044 m2 = p2.manifest()
1045 1045 user = ctx.user()
1046 1046
1047 1047 lock = self.lock()
1048 1048 try:
1049 1049 tr = self.transaction("commit")
1050 1050 trp = weakref.proxy(tr)
1051 1051
1052 1052 # check in files
1053 1053 new = {}
1054 1054 changed = []
1055 1055 linkrev = len(self)
1056 1056 for f in sorted(ctx.modified() + ctx.added()):
1057 1057 self.ui.note(f + "\n")
1058 1058 try:
1059 1059 fctx = ctx[f]
1060 1060 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1061 1061 changed)
1062 1062 m1.set(f, fctx.flags())
1063 1063 except OSError, inst:
1064 1064 self.ui.warn(_("trouble committing %s!\n") % f)
1065 1065 raise
1066 1066 except IOError, inst:
1067 1067 errcode = getattr(inst, 'errno', errno.ENOENT)
1068 1068 if error or errcode and errcode != errno.ENOENT:
1069 1069 self.ui.warn(_("trouble committing %s!\n") % f)
1070 1070 raise
1071 1071 else:
1072 1072 removed.append(f)
1073 1073
1074 1074 # update manifest
1075 1075 m1.update(new)
1076 1076 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1077 1077 drop = [f for f in removed if f in m1]
1078 1078 for f in drop:
1079 1079 del m1[f]
1080 1080 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1081 1081 p2.manifestnode(), (new, drop))
1082 1082
1083 1083 # update changelog
1084 1084 self.changelog.delayupdate()
1085 1085 n = self.changelog.add(mn, changed + removed, ctx.description(),
1086 1086 trp, p1.node(), p2.node(),
1087 1087 user, ctx.date(), ctx.extra().copy())
1088 1088 p = lambda: self.changelog.writepending() and self.root or ""
1089 1089 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1090 1090 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1091 1091 parent2=xp2, pending=p)
1092 1092 self.changelog.finalize(trp)
1093 1093 tr.close()
1094 1094
1095 1095 if self._branchcache:
1096 1096 self.updatebranchcache()
1097 1097 return n
1098 1098 finally:
1099 1099 if tr:
1100 1100 tr.release()
1101 1101 lock.release()
1102 1102
1103 1103 def destroyed(self):
1104 1104 '''Inform the repository that nodes have been destroyed.
1105 1105 Intended for use by strip and rollback, so there's a common
1106 1106 place for anything that has to be done after destroying history.'''
1107 1107 # XXX it might be nice if we could take the list of destroyed
1108 1108 # nodes, but I don't see an easy way for rollback() to do that
1109 1109
1110 1110 # Ensure the persistent tag cache is updated. Doing it now
1111 1111 # means that the tag cache only has to worry about destroyed
1112 1112 # heads immediately after a strip/rollback. That in turn
1113 1113 # guarantees that "cachetip == currenttip" (comparing both rev
1114 1114 # and node) always means no nodes have been added or destroyed.
1115 1115
1116 1116 # XXX this is suboptimal when qrefresh'ing: we strip the current
1117 1117 # head, refresh the tag cache, then immediately add a new head.
1118 1118 # But I think doing it this way is necessary for the "instant
1119 1119 # tag cache retrieval" case to work.
1120 1120 self.invalidatecaches()
1121 1121
1122 1122 def walk(self, match, node=None):
1123 1123 '''
1124 1124 walk recursively through the directory tree or a given
1125 1125 changeset, finding all files matched by the match
1126 1126 function
1127 1127 '''
1128 1128 return self[node].walk(match)
1129 1129
1130 1130 def status(self, node1='.', node2=None, match=None,
1131 1131 ignored=False, clean=False, unknown=False,
1132 1132 listsubrepos=False):
1133 1133 """return status of files between two nodes or node and working directory
1134 1134
1135 1135 If node1 is None, use the first dirstate parent instead.
1136 1136 If node2 is None, compare node1 with working directory.
1137 1137 """
1138 1138
1139 1139 def mfmatches(ctx):
1140 1140 mf = ctx.manifest().copy()
1141 1141 for fn in mf.keys():
1142 1142 if not match(fn):
1143 1143 del mf[fn]
1144 1144 return mf
1145 1145
1146 1146 if isinstance(node1, context.changectx):
1147 1147 ctx1 = node1
1148 1148 else:
1149 1149 ctx1 = self[node1]
1150 1150 if isinstance(node2, context.changectx):
1151 1151 ctx2 = node2
1152 1152 else:
1153 1153 ctx2 = self[node2]
1154 1154
1155 1155 working = ctx2.rev() is None
1156 1156 parentworking = working and ctx1 == self['.']
1157 1157 match = match or matchmod.always(self.root, self.getcwd())
1158 1158 listignored, listclean, listunknown = ignored, clean, unknown
1159 1159
1160 1160 # load earliest manifest first for caching reasons
1161 1161 if not working and ctx2.rev() < ctx1.rev():
1162 1162 ctx2.manifest()
1163 1163
1164 1164 if not parentworking:
1165 1165 def bad(f, msg):
1166 1166 if f not in ctx1:
1167 1167 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1168 1168 match.bad = bad
1169 1169
1170 1170 if working: # we need to scan the working dir
1171 1171 subrepos = []
1172 1172 if '.hgsub' in self.dirstate:
1173 1173 subrepos = ctx1.substate.keys()
1174 1174 s = self.dirstate.status(match, subrepos, listignored,
1175 1175 listclean, listunknown)
1176 1176 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1177 1177
1178 1178 # check for any possibly clean files
1179 1179 if parentworking and cmp:
1180 1180 fixup = []
1181 1181 # do a full compare of any files that might have changed
1182 1182 for f in sorted(cmp):
1183 1183 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1184 1184 or ctx1[f].cmp(ctx2[f])):
1185 1185 modified.append(f)
1186 1186 else:
1187 1187 fixup.append(f)
1188 1188
1189 1189 # update dirstate for files that are actually clean
1190 1190 if fixup:
1191 1191 if listclean:
1192 1192 clean += fixup
1193 1193
1194 1194 try:
1195 1195 # updating the dirstate is optional
1196 1196 # so we don't wait on the lock
1197 1197 wlock = self.wlock(False)
1198 1198 try:
1199 1199 for f in fixup:
1200 1200 self.dirstate.normal(f)
1201 1201 finally:
1202 1202 wlock.release()
1203 1203 except error.LockError:
1204 1204 pass
1205 1205
1206 1206 if not parentworking:
1207 1207 mf1 = mfmatches(ctx1)
1208 1208 if working:
1209 1209 # we are comparing working dir against non-parent
1210 1210 # generate a pseudo-manifest for the working dir
1211 1211 mf2 = mfmatches(self['.'])
1212 1212 for f in cmp + modified + added:
1213 1213 mf2[f] = None
1214 1214 mf2.set(f, ctx2.flags(f))
1215 1215 for f in removed:
1216 1216 if f in mf2:
1217 1217 del mf2[f]
1218 1218 else:
1219 1219 # we are comparing two revisions
1220 1220 deleted, unknown, ignored = [], [], []
1221 1221 mf2 = mfmatches(ctx2)
1222 1222
1223 1223 modified, added, clean = [], [], []
1224 1224 for fn in mf2:
1225 1225 if fn in mf1:
1226 1226 if (mf1.flags(fn) != mf2.flags(fn) or
1227 1227 (mf1[fn] != mf2[fn] and
1228 1228 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1229 1229 modified.append(fn)
1230 1230 elif listclean:
1231 1231 clean.append(fn)
1232 1232 del mf1[fn]
1233 1233 else:
1234 1234 added.append(fn)
1235 1235 removed = mf1.keys()
1236 1236
1237 1237 r = modified, added, removed, deleted, unknown, ignored, clean
1238 1238
1239 1239 if listsubrepos:
1240 1240 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1241 1241 if working:
1242 1242 rev2 = None
1243 1243 else:
1244 1244 rev2 = ctx2.substate[subpath][1]
1245 1245 try:
1246 1246 submatch = matchmod.narrowmatcher(subpath, match)
1247 1247 s = sub.status(rev2, match=submatch, ignored=listignored,
1248 1248 clean=listclean, unknown=listunknown,
1249 1249 listsubrepos=True)
1250 1250 for rfiles, sfiles in zip(r, s):
1251 1251 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1252 1252 except error.LookupError:
1253 1253 self.ui.status(_("skipping missing subrepository: %s\n")
1254 1254 % subpath)
1255 1255
1256 1256 for l in r:
1257 1257 l.sort()
1258 1258 return r
1259 1259
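status() always returns the same 7-tuple; the unknown, ignored and clean slots stay empty unless explicitly requested. Unpacking it at a call site (a sketch):

    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(unknown=True, ignored=True, clean=True)
    for f in modified:
        print 'M', f
    for f in added:
        print 'A', f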
1260 1260 def heads(self, start=None):
1261 1261 heads = self.changelog.heads(start)
1262 1262 # sort the output in rev descending order
1263 1263 return sorted(heads, key=self.changelog.rev, reverse=True)
1264 1264
1265 1265 def branchheads(self, branch=None, start=None, closed=False):
1266 1266 '''return a (possibly filtered) list of heads for the given branch
1267 1267
1268 1268 Heads are returned in topological order, from newest to oldest.
1269 1269 If branch is None, use the dirstate branch.
1270 1270 If start is not None, return only heads reachable from start.
1271 1271 If closed is True, return heads that are marked as closed as well.
1272 1272 '''
1273 1273 if branch is None:
1274 1274 branch = self[None].branch()
1275 1275 branches = self.branchmap()
1276 1276 if branch not in branches:
1277 1277 return []
1278 1278 # the cache returns heads ordered lowest to highest
1279 1279 bheads = list(reversed(branches[branch]))
1280 1280 if start is not None:
1281 1281 # filter out the heads that cannot be reached from startrev
1282 1282 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1283 1283 bheads = [h for h in bheads if h in fbheads]
1284 1284 if not closed:
1285 1285 bheads = [h for h in bheads if
1286 1286 ('close' not in self.changelog.read(h)[5])]
1287 1287 return bheads
1288 1288
1289 1289 def branches(self, nodes):
1290 1290 if not nodes:
1291 1291 nodes = [self.changelog.tip()]
1292 1292 b = []
1293 1293 for n in nodes:
1294 1294 t = n
1295 1295 while 1:
1296 1296 p = self.changelog.parents(n)
1297 1297 if p[1] != nullid or p[0] == nullid:
1298 1298 b.append((t, n, p[0], p[1]))
1299 1299 break
1300 1300 n = p[0]
1301 1301 return b
1302 1302
1303 1303 def between(self, pairs):
1304 1304 r = []
1305 1305
1306 1306 for top, bottom in pairs:
1307 1307 n, l, i = top, [], 0
1308 1308 f = 1
1309 1309
1310 1310 while n != bottom and n != nullid:
1311 1311 p = self.changelog.parents(n)[0]
1312 1312 if i == f:
1313 1313 l.append(n)
1314 1314 f = f * 2
1315 1315 n = p
1316 1316 i += 1
1317 1317
1318 1318 r.append(l)
1319 1319
1320 1320 return r
1321 1321
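between() walks first parents downward from each top, keeping nodes at exponentially growing distances (1, 2, 4, 8, ...); the old-style discovery protocol uses these samples to binary-search for the common ancestor. The sampling schedule in isolation (a sketch):

    def sample_distances(depth):
        # Distances from `top` that between() records: 1, 2, 4, ...
        f, i, out = 1, 0, []
        while i < depth:
            if i == f:
                out.append(i)
                f *= 2
            i += 1
        return out

    # sample_distances(10) -> [1, 2, 4, 8]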
1322 1322 def pull(self, remote, heads=None, force=False):
1323 1323 lock = self.lock()
1324 1324 try:
1325 1325 usecommon = remote.capable('getbundle')
1326 1326 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1327 1327 force=force, commononly=usecommon)
1328 1328 common, fetch, rheads = tmp
1329 1329 if not fetch:
1330 1330 self.ui.status(_("no changes found\n"))
1331 1331 result = 0
1332 1332 else:
1333 1333 if heads is None and list(common) == [nullid]:
1334 1334 self.ui.status(_("requesting all changes\n"))
1335 1335 elif heads is None and remote.capable('changegroupsubset'):
1336 1336 # issue1320, avoid a race if remote changed after discovery
1337 1337 heads = rheads
1338 1338
1339 1339 if usecommon:
1340 1340 cg = remote.getbundle('pull', common=common,
1341 1341 heads=heads or rheads)
1342 1342 elif heads is None:
1343 1343 cg = remote.changegroup(fetch, 'pull')
1344 1344 elif not remote.capable('changegroupsubset'):
1345 1345 raise util.Abort(_("partial pull cannot be done because "
1346 1346 "other repository doesn't support "
1347 1347 "changegroupsubset."))
1348 1348 else:
1349 1349 cg = remote.changegroupsubset(fetch, heads, 'pull')
1350 1350 result = self.addchangegroup(cg, 'pull', remote.url(),
1351 1351 lock=lock)
1352 1352 finally:
1353 1353 lock.release()
1354 1354
1355 1355 return result
1356 1356
1357 1357 def checkpush(self, force, revs):
1358 1358 """Extensions can override this function if additional checks have
1359 1359 to be performed before pushing, or call it if they override the
1360 1360 push command.
1361 1361 """
1362 1362 pass
1363 1363
1364 1364 def push(self, remote, force=False, revs=None, newbranch=False):
1365 1365 '''Push outgoing changesets (limited by revs) from the current
1366 1366 repository to remote. Return an integer:
1367 1367 - 0 means HTTP error *or* nothing to push
1368 1368 - 1 means we pushed and remote head count is unchanged *or*
1369 1369 we have outgoing changesets but refused to push
1370 1370 - other values as described by addchangegroup()
1371 1371 '''
1372 1372 # there are two ways to push to remote repo:
1373 1373 #
1374 1374 # addchangegroup assumes local user can lock remote
1375 1375 # repo (local filesystem, old ssh servers).
1376 1376 #
1377 1377 # unbundle assumes local user cannot lock remote repo (new ssh
1378 1378 # servers, http servers).
1379 1379
1380 1380 self.checkpush(force, revs)
1381 1381 lock = None
1382 1382 unbundle = remote.capable('unbundle')
1383 1383 if not unbundle:
1384 1384 lock = remote.lock()
1385 1385 try:
1386 1386 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1387 1387 newbranch)
1388 1388 ret = remote_heads
1389 1389 if cg is not None:
1390 1390 if unbundle:
1391 1391 # local repo finds heads on server, finds out what
1392 1392 # revs it must push. once revs transferred, if server
1393 1393 # finds it has different heads (someone else won
1394 1394 # commit/push race), server aborts.
1395 1395 if force:
1396 1396 remote_heads = ['force']
1397 1397 # ssh: return remote's addchangegroup()
1398 1398 # http: return remote's addchangegroup() or 0 for error
1399 1399 ret = remote.unbundle(cg, remote_heads, 'push')
1400 1400 else:
1401 1401 # we return an integer indicating remote head count change
1402 1402 ret = remote.addchangegroup(cg, 'push', self.url(),
1403 1403 lock=lock)
1404 1404 finally:
1405 1405 if lock is not None:
1406 1406 lock.release()
1407 1407
1408 1408 self.ui.debug("checking for updated bookmarks\n")
1409 1409 rb = remote.listkeys('bookmarks')
1410 1410 for k in rb.keys():
1411 1411 if k in self._bookmarks:
1412 1412 nr, nl = rb[k], hex(self._bookmarks[k])
1413 1413 if nr in self:
1414 1414 cr = self[nr]
1415 1415 cl = self[nl]
1416 1416 if cl in cr.descendants():
1417 1417 r = remote.pushkey('bookmarks', k, nr, nl)
1418 1418 if r:
1419 1419 self.ui.status(_("updating bookmark %s\n") % k)
1420 1420 else:
1421 1421 self.ui.warn(_('updating bookmark %s'
1422 1422 ' failed!\n') % k)
1423 1423
1424 1424 return ret
1425 1425
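Note that the bookmark loop above only advances a remote bookmark on a fast-forward: the local target must descend from the node the remote currently has. The guard in isolation (a sketch mirroring the descendants() check):

    def can_advance(repo, remotenode, localnode):
        # True if moving the bookmark remotenode -> localnode never
        # discards history on the remote side.
        old, new = repo[remotenode], repo[localnode]
        return new in old.descendants()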
1426 1426 def changegroupinfo(self, nodes, source):
1427 1427 if self.ui.verbose or source == 'bundle':
1428 1428 self.ui.status(_("%d changesets found\n") % len(nodes))
1429 1429 if self.ui.debugflag:
1430 1430 self.ui.debug("list of changesets:\n")
1431 1431 for node in nodes:
1432 1432 self.ui.debug("%s\n" % hex(node))
1433 1433
1434 1434 def changegroupsubset(self, bases, heads, source):
1435 1435 """Compute a changegroup consisting of all the nodes that are
1436 1436 descendants of any of the bases and ancestors of any of the heads.
1437 1437 Return a chunkbuffer object whose read() method will return
1438 1438 successive changegroup chunks.
1439 1439
1440 1440 It is fairly complex as determining which filenodes and which
1441 1441 manifest nodes need to be included for the changeset to be complete
1442 1442 is non-trivial.
1443 1443
1444 1444 Another wrinkle is doing the reverse, figuring out which changeset in
1445 1445 the changegroup a particular filenode or manifestnode belongs to.
1446 1446 """
1447 1447 cl = self.changelog
1448 1448 if not bases:
1449 1449 bases = [nullid]
1450 1450 csets, bases, heads = cl.nodesbetween(bases, heads)
1451 1451 # We assume that all ancestors of bases are known
1452 1452 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1453 1453 return self._changegroupsubset(common, csets, heads, source)
1454 1454
1455 1455 def getbundle(self, source, heads=None, common=None):
1456 1456 """Like changegroupsubset, but returns the set difference between the
1457 1457 ancestors of heads and the ancestors of common.
1458 1458
1459 1459 If heads is None, use the local heads. If common is None, use [nullid].
1460 1460
1461 1461 The nodes in common might not all be known locally due to the way the
1462 1462 current discovery protocol works.
1463 1463 """
1464 1464 cl = self.changelog
1465 1465 if common:
1466 1466 nm = cl.nodemap
1467 1467 common = [n for n in common if n in nm]
1468 1468 else:
1469 1469 common = [nullid]
1470 1470 if not heads:
1471 1471 heads = cl.heads()
1472 1472 common, missing = cl.findcommonmissing(common, heads)
1473 1473 return self._changegroupsubset(common, missing, heads, source)
1474 1474
1475 1475 def _changegroupsubset(self, commonrevs, csets, heads, source):
1476 1476
1477 1477 cl = self.changelog
1478 1478 mf = self.manifest
1479 1479 mfs = {} # needed manifests
1480 1480 fnodes = {} # needed file nodes
1481 1481 changedfiles = set()
1482 1482 count = [0]
1483 1483
1484 1484 # can we go through the fast path?
1485 1485 heads.sort()
1486 1486 if heads == sorted(self.heads()):
1487 1487 return self._changegroup(csets, source)
1488 1488
1489 1489 # slow path
1490 1490 self.hook('preoutgoing', throw=True, source=source)
1491 1491 self.changegroupinfo(csets, source)
1492 1492
1493 1493 # filter any nodes that claim to be part of the known set
1494 1494 def prune(revlog, missing):
1495 1495 for n in missing:
1496 1496 if revlog.linkrev(revlog.rev(n)) not in commonrevs:
1497 1497 yield n
1498 1498
1499 1499 def clookup(revlog, x):
1500 1500 c = cl.read(x)
1501 1501 changedfiles.update(c[3])
1502 1502 mfs.setdefault(c[0], x)
1503 1503 count[0] += 1
1504 1504 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1505 1505 return x
1506 1506
1507 1507 def mlookup(revlog, x):
1508 1508 clnode = mfs[x]
1509 1509 mdata = mf.readfast(x)
1510 1510 for f in changedfiles:
1511 1511 if f in mdata:
1512 1512 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1513 1513 count[0] += 1
1514 1514 self.ui.progress(_('bundling'), count[0],
1515 1515 unit=_('manifests'), total=len(mfs))
1516 1516 return mfs[x]
1517 1517
1518 1518 # Now that we have all these utility functions to help out and
1519 1519 # logically divide up the task, generate the group.
1520 1520 def gengroup():
1521 1521 # Create a changenode group generator that will call our functions
1522 1522 # back to lookup the owning changenode and collect information.
1523 1523 for chunk in cl.group(csets, clookup):
1524 1524 yield chunk
1525 1525 efiles = len(changedfiles)
1526 1526 self.ui.progress(_('bundling'), None)
1527 1527
1528 1528 # Create a generator for the manifestnodes that calls our lookup
1529 1529 # and data collection functions back.
1530 1530 count[0] = 0
1531 1531 for chunk in mf.group(prune(mf, mfs), mlookup):
1532 1532 yield chunk
1533 1533 self.ui.progress(_('bundling'), None)
1534 1534
1535 1535 mfs.clear()
1536 1536
1537 1537 # Go through all our files in order sorted by name.
1538 1538 for idx, fname in enumerate(sorted(changedfiles)):
1539 1539 filerevlog = self.file(fname)
1540 1540 if not len(filerevlog):
1541 1541 raise util.Abort(_("empty or missing revlog for %s") % fname)
1542 1542 # Toss out the filenodes that the recipient isn't really
1543 1543 # missing.
1544 1544 missingfnodes = fnodes.pop(fname, {})
1545 1545 first = True
1546 1546
1547 1547 def flookup(revlog, x):
1548 1548 # even though we print the same progress on
1549 1549 # most loop iterations, put the progress call
1550 1550 # here so that time estimates (if any) can be updated
1551 1551 self.ui.progress(
1552 1552 _('bundling'), idx, item=fname,
1553 1553 unit=_('files'), total=efiles)
1554 1554 return missingfnodes[x]
1555 1555
1556 1556 for chunk in filerevlog.group(prune(filerevlog, missingfnodes),
1557 1557 flookup):
1558 1558 if first:
1559 1559 if chunk == changegroup.closechunk():
1560 1560 break
1561 1561 yield changegroup.chunkheader(len(fname))
1562 1562 yield fname
1563 1563 first = False
1564 1564 yield chunk
1565 1565 # Signal that no more groups are left.
1566 1566 yield changegroup.closechunk()
1567 1567 self.ui.progress(_('bundling'), None)
1568 1568
1569 1569 if csets:
1570 1570 self.hook('outgoing', node=hex(csets[0]), source=source)
1571 1571
1572 1572 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1573 1573
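gengroup() leans on the changegroup framing from changegroup.py: each chunk is preceded by a 4-byte big-endian length that counts the length field itself, and a zero length (closechunk) terminates the current group. A minimal reader for that framing (a sketch, not the real parser):

    import struct

    def readchunks(fh):
        # Yield chunk payloads until a terminator chunk is seen.
        while True:
            l = struct.unpack('>l', fh.read(4))[0]
            if l <= 4:              # 0 closes the group
                break
            yield fh.read(l - 4)    # payload excludes the header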
1574 1574 def changegroup(self, basenodes, source):
1575 1575 # to avoid a race we use changegroupsubset() (issue1320)
1576 1576 return self.changegroupsubset(basenodes, self.heads(), source)
1577 1577
1578 1578 def _changegroup(self, nodes, source):
1579 1579 """Compute the changegroup of all nodes that we have that a recipient
1580 1580 doesn't. Return a chunkbuffer object whose read() method will return
1581 1581 successive changegroup chunks.
1582 1582
1583 1583 This is much easier than the previous function as we can assume that
1584 1584 the recipient has any changenode we aren't sending them.
1585 1585
1586 1586 nodes is the set of nodes to send"""
1587 1587
1588 1588 cl = self.changelog
1589 1589 mf = self.manifest
1590 1590 mfs = {}
1591 1591 changedfiles = set()
1592 1592
1593 1593 self.hook('preoutgoing', throw=True, source=source)
1594 1594 self.changegroupinfo(nodes, source)
1595 1595
1596 1596 revset = set([cl.rev(n) for n in nodes])
1597 1597
1598 1598 def gennodelst(log):
1599 1599 for r in log:
1600 1600 if log.linkrev(r) in revset:
1601 1601 yield log.node(r)
1602 1602
1603 1603 def gengroup():
1604 1604 '''yield a sequence of changegroup chunks (strings)'''
1605 1605 # construct a list of all changed files
1606 1606
1607 1607 count = [0]
1608 1608 def clookup(revlog, x):
1609 1609 c = cl.read(x)
1610 1610 changedfiles.update(c[3])
1611 1611 mfs.setdefault(c[0], x)
1612 1612 count[0] += 1
1613 1613 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1614 1614 return x
1615 1615
1616 1616 for chunk in cl.group(nodes, clookup):
1617 1617 yield chunk
1618 1618 efiles = len(changedfiles)
1619 1619 changecount = count[0]
1620 1620 self.ui.progress(_('bundling'), None)
1621 1621
1622 1622 count = [0]
1623 1623 def mlookup(revlog, x):
1624 1624 count[0] += 1
1625 1625 self.ui.progress(_('bundling'), count[0],
1626 1626 unit=_('manifests'), total=changecount)
1627 1627 return cl.node(revlog.linkrev(revlog.rev(x)))
1628 1628
1629 1629 for chunk in mf.group(gennodelst(mf), mlookup):
1630 1630 yield chunk
1631 1631 self.ui.progress(_('bundling'), None)
1632 1632
1633 1633 for idx, fname in enumerate(sorted(changedfiles)):
1634 1634 filerevlog = self.file(fname)
1635 1635 if not len(filerevlog):
1636 1636 raise util.Abort(_("empty or missing revlog for %s") % fname)
1637 1637 first = True
1638 1638 def flookup(revlog, x):
1639 1639 self.ui.progress(
1640 1640 _('bundling'), idx, item=fname,
1641 1641 total=efiles, unit=_('files'))
1642 1642 return cl.node(revlog.linkrev(revlog.rev(x)))
1643 1643
1644 1644 for chunk in filerevlog.group(gennodelst(filerevlog), flookup):
1645 1645 if first:
1646 1646 if chunk == changegroup.closechunk():
1647 1647 break
1648 1648 yield changegroup.chunkheader(len(fname))
1649 1649 yield fname
1650 1650 first = False
1651 1651 yield chunk
1652 1652 yield changegroup.closechunk()
1653 1653 self.ui.progress(_('bundling'), None)
1654 1654
1655 1655 if nodes:
1656 1656 self.hook('outgoing', node=hex(nodes[0]), source=source)
1657 1657
1658 1658 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1659 1659
1660 1660 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1661 1661 """Add the changegroup returned by source.read() to this repo.
1662 1662 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1663 1663 the URL of the repo where this changegroup is coming from.
1664 1664 If lock is not None, the function takes ownership of the lock
1665 1665 and releases it after the changegroup is added.
1666 1666
1667 1667 Return an integer summarizing the change to this repo:
1668 1668 - nothing changed or no source: 0
1669 1669 - more heads than before: 1+added heads (2..n)
1670 1670 - fewer heads than before: -1-removed heads (-2..-n)
1671 1671 - number of heads stays the same: 1
1672 1672 """
1673 1673 def csmap(x):
1674 1674 self.ui.debug("add changeset %s\n" % short(x))
1675 1675 return len(cl)
1676 1676
1677 1677 def revmap(x):
1678 1678 return cl.rev(x)
1679 1679
1680 1680 if not source:
1681 1681 return 0
1682 1682
1683 1683 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1684 1684
1685 1685 changesets = files = revisions = 0
1686 1686 efiles = set()
1687 1687
1688 1688 # write changelog data to temp files so concurrent readers will not see
1689 1689 # an inconsistent view
1690 1690 cl = self.changelog
1691 1691 cl.delayupdate()
1692 1692 oldheads = len(cl.heads())
1693 1693
1694 1694 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1695 1695 try:
1696 1696 trp = weakref.proxy(tr)
1697 1697 # pull off the changeset group
1698 1698 self.ui.status(_("adding changesets\n"))
1699 1699 clstart = len(cl)
1700 1700 class prog(object):
1701 1701 step = _('changesets')
1702 1702 count = 1
1703 1703 ui = self.ui
1704 1704 total = None
1705 1705 def __call__(self):
1706 1706 self.ui.progress(self.step, self.count, unit=_('chunks'),
1707 1707 total=self.total)
1708 1708 self.count += 1
1709 1709 pr = prog()
1710 1710 source.callback = pr
1711 1711
1712 1712 if (cl.addgroup(source, csmap, trp) is None
1713 1713 and not emptyok):
1714 1714 raise util.Abort(_("received changelog group is empty"))
1715 1715 clend = len(cl)
1716 1716 changesets = clend - clstart
1717 1717 for c in xrange(clstart, clend):
1718 1718 efiles.update(self[c].files())
1719 1719 efiles = len(efiles)
1720 1720 self.ui.progress(_('changesets'), None)
1721 1721
1722 1722 # pull off the manifest group
1723 1723 self.ui.status(_("adding manifests\n"))
1724 1724 pr.step = _('manifests')
1725 1725 pr.count = 1
1726 1726 pr.total = changesets # manifests <= changesets
1727 1727 # no need to check for empty manifest group here:
1728 1728 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1729 1729 # no new manifest will be created and the manifest group will
1730 1730 # be empty during the pull
1731 1731 self.manifest.addgroup(source, revmap, trp)
1732 1732 self.ui.progress(_('manifests'), None)
1733 1733
1734 1734 needfiles = {}
1735 1735 if self.ui.configbool('server', 'validate', default=False):
1736 1736 # validate incoming csets have their manifests
1737 1737 for cset in xrange(clstart, clend):
1738 1738 mfest = self.changelog.read(self.changelog.node(cset))[0]
1739 1739 mfest = self.manifest.readdelta(mfest)
1740 1740 # store file nodes we must see
1741 1741 for f, n in mfest.iteritems():
1742 1742 needfiles.setdefault(f, set()).add(n)
1743 1743
1744 1744 # process the files
1745 1745 self.ui.status(_("adding file changes\n"))
1746 1746 pr.step = 'files'
1747 1747 pr.count = 1
1748 1748 pr.total = efiles
1749 1749 source.callback = None
1750 1750
1751 1751 while 1:
1752 1752 f = source.chunk()
1753 1753 if not f:
1754 1754 break
1755 1755 self.ui.debug("adding %s revisions\n" % f)
1756 1756 pr()
1757 1757 fl = self.file(f)
1758 1758 o = len(fl)
1759 1759 if fl.addgroup(source, revmap, trp) is None:
1760 1760 raise util.Abort(_("received file revlog group is empty"))
1761 1761 revisions += len(fl) - o
1762 1762 files += 1
1763 1763 if f in needfiles:
1764 1764 needs = needfiles[f]
1765 1765 for new in xrange(o, len(fl)):
1766 1766 n = fl.node(new)
1767 1767 if n in needs:
1768 1768 needs.remove(n)
1769 1769 if not needs:
1770 1770 del needfiles[f]
1771 1771 self.ui.progress(_('files'), None)
1772 1772
1773 1773 for f, needs in needfiles.iteritems():
1774 1774 fl = self.file(f)
1775 1775 for n in needs:
1776 1776 try:
1777 1777 fl.rev(n)
1778 1778 except error.LookupError:
1779 1779 raise util.Abort(
1780 1780 _('missing file data for %s:%s - run hg verify') %
1781 1781 (f, hex(n)))
1782 1782
1783 1783 newheads = len(cl.heads())
1784 1784 heads = ""
1785 1785 if oldheads and newheads != oldheads:
1786 1786 heads = _(" (%+d heads)") % (newheads - oldheads)
1787 1787
1788 1788 self.ui.status(_("added %d changesets"
1789 1789 " with %d changes to %d files%s\n")
1790 1790 % (changesets, revisions, files, heads))
1791 1791
1792 1792 if changesets > 0:
1793 1793 p = lambda: cl.writepending() and self.root or ""
1794 1794 self.hook('pretxnchangegroup', throw=True,
1795 1795 node=hex(cl.node(clstart)), source=srctype,
1796 1796 url=url, pending=p)
1797 1797
1798 1798 # make changelog see real files again
1799 1799 cl.finalize(trp)
1800 1800
1801 1801 tr.close()
1802 1802 finally:
1803 1803 tr.release()
1804 1804 if lock:
1805 1805 lock.release()
1806 1806
1807 1807 if changesets > 0:
1808 1808 # forcefully update the on-disk branch cache
1809 1809 self.ui.debug("updating the branch cache\n")
1810 1810 self.updatebranchcache()
1811 1811 self.hook("changegroup", node=hex(cl.node(clstart)),
1812 1812 source=srctype, url=url)
1813 1813
1814 1814 for i in xrange(clstart, clend):
1815 1815 self.hook("incoming", node=hex(cl.node(i)),
1816 1816 source=srctype, url=url)
1817 1817
1818 1818 # never return 0 here:
1819 1819 if newheads < oldheads:
1820 1820 return newheads - oldheads - 1
1821 1821 else:
1822 1822 return newheads - oldheads + 1
1823 1823
1824 1824
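Decoding the return value at a call site, per the docstring's contract that success is never 0 (a sketch):

    def describe(ret):
        if ret == 0:
            return 'nothing changed (or no source)'
        if ret == 1:
            return 'changesets added, head count unchanged'
        if ret > 1:
            return '%d head(s) added' % (ret - 1)
        return '%d head(s) removed' % (-1 - ret)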
1825 1825 def stream_in(self, remote, requirements):
1826 1826 lock = self.lock()
1827 1827 try:
1828 1828 fp = remote.stream_out()
1829 1829 l = fp.readline()
1830 1830 try:
1831 1831 resp = int(l)
1832 1832 except ValueError:
1833 1833 raise error.ResponseError(
1834 1834 _('Unexpected response from remote server:'), l)
1835 1835 if resp == 1:
1836 1836 raise util.Abort(_('operation forbidden by server'))
1837 1837 elif resp == 2:
1838 1838 raise util.Abort(_('locking the remote repository failed'))
1839 1839 elif resp != 0:
1840 1840 raise util.Abort(_('the server sent an unknown error code'))
1841 1841 self.ui.status(_('streaming all changes\n'))
1842 1842 l = fp.readline()
1843 1843 try:
1844 1844 total_files, total_bytes = map(int, l.split(' ', 1))
1845 1845 except (ValueError, TypeError):
1846 1846 raise error.ResponseError(
1847 1847 _('Unexpected response from remote server:'), l)
1848 1848 self.ui.status(_('%d files to transfer, %s of data\n') %
1849 1849 (total_files, util.bytecount(total_bytes)))
1850 1850 start = time.time()
1851 1851 for i in xrange(total_files):
1852 1852 # XXX doesn't support '\n' or '\r' in filenames
1853 1853 l = fp.readline()
1854 1854 try:
1855 1855 name, size = l.split('\0', 1)
1856 1856 size = int(size)
1857 1857 except (ValueError, TypeError):
1858 1858 raise error.ResponseError(
1859 1859 _('Unexpected response from remote server:'), l)
1860 1860 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1861 1861 # for backwards compat, name was partially encoded
1862 1862 ofp = self.sopener(store.decodedir(name), 'w')
1863 1863 for chunk in util.filechunkiter(fp, limit=size):
1864 1864 ofp.write(chunk)
1865 1865 ofp.close()
1866 1866 elapsed = time.time() - start
1867 1867 if elapsed <= 0:
1868 1868 elapsed = 0.001
1869 1869 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1870 1870 (util.bytecount(total_bytes), elapsed,
1871 1871 util.bytecount(total_bytes / elapsed)))
1872 1872
1873 1873 # new requirements = old non-format requirements + new format-related
1874 1874 # requirements from the streamed-in repository
1875 1875 requirements.update(set(self.requirements) - self.supportedformats)
1876 1876 self._applyrequirements(requirements)
1877 1877 self._writerequirements()
1878 1878
1879 1879 self.invalidate()
1880 1880 return len(self.heads()) + 1
1881 1881 finally:
1882 1882 lock.release()
1883 1883
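The stream format consumed above is line-oriented: a status-code line, then '<filecount> <bytecount>', then for each file a 'name\0size' line followed by exactly size bytes of raw store data. A toy emitter of that shape (a sketch, not the server's implementation):

    def emit_stream(files, write):
        # files: list of (name, data) pairs; write: callable(str).
        write('0\n')                            # 0 == OK
        total = sum(len(d) for _, d in files)
        write('%d %d\n' % (len(files), total))
        for name, data in files:
            write('%s\0%d\n' % (name, len(data)))
            write(data)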
1884 1884 def clone(self, remote, heads=[], stream=False):
1885 1885 '''clone remote repository.
1886 1886
1887 1887 keyword arguments:
1888 1888 heads: list of revs to clone (forces use of pull)
1889 1889 stream: use streaming clone if possible'''
1890 1890
1891 1891 # now, all clients that can request uncompressed clones can
1892 1892 # read repo formats supported by all servers that can serve
1893 1893 # them.
1894 1894
1895 1895 # if revlog format changes, client will have to check version
1896 1896 # and format flags on "stream" capability, and use
1897 1897 # uncompressed only if compatible.
1898 1898
1899 1899 if stream and not heads:
1900 1900 # 'stream' means remote revlog format is revlogv1 only
1901 1901 if remote.capable('stream'):
1902 1902 return self.stream_in(remote, set(('revlogv1',)))
1903 1903 # otherwise, 'streamreqs' contains the remote revlog format
1904 1904 streamreqs = remote.capable('streamreqs')
1905 1905 if streamreqs:
1906 1906 streamreqs = set(streamreqs.split(','))
1907 1907 # if we support it, stream in and adjust our requirements
1908 1908 if not streamreqs - self.supportedformats:
1909 1909 return self.stream_in(remote, streamreqs)
1910 1910 return self.pull(remote, heads)
1911 1911
1912 1912 def pushkey(self, namespace, key, old, new):
1913 1913 return pushkey.push(self, namespace, key, old, new)
1914 1914
1915 1915 def listkeys(self, namespace):
1916 1916 return pushkey.list(self, namespace)
1917 1917
1918 1918 def debugwireargs(self, one, two, three=None, four=None):
1919 1919 '''used to test argument passing over the wire'''
1920 1920 return "%s %s %s %s" % (one, two, three, four)
1921 1921
1922 1922 # used to avoid circular references so destructors work
1923 1923 def aftertrans(files):
1924 1924 renamefiles = [tuple(t) for t in files]
1925 1925 def a():
1926 1926 for src, dest in renamefiles:
1927 1927 util.rename(src, dest)
1928 1928 return a
1929 1929
1930 1930 def instance(ui, path, create):
1931 return localrepository(ui, util.drop_scheme('file', path), create)
1931 return localrepository(ui, urlmod.localpath(path), create)
1932 1932
1933 1933 def islocal(path):
1934 1934 return True
@@ -1,937 +1,960 b''
1 1 # url.py - HTTP handling for mercurial
2 2 #
3 3 # Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 import urllib, urllib2, httplib, os, socket, cStringIO
11 11 import __builtin__
12 12 from i18n import _
13 13 import keepalive, util
14 14
15 15 class url(object):
16 16 """Reliable URL parser.
17 17
18 18 This parses URLs and provides attributes for the following
19 19 components:
20 20
21 21 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
22 22
23 23 Missing components are set to None. The only exception is
24 24 fragment, which is set to '' if present but empty.
25 25
26 26 If parse_fragment is False, fragment is included in query. If
27 27 parse_query is False, query is included in path. If both are
28 28 False, both fragment and query are included in path.
29 29
30 30 See http://www.ietf.org/rfc/rfc2396.txt for more information.
31 31
32 32 Note that for backward compatibility reasons, bundle URLs do not
33 33 take host names. That means 'bundle://../' has a path of '../'.
34 34
35 35 Examples:
36 36
37 37 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
38 38 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
39 39 >>> url('ssh://[::1]:2200//home/joe/repo')
40 40 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
41 41 >>> url('file:///home/joe/repo')
42 42 <url scheme: 'file', path: '/home/joe/repo'>
43 43 >>> url('bundle:foo')
44 44 <url scheme: 'bundle', path: 'foo'>
45 45 >>> url('bundle://../foo')
46 46 <url scheme: 'bundle', path: '../foo'>
47 47 >>> url('c:\\\\foo\\\\bar')
48 48 <url path: 'c:\\\\foo\\\\bar'>
49 49
50 50 Authentication credentials:
51 51
52 52 >>> url('ssh://joe:xyz@x/repo')
53 53 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
54 54 >>> url('ssh://joe@x/repo')
55 55 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
56 56
57 57 Query strings and fragments:
58 58
59 59 >>> url('http://host/a?b#c')
60 60 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
61 61 >>> url('http://host/a?b#c', parse_query=False, parse_fragment=False)
62 62 <url scheme: 'http', host: 'host', path: 'a?b#c'>
63 63 """
64 64
65 65 _safechars = "!~*'()+"
66 66 _safepchars = "/!~*'()+"
67 67
68 68 def __init__(self, path, parse_query=True, parse_fragment=True):
69 69 # We slowly chomp away at path until we have only the path left
70 70 self.scheme = self.user = self.passwd = self.host = None
71 71 self.port = self.path = self.query = self.fragment = None
72 72 self._localpath = True
73 self._hostport = ''
74 self._origpath = path
73 75
74 76 # special case for Windows drive letters
75 77 if has_drive_letter(path):
76 78 self.path = path
77 79 return
78 80
79 81 # For compatibility reasons, we can't handle bundle paths as
80 82 # normal URLS
81 83 if path.startswith('bundle:'):
82 84 self.scheme = 'bundle'
83 85 path = path[7:]
84 86 if path.startswith('//'):
85 87 path = path[2:]
86 88 self.path = path
87 89 return
88 90
89 91 if not path.startswith('/') and ':' in path:
90 92 parts = path.split(':', 1)
91 93 if parts[0]:
92 94 self.scheme, path = parts
93 95 self._localpath = False
94 96
95 97 if not path:
96 98 path = None
97 99 if self._localpath:
98 100 self.path = ''
99 101 return
100 102 else:
101 103 if parse_fragment and '#' in path:
102 104 path, self.fragment = path.split('#', 1)
103 105 if not path:
104 106 path = None
105 107 if self._localpath:
106 108 self.path = path
107 109 return
108 110
109 111 if parse_query and '?' in path:
110 112 path, self.query = path.split('?', 1)
111 113 if not path:
112 114 path = None
113 115 if not self.query:
114 116 self.query = None
115 117
116 118 # // is required to specify a host/authority
117 119 if path and path.startswith('//'):
118 120 parts = path[2:].split('/', 1)
119 121 if len(parts) > 1:
120 122 self.host, path = parts
121 123 path = path
122 124 else:
123 125 self.host = parts[0]
124 126 path = None
125 127 if not self.host:
126 128 self.host = None
127 129 if path:
128 130 path = '/' + path
129 131
130 132 if self.host and '@' in self.host:
131 133 self.user, self.host = self.host.rsplit('@', 1)
132 134 if ':' in self.user:
133 135 self.user, self.passwd = self.user.split(':', 1)
134 136 if not self.host:
135 137 self.host = None
136 138
137 139 # Don't split on colons in IPv6 addresses without ports
138 140 if (self.host and ':' in self.host and
139 141 not (self.host.startswith('[') and self.host.endswith(']'))):
142 self._hostport = self.host
140 143 self.host, self.port = self.host.rsplit(':', 1)
141 144 if not self.host:
142 145 self.host = None
143 146
144 147 if (self.host and self.scheme == 'file' and
145 148 self.host not in ('localhost', '127.0.0.1', '[::1]')):
146 149 raise util.Abort(_('file:// URLs can only refer to localhost'))
147 150
148 151 self.path = path
149 152
150 153 for a in ('user', 'passwd', 'host', 'port',
151 154 'path', 'query', 'fragment'):
152 155 v = getattr(self, a)
153 156 if v is not None:
154 157 setattr(self, a, urllib.unquote(v))
155 158
156 159 def __repr__(self):
157 160 attrs = []
158 161 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
159 162 'query', 'fragment'):
160 163 v = getattr(self, a)
161 164 if v is not None:
162 165 attrs.append('%s: %r' % (a, v))
163 166 return '<url %s>' % ', '.join(attrs)
164 167
165 168 def __str__(self):
166 169 """Join the URL's components back into a URL string.
167 170
168 171 Examples:
169 172
170 173 >>> str(url('http://user:pw@host:80/?foo#bar'))
171 174 'http://user:pw@host:80/?foo#bar'
172 175 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
173 176 'ssh://user:pw@[::1]:2200//home/joe#'
174 177 >>> str(url('http://localhost:80//'))
175 178 'http://localhost:80//'
176 179 >>> str(url('http://localhost:80/'))
177 180 'http://localhost:80/'
178 181 >>> str(url('http://localhost:80'))
179 182 'http://localhost:80/'
180 183 >>> str(url('bundle:foo'))
181 184 'bundle:foo'
182 185 >>> str(url('bundle://../foo'))
183 186 'bundle:../foo'
184 187 >>> str(url('path'))
185 188 'path'
186 189 """
187 190 if self._localpath:
188 191 s = self.path
189 192 if self.scheme == 'bundle':
190 193 s = 'bundle:' + s
191 194 if self.fragment:
192 195 s += '#' + self.fragment
193 196 return s
194 197
195 198 s = self.scheme + ':'
196 199 if (self.user or self.passwd or self.host or
197 200 self.scheme and not self.path):
198 201 s += '//'
199 202 if self.user:
200 203 s += urllib.quote(self.user, safe=self._safechars)
201 204 if self.passwd:
202 205 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
203 206 if self.user or self.passwd:
204 207 s += '@'
205 208 if self.host:
206 209 if not (self.host.startswith('[') and self.host.endswith(']')):
207 210 s += urllib.quote(self.host)
208 211 else:
209 212 s += self.host
210 213 if self.port:
211 214 s += ':' + urllib.quote(self.port)
212 215 if self.host:
213 216 s += '/'
214 217 if self.path:
215 218 s += urllib.quote(self.path, safe=self._safepchars)
216 219 if self.query:
217 220 s += '?' + urllib.quote(self.query, safe=self._safepchars)
218 221 if self.fragment is not None:
219 222 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
220 223 return s
221 224
222 225 def authinfo(self):
223 226 user, passwd = self.user, self.passwd
224 227 try:
225 228 self.user, self.passwd = None, None
226 229 s = str(self)
227 230 finally:
228 231 self.user, self.passwd = user, passwd
229 232 if not self.user:
230 233 return (s, None)
231 234 return (s, (None, (str(self), self.host),
232 235 self.user, self.passwd or ''))
233 236
237 def localpath(self):
238 if self.scheme == 'file' or self.scheme == 'bundle':
239 path = self.path or '/'
240 # For Windows, we need to promote hosts containing drive
241 # letters to paths with drive letters.
242 if has_drive_letter(self._hostport):
243 path = self._hostport + '/' + self.path
244 elif self.host is not None and self.path:
245 path = '/' + path
246 # We also need to handle the case of file:///C:/, which
247 # should return C:/, not /C:/.
248 elif has_drive_letter(path):
249 # Strip leading slash from paths with drive names
250 return path[1:]
251 return path
252 return self._origpath
253
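localpath() undoes the parse for local access: file: and bundle: URLs collapse back to filesystem paths, while anything else returns the string originally passed in. Expected behaviour, doctest-style (a sketch):

    >>> url('file:///tmp/repo').localpath()
    '/tmp/repo'
    >>> url('bundle:foo').localpath()
    'foo'
    >>> url('http://host/repo').localpath()
    'http://host/repo'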
234 254 def has_scheme(path):
235 255 return bool(url(path).scheme)
236 256
237 257 def has_drive_letter(path):
238 258 return path[1:2] == ':' and path[0:1].isalpha()
239 259
260 def localpath(path):
261 return url(path, parse_query=False, parse_fragment=False).localpath()
262
240 263 def hidepassword(u):
241 264 '''hide user credential in a url string'''
242 265 u = url(u)
243 266 if u.passwd:
244 267 u.passwd = '***'
245 268 return str(u)
246 269
247 270 def removeauth(u):
248 271 '''remove all authentication information from a url string'''
249 272 u = url(u)
250 273 u.user = u.passwd = None
251 274 return str(u)
252 275
253 276 def netlocsplit(netloc):
254 277 '''split [user[:passwd]@]host[:port] into 4-tuple.'''
255 278
256 279 a = netloc.find('@')
257 280 if a == -1:
258 281 user, passwd = None, None
259 282 else:
260 283 userpass, netloc = netloc[:a], netloc[a + 1:]
261 284 c = userpass.find(':')
262 285 if c == -1:
263 286 user, passwd = urllib.unquote(userpass), None
264 287 else:
265 288 user = urllib.unquote(userpass[:c])
266 289 passwd = urllib.unquote(userpass[c + 1:])
267 290 c = netloc.find(':')
268 291 if c == -1:
269 292 host, port = netloc, None
270 293 else:
271 294 host, port = netloc[:c], netloc[c + 1:]
272 295 return host, port, user, passwd
273 296
274 297 def netlocunsplit(host, port, user=None, passwd=None):
275 298 '''turn host, port, user, passwd into [user[:passwd]@]host[:port].'''
276 299 if port:
277 300 hostport = host + ':' + port
278 301 else:
279 302 hostport = host
280 303 if user:
281 304 quote = lambda s: urllib.quote(s, safe='')
282 305 if passwd:
283 306 userpass = quote(user) + ':' + quote(passwd)
284 307 else:
285 308 userpass = quote(user)
286 309 return userpass + '@' + hostport
287 310 return hostport
288 311
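netlocsplit() and netlocunsplit() are inverses up to percent-encoding of the credentials; a round trip, doctest-style (a sketch):

    >>> netlocsplit('joe:secret@example.com:8080')
    ('example.com', '8080', 'joe', 'secret')
    >>> netlocunsplit('example.com', '8080', 'joe', 'secret')
    'joe:secret@example.com:8080'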
289 312 def readauthforuri(ui, uri):
290 313 # Read configuration
291 314 config = dict()
292 315 for key, val in ui.configitems('auth'):
293 316 if '.' not in key:
294 317 ui.warn(_("ignoring invalid [auth] key '%s'\n") % key)
295 318 continue
296 319 group, setting = key.rsplit('.', 1)
297 320 gdict = config.setdefault(group, dict())
298 321 if setting in ('username', 'cert', 'key'):
299 322 val = util.expandpath(val)
300 323 gdict[setting] = val
301 324
302 325 # Find the best match
303 326 scheme, hostpath = uri.split('://', 1)
304 327 bestlen = 0
305 328 bestauth = None
306 329 for group, auth in config.iteritems():
307 330 prefix = auth.get('prefix')
308 331 if not prefix:
309 332 continue
310 333 p = prefix.split('://', 1)
311 334 if len(p) > 1:
312 335 schemes, prefix = [p[0]], p[1]
313 336 else:
314 337 schemes = (auth.get('schemes') or 'https').split()
315 338 if (prefix == '*' or hostpath.startswith(prefix)) and \
316 339 len(prefix) > bestlen and scheme in schemes:
317 340 bestlen = len(prefix)
318 341 bestauth = group, auth
319 342 return bestauth
320 343
321 344 _safe = ('abcdefghijklmnopqrstuvwxyz'
322 345 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
323 346 '0123456789' '_.-/')
324 347 _safeset = None
325 348 _hex = None
326 349 def quotepath(path):
327 350 '''quote the path part of a URL
328 351
329 352 This is similar to urllib.quote, but it also tries to avoid
330 353 quoting things twice (inspired by wget):
331 354
332 355 >>> quotepath('abc def')
333 356 'abc%20def'
334 357 >>> quotepath('abc%20def')
335 358 'abc%20def'
336 359 >>> quotepath('abc%20 def')
337 360 'abc%20%20def'
338 361 >>> quotepath('abc def%20')
339 362 'abc%20def%20'
340 363 >>> quotepath('abc def%2')
341 364 'abc%20def%252'
342 365 >>> quotepath('abc def%')
343 366 'abc%20def%25'
344 367 '''
345 368 global _safeset, _hex
346 369 if _safeset is None:
347 370 _safeset = set(_safe)
348 371 _hex = set('abcdefABCDEF0123456789')
349 372 l = list(path)
350 373 for i in xrange(len(l)):
351 374 c = l[i]
352 375 if (c == '%' and i + 2 < len(l) and
353 376 l[i + 1] in _hex and l[i + 2] in _hex):
354 377 pass
355 378 elif c not in _safeset:
356 379 l[i] = '%%%02X' % ord(c)
357 380 return ''.join(l)
358 381
359 382 class passwordmgr(urllib2.HTTPPasswordMgrWithDefaultRealm):
360 383 def __init__(self, ui):
361 384 urllib2.HTTPPasswordMgrWithDefaultRealm.__init__(self)
362 385 self.ui = ui
363 386
364 387 def find_user_password(self, realm, authuri):
365 388 authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
366 389 self, realm, authuri)
367 390 user, passwd = authinfo
368 391 if user and passwd:
369 392 self._writedebug(user, passwd)
370 393 return (user, passwd)
371 394
372 395 if not user:
373 396 res = readauthforuri(self.ui, authuri)
374 397 if res:
375 398 group, auth = res
376 399 user, passwd = auth.get('username'), auth.get('password')
377 400 self.ui.debug("using auth.%s.* for authentication\n" % group)
378 401 if not user or not passwd:
379 402 if not self.ui.interactive():
380 403 raise util.Abort(_('http authorization required'))
381 404
382 405 self.ui.write(_("http authorization required\n"))
383 406 self.ui.write(_("realm: %s\n") % realm)
384 407 if user:
385 408 self.ui.write(_("user: %s\n") % user)
386 409 else:
387 410 user = self.ui.prompt(_("user:"), default=None)
388 411
389 412 if not passwd:
390 413 passwd = self.ui.getpass()
391 414
392 415 self.add_password(realm, authuri, user, passwd)
393 416 self._writedebug(user, passwd)
394 417 return (user, passwd)
395 418
396 419 def _writedebug(self, user, passwd):
397 420 msg = _('http auth: user %s, password %s\n')
398 421 self.ui.debug(msg % (user, passwd and '*' * len(passwd) or 'not set'))
399 422
400 423 class proxyhandler(urllib2.ProxyHandler):
401 424 def __init__(self, ui):
402 425 proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
403 426 # XXX proxyauthinfo = None
404 427
405 428 if proxyurl:
406 429 # proxy can be proper url or host[:port]
407 430 if not (proxyurl.startswith('http:') or
408 431 proxyurl.startswith('https:')):
409 432 proxyurl = 'http://' + proxyurl + '/'
410 433 proxy = url(proxyurl)
411 434 if not proxy.user:
412 435 proxy.user = ui.config("http_proxy", "user")
413 436 proxy.passwd = ui.config("http_proxy", "passwd")
414 437
415 438 # see if we should use a proxy for this url
416 439 no_list = ["localhost", "127.0.0.1"]
417 440 no_list.extend([p.lower() for
418 441 p in ui.configlist("http_proxy", "no")])
419 442 no_list.extend([p.strip().lower() for
420 443 p in os.getenv("no_proxy", '').split(',')
421 444 if p.strip()])
422 445 # "http_proxy.always" config is for running tests on localhost
423 446 if ui.configbool("http_proxy", "always"):
424 447 self.no_list = []
425 448 else:
426 449 self.no_list = no_list
427 450
428 451 proxyurl = str(proxy)
429 452 proxies = {'http': proxyurl, 'https': proxyurl}
430 453 ui.debug('proxying through http://%s:%s\n' %
431 454 (proxy.host, proxy.port))
432 455 else:
433 456 proxies = {}
434 457
435 458 # urllib2 takes proxy values from the environment and those
436 459 # will take precedence if found, so drop them
437 460 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
438 461 try:
439 462 if env in os.environ:
440 463 del os.environ[env]
441 464 except OSError:
442 465 pass
443 466
444 467 urllib2.ProxyHandler.__init__(self, proxies)
445 468 self.ui = ui
446 469
447 470 def proxy_open(self, req, proxy, type_):
448 471 host = req.get_host().split(':')[0]
449 472 if host in self.no_list:
450 473 return None
451 474
452 475 # work around a bug in Python < 2.4.2
453 476 # (it leaves a "\n" at the end of Proxy-authorization headers)
454 477 baseclass = req.__class__
455 478 class _request(baseclass):
456 479 def add_header(self, key, val):
457 480 if key.lower() == 'proxy-authorization':
458 481 val = val.strip()
459 482 return baseclass.add_header(self, key, val)
460 483 req.__class__ = _request
461 484
462 485 return urllib2.ProxyHandler.proxy_open(self, req, proxy, type_)
463 486
464 487 class httpsendfile(object):
465 488 """This is a wrapper around the objects returned by python's "open".
466 489
467 490 Its purpose is to send file-like objects via HTTP and, to do so, it
468 491 defines a __len__ method to feed the Content-Length header.
469 492 """
470 493
471 494 def __init__(self, ui, *args, **kwargs):
472 495 # We can't just "self._data = open(*args, **kwargs)" here because there
473 496 # is an "open" function defined in this module that shadows the global
474 497 # one
475 498 self.ui = ui
476 499 self._data = __builtin__.open(*args, **kwargs)
477 500 self.seek = self._data.seek
478 501 self.close = self._data.close
479 502 self.write = self._data.write
480 503 self._len = os.fstat(self._data.fileno()).st_size
481 504 self._pos = 0
482 505 self._total = len(self) / 1024 * 2
483 506
484 507 def read(self, *args, **kwargs):
485 508 try:
486 509 ret = self._data.read(*args, **kwargs)
487 510 except EOFError:
488 511 self.ui.progress(_('sending'), None)
ret = '' # editor's fix: keep 'ret' bound when EOFError is raised
489 512 self._pos += len(ret)
490 513 # We pass double the max for total because we currently have
491 514 # to send the bundle twice in the case of a server that
492 515 # requires authentication. Since we can't know until we try
493 516 # once whether authentication will be required, just lie to
494 517 # the user and maybe the push succeeds suddenly at 50%.
495 518 self.ui.progress(_('sending'), self._pos / 1024,
496 519 unit=_('kb'), total=self._total)
497 520 return ret
498 521
499 522 def __len__(self):
500 523 return self._len
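
A minimal usage sketch (editor's illustration; the file name and URL are made up, and 'ui' is an existing ui object). The __len__ method is what lets urllib2 advertise a correct Content-Length while the body is streamed in chunks:

data = httpsendfile(ui, 'bundle.hg', 'rb')
req = urllib2.Request('https://example.com/repo', data)
len(data) # the value urllib2 uses for the Content-Length header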
501 524
502 525 def _gen_sendfile(orgsend):
503 526 def _sendfile(self, data):
504 527 # send a file
505 528 if isinstance(data, httpsendfile):
506 529 # if auth required, some data sent twice, so rewind here
507 530 data.seek(0)
508 531 for chunk in util.filechunkiter(data):
509 532 orgsend(self, chunk)
510 533 else:
511 534 orgsend(self, data)
512 535 return _sendfile
513 536
514 537 has_https = hasattr(urllib2, 'HTTPSHandler')
515 538 if has_https:
516 539 try:
517 540 # avoid using deprecated/broken FakeSocket in python 2.6
518 541 import ssl
519 542 _ssl_wrap_socket = ssl.wrap_socket
520 543 CERT_REQUIRED = ssl.CERT_REQUIRED
521 544 except ImportError:
522 545 CERT_REQUIRED = 2
523 546
524 547 def _ssl_wrap_socket(sock, key_file, cert_file,
525 548 cert_reqs=CERT_REQUIRED, ca_certs=None):
526 549 if ca_certs:
527 550 raise util.Abort(_(
528 551 'certificate checking requires Python 2.6'))
529 552
530 553 ssl = socket.ssl(sock, key_file, cert_file)
531 554 return httplib.FakeSocket(sock, ssl)
532 555
533 556 try:
534 557 _create_connection = socket.create_connection
535 558 except AttributeError:
536 559 _GLOBAL_DEFAULT_TIMEOUT = object()
537 560
538 561 def _create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
539 562 source_address=None):
540 563 # lifted from Python 2.6
541 564
542 565 msg = "getaddrinfo returns an empty list"
543 566 host, port = address
544 567 for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
545 568 af, socktype, proto, canonname, sa = res
546 569 sock = None
547 570 try:
548 571 sock = socket.socket(af, socktype, proto)
549 572 if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
550 573 sock.settimeout(timeout)
551 574 if source_address:
552 575 sock.bind(source_address)
553 576 sock.connect(sa)
554 577 return sock
555 578
556 579 except socket.error, msg:
557 580 if sock is not None:
558 581 sock.close()
559 582
560 583 raise socket.error, msg
561 584
562 585 class httpconnection(keepalive.HTTPConnection):
563 586 # must be able to send big bundle as stream.
564 587 send = _gen_sendfile(keepalive.HTTPConnection.send)
565 588
566 589 def connect(self):
567 590 if has_https and self.realhostport: # use CONNECT proxy
568 591 self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
569 592 self.sock.connect((self.host, self.port))
570 593 if _generic_proxytunnel(self):
571 594 # we do not support client x509 certificates
572 595 self.sock = _ssl_wrap_socket(self.sock, None, None)
573 596 else:
574 597 keepalive.HTTPConnection.connect(self)
575 598
576 599 def getresponse(self):
577 600 proxyres = getattr(self, 'proxyres', None)
578 601 if proxyres:
579 602 if proxyres.will_close:
580 603 self.close()
581 604 self.proxyres = None
582 605 return proxyres
583 606 return keepalive.HTTPConnection.getresponse(self)
584 607
585 608 # general transaction handler to support different ways to handle
586 609 # HTTPS proxying before and after Python 2.6.3.
587 610 def _generic_start_transaction(handler, h, req):
588 611 if hasattr(req, '_tunnel_host') and req._tunnel_host:
589 612 tunnel_host = req._tunnel_host
590 613 if tunnel_host[:7] not in ['http://', 'https:/']: # 7 chars: 'https://' truncates to 'https:/'
591 614 tunnel_host = 'https://' + tunnel_host
592 615 new_tunnel = True
593 616 else:
594 617 tunnel_host = req.get_selector()
595 618 new_tunnel = False
596 619
597 620 if new_tunnel or tunnel_host == req.get_full_url(): # has proxy
598 621 u = url(tunnel_host)
599 622 if new_tunnel or u.scheme == 'https': # only use CONNECT for HTTPS
600 623 h.realhostport = ':'.join([u.host, (u.port or '443')])
601 624 h.headers = req.headers.copy()
602 625 h.headers.update(handler.parent.addheaders)
603 626 return
604 627
605 628 h.realhostport = None
606 629 h.headers = None
607 630
608 631 def _generic_proxytunnel(self):
609 632 proxyheaders = dict(
610 633 [(x, self.headers[x]) for x in self.headers
611 634 if x.lower().startswith('proxy-')])
612 635 self._set_hostport(self.host, self.port)
613 636 self.send('CONNECT %s HTTP/1.0\r\n' % self.realhostport)
614 637 for header in proxyheaders.iteritems():
615 638 self.send('%s: %s\r\n' % header)
616 639 self.send('\r\n')
617 640
618 641 # most of the following code is duplicated from
619 642 # httplib.HTTPConnection because there is no adequate place to
620 643 # override just the parts whose behavior we need to change
621 644 res = self.response_class(self.sock,
622 645 strict=self.strict,
623 646 method=self._method)
624 647
625 648 while True:
626 649 version, status, reason = res._read_status()
627 650 if status != httplib.CONTINUE:
628 651 break
629 652 while True:
630 653 skip = res.fp.readline().strip()
631 654 if not skip:
632 655 break
633 656 res.status = status
634 657 res.reason = reason.strip()
635 658
636 659 if res.status == 200:
637 660 while True:
638 661 line = res.fp.readline()
639 662 if line == '\r\n':
640 663 break
641 664 return True
642 665
643 666 if version == 'HTTP/1.0':
644 667 res.version = 10
645 668 elif version.startswith('HTTP/1.'):
646 669 res.version = 11
647 670 elif version == 'HTTP/0.9':
648 671 res.version = 9
649 672 else:
650 673 raise httplib.UnknownProtocol(version)
651 674
652 675 if res.version == 9:
653 676 res.length = None
654 677 res.chunked = 0
655 678 res.will_close = 1
656 679 res.msg = httplib.HTTPMessage(cStringIO.StringIO())
657 680 return False
658 681
659 682 res.msg = httplib.HTTPMessage(res.fp)
660 683 res.msg.fp = None
661 684
662 685 # are we using the chunked-style of transfer encoding?
663 686 trenc = res.msg.getheader('transfer-encoding')
664 687 if trenc and trenc.lower() == "chunked":
665 688 res.chunked = 1
666 689 res.chunk_left = None
667 690 else:
668 691 res.chunked = 0
669 692
670 693 # will the connection close at the end of the response?
671 694 res.will_close = res._check_close()
672 695
673 696 # do we have a Content-Length?
674 697 # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
675 698 length = res.msg.getheader('content-length')
676 699 if length and not res.chunked:
677 700 try:
678 701 res.length = int(length)
679 702 except ValueError:
680 703 res.length = None
681 704 else:
682 705 if res.length < 0: # ignore nonsensical negative lengths
683 706 res.length = None
684 707 else:
685 708 res.length = None
686 709
687 710 # does the body have a fixed length? (of zero)
688 711 if (status == httplib.NO_CONTENT or status == httplib.NOT_MODIFIED or
689 712 100 <= status < 200 or # 1xx codes
690 713 res._method == 'HEAD'):
691 714 res.length = 0
692 715
693 716 # if the connection remains open, and we aren't using chunked, and
694 717 # a content-length was not provided, then assume that the connection
695 718 # WILL close.
696 719 if (not res.will_close and
697 720 not res.chunked and
698 721 res.length is None):
699 722 res.will_close = 1
700 723
701 724 self.proxyres = res
702 725
703 726 return False
704 727
705 728 class httphandler(keepalive.HTTPHandler):
706 729 def http_open(self, req):
707 730 return self.do_open(httpconnection, req)
708 731
709 732 def _start_transaction(self, h, req):
710 733 _generic_start_transaction(self, h, req)
711 734 return keepalive.HTTPHandler._start_transaction(self, h, req)
712 735
713 736 def _verifycert(cert, hostname):
714 737 '''Verify that cert (in socket.getpeercert() format) matches hostname.
715 738 CRLs are not handled.
716 739
717 740 Returns error message if any problems are found and None on success.
718 741 '''
719 742 if not cert:
720 743 return _('no certificate received')
721 744 dnsname = hostname.lower()
722 745 def matchdnsname(certname):
723 746 return (certname == dnsname or
724 747 '.' in dnsname and certname == '*.' + dnsname.split('.', 1)[1])
725 748
726 749 san = cert.get('subjectAltName', [])
727 750 if san:
728 751 certnames = [value.lower() for key, value in san if key == 'DNS']
729 752 for name in certnames:
730 753 if matchdnsname(name):
731 754 return None
732 755 return _('certificate is for %s') % ', '.join(certnames)
733 756
734 757 # subject is only checked when subjectAltName is empty
735 758 for s in cert.get('subject', []):
736 759 key, value = s[0]
737 760 if key == 'commonName':
738 761 try:
739 762 # 'subject' entries are unicode
740 763 certname = value.lower().encode('ascii')
741 764 except UnicodeEncodeError:
742 765 return _('IDN in certificate not supported')
743 766 if matchdnsname(certname):
744 767 return None
745 768 return _('certificate is for %s') % certname
746 769 return _('no commonName or subjectAltName found in certificate')
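
Illustrative checks (editor's sketch, not part of this changeset; assumes a locale where _() returns its argument unchanged):

>>> from mercurial.url import _verifycert
>>> _verifycert({'subjectAltName': (('DNS', '*.example.com'),)}, 'www.example.com')
>>> _verifycert({'subject': ((('commonName', u'example.com'),),)}, 'example.com')
>>> _verifycert({'subject': ((('commonName', u'example.com'),),)}, 'other.org')
'certificate is for example.com'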
747 770
748 771 if has_https:
749 772 class httpsconnection(httplib.HTTPSConnection):
750 773 response_class = keepalive.HTTPResponse
751 774 # must be able to send big bundle as stream.
752 775 send = _gen_sendfile(keepalive.safesend)
753 776 getresponse = keepalive.wrapgetresponse(httplib.HTTPSConnection)
754 777
755 778 def connect(self):
756 779 self.sock = _create_connection((self.host, self.port))
757 780
758 781 host = self.host
759 782 if self.realhostport: # use CONNECT proxy
760 783 _generic_proxytunnel(self) # the return value is not used here
761 784 host = self.realhostport.rsplit(':', 1)[0]
762 785
763 786 cacerts = self.ui.config('web', 'cacerts')
764 787 hostfingerprint = self.ui.config('hostfingerprints', host)
765 788
766 789 if cacerts and not hostfingerprint:
767 790 cacerts = util.expandpath(cacerts)
768 791 if not os.path.exists(cacerts):
769 792 raise util.Abort(_('could not find '
770 793 'web.cacerts: %s') % cacerts)
771 794 self.sock = _ssl_wrap_socket(self.sock, self.key_file,
772 795 self.cert_file, cert_reqs=CERT_REQUIRED,
773 796 ca_certs=cacerts)
774 797 msg = _verifycert(self.sock.getpeercert(), host)
775 798 if msg:
776 799 raise util.Abort(_('%s certificate error: %s '
777 800 '(use --insecure to connect '
778 801 'insecurely)') % (host, msg))
779 802 self.ui.debug('%s certificate successfully verified\n' % host)
780 803 else:
781 804 self.sock = _ssl_wrap_socket(self.sock, self.key_file,
782 805 self.cert_file)
783 806 if hasattr(self.sock, 'getpeercert'):
784 807 peercert = self.sock.getpeercert(True)
785 808 peerfingerprint = util.sha1(peercert).hexdigest()
786 809 nicefingerprint = ":".join([peerfingerprint[x:x + 2]
787 810 for x in xrange(0, len(peerfingerprint), 2)])
788 811 if hostfingerprint:
789 812 if peerfingerprint.lower() != \
790 813 hostfingerprint.replace(':', '').lower():
791 814 raise util.Abort(_('invalid certificate for %s '
792 815 'with fingerprint %s') %
793 816 (host, nicefingerprint))
794 817 self.ui.debug('%s certificate matched fingerprint %s\n' %
795 818 (host, nicefingerprint))
796 819 else:
797 820 self.ui.warn(_('warning: %s certificate '
798 821 'with fingerprint %s not verified '
799 822 '(check hostfingerprints or web.cacerts '
800 823 'config setting)\n') %
801 824 (host, nicefingerprint))
802 825 else: # no getpeercert(): Python < 2.6 fallback, cannot verify
803 826 if hostfingerprint:
804 827 raise util.Abort(_('no certificate for %s with '
805 828 'configured hostfingerprint') % host)
806 829 self.ui.warn(_('warning: %s certificate not verified '
807 830 '(check web.cacerts config setting)\n') %
808 831 host)
809 832
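
The fingerprint pretty-printing above boils down to this (editor's sketch with a shortened, made-up digest; real SHA-1 fingerprints are 20 bytes, i.e. 40 hex digits):

>>> peerfingerprint = 'deadbeef'
>>> ':'.join([peerfingerprint[x:x + 2] for x in xrange(0, len(peerfingerprint), 2)])
'de:ad:be:ef'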
810 833 class httpshandler(keepalive.KeepAliveHandler, urllib2.HTTPSHandler):
811 834 def __init__(self, ui):
812 835 keepalive.KeepAliveHandler.__init__(self)
813 836 urllib2.HTTPSHandler.__init__(self)
814 837 self.ui = ui
815 838 self.pwmgr = passwordmgr(self.ui)
816 839
817 840 def _start_transaction(self, h, req):
818 841 _generic_start_transaction(self, h, req)
819 842 return keepalive.KeepAliveHandler._start_transaction(self, h, req)
820 843
821 844 def https_open(self, req):
822 845 res = readauthforuri(self.ui, req.get_full_url())
823 846 if res:
824 847 group, auth = res
825 848 self.auth = auth
826 849 self.ui.debug("using auth.%s.* for authentication\n" % group)
827 850 else:
828 851 self.auth = None
829 852 return self.do_open(self._makeconnection, req)
830 853
831 854 def _makeconnection(self, host, port=None, *args, **kwargs):
832 855 keyfile = None
833 856 certfile = None
834 857
835 858 if len(args) >= 1: # key_file
836 859 keyfile = args[0]
837 860 if len(args) >= 2: # cert_file
838 861 certfile = args[1]
839 862 args = args[2:]
840 863
841 864 # if the user has specified different key/cert files in
842 865 # hgrc, we prefer these
843 866 if self.auth and 'key' in self.auth and 'cert' in self.auth:
844 867 keyfile = self.auth['key']
845 868 certfile = self.auth['cert']
846 869
847 870 conn = httpsconnection(host, port, keyfile, certfile, *args, **kwargs)
848 871 conn.ui = self.ui
849 872 return conn
850 873
851 874 class httpdigestauthhandler(urllib2.HTTPDigestAuthHandler):
852 875 def __init__(self, *args, **kwargs):
853 876 urllib2.HTTPDigestAuthHandler.__init__(self, *args, **kwargs)
854 877 self.retried_req = None
855 878
856 879 def reset_retry_count(self):
857 880 # Python 2.6.5 will call this on 401 or 407 errors and thus loop
858 881 # forever. We disable reset_retry_count completely and reset in
859 882 # http_error_auth_reqed instead.
860 883 pass
861 884
862 885 def http_error_auth_reqed(self, auth_header, host, req, headers):
863 886 # Reset the retry counter once for each request.
864 887 if req is not self.retried_req:
865 888 self.retried_req = req
866 889 self.retried = 0
867 890 # In python < 2.5 AbstractDigestAuthHandler raises a ValueError if
868 891 # it doesn't know about the auth type requested. This can happen if
869 892 # somebody is using BasicAuth and types a bad password.
870 893 try:
871 894 return urllib2.HTTPDigestAuthHandler.http_error_auth_reqed(
872 895 self, auth_header, host, req, headers)
873 896 except ValueError, inst:
874 897 arg = inst.args[0]
875 898 if arg.startswith("AbstractDigestAuthHandler doesn't know "):
876 899 return
877 900 raise
878 901
879 902 class httpbasicauthhandler(urllib2.HTTPBasicAuthHandler):
880 903 def __init__(self, *args, **kwargs):
881 904 urllib2.HTTPBasicAuthHandler.__init__(self, *args, **kwargs)
882 905 self.retried_req = None
883 906
884 907 def reset_retry_count(self):
885 908 # Python 2.6.5 will call this on 401 or 407 errors and thus loop
886 909 # forever. We disable reset_retry_count completely and reset in
887 910 # http_error_auth_reqed instead.
888 911 pass
889 912
890 913 def http_error_auth_reqed(self, auth_header, host, req, headers):
891 914 # Reset the retry counter once for each request.
892 915 if req is not self.retried_req:
893 916 self.retried_req = req
894 917 self.retried = 0
895 918 return urllib2.HTTPBasicAuthHandler.http_error_auth_reqed(
896 919 self, auth_header, host, req, headers)
897 920
898 921 handlerfuncs = []
899 922
900 923 def opener(ui, authinfo=None):
901 924 '''
902 925 construct an opener suitable for urllib2.
903 926 authinfo, if given, is added to the password manager.
904 927 '''
905 928 handlers = [httphandler()]
906 929 if has_https:
907 930 handlers.append(httpshandler(ui))
908 931
909 932 handlers.append(proxyhandler(ui))
910 933
911 934 passmgr = passwordmgr(ui)
912 935 if authinfo is not None:
913 936 passmgr.add_password(*authinfo)
914 937 user, passwd = authinfo[2:4]
915 938 ui.debug('http auth: user %s, password %s\n' %
916 939 (user, passwd and '*' * len(passwd) or 'not set'))
917 940
918 941 handlers.extend((httpbasicauthhandler(passmgr),
919 942 httpdigestauthhandler(passmgr)))
920 943 handlers.extend([h(ui, passmgr) for h in handlerfuncs])
921 944 opener = urllib2.build_opener(*handlers)
922 945
923 946 # 1.0 here is the _protocol_ version
924 947 opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
925 948 opener.addheaders.append(('Accept', 'application/mercurial-0.1'))
926 949 return opener
927 950
928 951 def open(ui, url_, data=None):
929 952 u = url(url_)
930 953 if u.scheme:
931 954 u.scheme = u.scheme.lower()
932 955 url_, authinfo = u.authinfo()
933 956 else:
934 957 path = util.normpath(os.path.abspath(url_))
935 958 url_ = 'file://' + urllib.pathname2url(path)
936 959 authinfo = None
937 960 return opener(ui, authinfo).open(url_, data)
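
When the argument has no scheme, open() treats it as a local path; the conversion amounts to this (editor's sketch, POSIX paths assumed):

>>> import urllib
>>> 'file://' + urllib.pathname2url('/tmp/repo')
'file:///tmp/repo'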
@@ -1,1583 +1,1563 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specfic implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from i18n import _
17 17 import error, osutil, encoding
18 18 import errno, re, shutil, sys, tempfile, traceback
19 19 import os, stat, time, calendar, textwrap, unicodedata, signal
20 20 import imp, socket
21 21
22 22 # Python compatibility
23 23
24 24 def sha1(s):
25 25 return _fastsha1(s)
26 26
27 27 def _fastsha1(s):
28 28 # This function will import sha1 from hashlib or sha (whichever is
29 29 # available) and overwrite itself with it on the first call.
30 30 # Subsequent calls will go directly to the imported function.
31 31 if sys.version_info >= (2, 5):
32 32 from hashlib import sha1 as _sha1
33 33 else:
34 34 from sha import sha as _sha1
35 35 global _fastsha1, sha1
36 36 _fastsha1 = sha1 = _sha1
37 37 return _sha1(s)
38 38
39 39 import __builtin__
40 40
41 41 if sys.version_info[0] < 3:
42 42 def fakebuffer(sliceable, offset=0):
43 43 return sliceable[offset:]
44 44 else:
45 45 def fakebuffer(sliceable, offset=0):
46 46 return memoryview(sliceable)[offset:]
47 47 try:
48 48 buffer
49 49 except NameError:
50 50 __builtin__.buffer = fakebuffer
51 51
52 52 import subprocess
53 53 closefds = os.name == 'posix'
54 54
55 55 def popen2(cmd, env=None, newlines=False):
56 56 # Setting bufsize to -1 lets the system decide the buffer size.
57 57 # The default for bufsize is 0, meaning unbuffered. This leads to
58 58 # poor performance on Mac OS X: http://bugs.python.org/issue4194
59 59 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
60 60 close_fds=closefds,
61 61 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
62 62 universal_newlines=newlines,
63 63 env=env)
64 64 return p.stdin, p.stdout
65 65
66 66 def popen3(cmd, env=None, newlines=False):
67 67 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
68 68 close_fds=closefds,
69 69 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
70 70 stderr=subprocess.PIPE,
71 71 universal_newlines=newlines,
72 72 env=env)
73 73 return p.stdin, p.stdout, p.stderr
74 74
75 75 def version():
76 76 """Return version information if available."""
77 77 try:
78 78 import __version__
79 79 return __version__.version
80 80 except ImportError:
81 81 return 'unknown'
82 82
83 83 # used by parsedate
84 84 defaultdateformats = (
85 85 '%Y-%m-%d %H:%M:%S',
86 86 '%Y-%m-%d %I:%M:%S%p',
87 87 '%Y-%m-%d %H:%M',
88 88 '%Y-%m-%d %I:%M%p',
89 89 '%Y-%m-%d',
90 90 '%m-%d',
91 91 '%m/%d',
92 92 '%m/%d/%y',
93 93 '%m/%d/%Y',
94 94 '%a %b %d %H:%M:%S %Y',
95 95 '%a %b %d %I:%M:%S%p %Y',
96 96 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
97 97 '%b %d %H:%M:%S %Y',
98 98 '%b %d %I:%M:%S%p %Y',
99 99 '%b %d %H:%M:%S',
100 100 '%b %d %I:%M:%S%p',
101 101 '%b %d %H:%M',
102 102 '%b %d %I:%M%p',
103 103 '%b %d %Y',
104 104 '%b %d',
105 105 '%H:%M:%S',
106 106 '%I:%M:%S%p',
107 107 '%H:%M',
108 108 '%I:%M%p',
109 109 )
110 110
111 111 extendeddateformats = defaultdateformats + (
112 112 "%Y",
113 113 "%Y-%m",
114 114 "%b",
115 115 "%b %Y",
116 116 )
117 117
118 118 def cachefunc(func):
119 119 '''cache the result of function calls'''
120 120 # XXX doesn't handle keywords args
121 121 cache = {}
122 122 if func.func_code.co_argcount == 1:
123 123 # we gain a small amount of time because
124 124 # we don't need to pack/unpack the list
125 125 def f(arg):
126 126 if arg not in cache:
127 127 cache[arg] = func(arg)
128 128 return cache[arg]
129 129 else:
130 130 def f(*args):
131 131 if args not in cache:
132 132 cache[args] = func(*args)
133 133 return cache[args]
134 134
135 135 return f
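
An illustrative session (editor's sketch, not part of this changeset): the wrapped function runs once per distinct argument.

>>> from mercurial.util import cachefunc
>>> calls = []
>>> def square(x):
...     calls.append(x)
...     return x * x
>>> f = cachefunc(square)
>>> f(3), f(3), len(calls)
(9, 9, 1)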
136 136
137 137 def lrucachefunc(func):
138 138 '''cache most recent results of function calls'''
139 139 cache = {}
140 140 order = []
141 141 if func.func_code.co_argcount == 1:
142 142 def f(arg):
143 143 if arg not in cache:
144 144 if len(cache) > 20:
145 145 del cache[order.pop(0)]
146 146 cache[arg] = func(arg)
147 147 else:
148 148 order.remove(arg)
149 149 order.append(arg)
150 150 return cache[arg]
151 151 else:
152 152 def f(*args):
153 153 if args not in cache:
154 154 if len(cache) > 20:
155 155 del cache[order.pop(0)]
156 156 cache[args] = func(*args)
157 157 else:
158 158 order.remove(args)
159 159 order.append(args)
160 160 return cache[args]
161 161
162 162 return f
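
Unlike cachefunc, this keeps only roughly the 20 most recently used results (editor's sketch):

>>> from mercurial.util import lrucachefunc
>>> calls = []
>>> def ident(x):
...     calls.append(x)
...     return x
>>> g = lrucachefunc(ident)
>>> dummy = [g(i) for i in range(25)]
>>> g(24), len(calls) # recent result: still cached
(24, 25)
>>> g(0), len(calls) # old result: evicted, so recomputed
(0, 26)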
163 163
164 164 class propertycache(object):
165 165 def __init__(self, func):
166 166 self.func = func
167 167 self.name = func.__name__
168 168 def __get__(self, obj, type=None):
169 169 result = self.func(obj)
170 170 setattr(obj, self.name, result)
171 171 return result
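
An illustrative session (editor's sketch): the descriptor computes once, then replaces itself with a plain instance attribute.

>>> from mercurial.util import propertycache
>>> class thing(object):
...     @propertycache
...     def answer(self):
...         print 'computing'
...         return 42
>>> t = thing()
>>> t.answer
computing
42
>>> t.answer # cached: no recomputation
42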
172 172
173 173 def pipefilter(s, cmd):
174 174 '''filter string S through command CMD, returning its output'''
175 175 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
176 176 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
177 177 pout, perr = p.communicate(s)
178 178 return pout
179 179
180 180 def tempfilter(s, cmd):
181 181 '''filter string S through a pair of temporary files with CMD.
182 182 CMD is used as a template to create the real command to be run,
183 183 with the strings INFILE and OUTFILE replaced by the real names of
184 184 the temporary files generated.'''
185 185 inname, outname = None, None
186 186 try:
187 187 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
188 188 fp = os.fdopen(infd, 'wb')
189 189 fp.write(s)
190 190 fp.close()
191 191 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
192 192 os.close(outfd)
193 193 cmd = cmd.replace('INFILE', inname)
194 194 cmd = cmd.replace('OUTFILE', outname)
195 195 code = os.system(cmd)
196 196 if sys.platform == 'OpenVMS' and code & 1:
197 197 code = 0
198 198 if code:
199 199 raise Abort(_("command '%s' failed: %s") %
200 200 (cmd, explain_exit(code)[0]))
201 201 fp = open(outname, 'rb')
202 202 r = fp.read()
203 203 fp.close()
204 204 return r
205 205 finally:
206 206 try:
207 207 if inname:
208 208 os.unlink(inname)
209 209 except:
210 210 pass
211 211 try:
212 212 if outname:
213 213 os.unlink(outname)
214 214 except:
215 215 pass
216 216
217 217 filtertable = {
218 218 'tempfile:': tempfilter,
219 219 'pipe:': pipefilter,
220 220 }
221 221
222 222 def filter(s, cmd):
223 223 "filter a string through a command that transforms its input to its output"
224 224 for name, fn in filtertable.iteritems():
225 225 if cmd.startswith(name):
226 226 return fn(s, cmd[len(name):].lstrip())
227 227 return pipefilter(s, cmd)
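
An illustrative call (editor's sketch; assumes a POSIX shell with tr(1) available):

>>> from mercurial.util import filter
>>> filter('some text', 'pipe: tr a-z A-Z')
'SOME TEXT'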
228 228
229 229 def binary(s):
230 230 """return true if a string is binary data"""
231 231 return bool(s and '\0' in s)
232 232
233 233 def increasingchunks(source, min=1024, max=65536):
234 234 '''return no less than min bytes per chunk while data remains,
235 235 doubling min after each chunk until it reaches max'''
236 236 def log2(x):
237 237 if not x:
238 238 return 0
239 239 i = 0
240 240 while x:
241 241 x >>= 1
242 242 i += 1
243 243 return i - 1
244 244
245 245 buf = []
246 246 blen = 0
247 247 for chunk in source:
248 248 buf.append(chunk)
249 249 blen += len(chunk)
250 250 if blen >= min:
251 251 if min < max:
252 252 min = min << 1
253 253 nmin = 1 << log2(blen)
254 254 if nmin > min:
255 255 min = nmin
256 256 if min > max:
257 257 min = max
258 258 yield ''.join(buf)
259 259 blen = 0
260 260 buf = []
261 261 if buf:
262 262 yield ''.join(buf)
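
An illustrative run (editor's sketch): small input chunks are coalesced, and the minimum chunk size doubles after each yield until it reaches max.

>>> from mercurial.util import increasingchunks
>>> list(increasingchunks(['a'] * 10, min=2, max=8))
['aa', 'aaaa', 'aaaa']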
263 263
264 264 Abort = error.Abort
265 265
266 266 def always(fn):
267 267 return True
268 268
269 269 def never(fn):
270 270 return False
271 271
272 272 def pathto(root, n1, n2):
273 273 '''return the relative path from one place to another.
274 274 root should use os.sep to separate directories
275 275 n1 should use os.sep to separate directories
276 276 n2 should use "/" to separate directories
277 277 returns an os.sep-separated path.
278 278
279 279 If n1 is a relative path, it's assumed it's
280 280 relative to root.
281 281 n2 should always be relative to root.
282 282 '''
283 283 if not n1:
284 284 return localpath(n2)
285 285 if os.path.isabs(n1):
286 286 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
287 287 return os.path.join(root, localpath(n2))
288 288 n2 = '/'.join((pconvert(root), n2))
289 289 a, b = splitpath(n1), n2.split('/')
290 290 a.reverse()
291 291 b.reverse()
292 292 while a and b and a[-1] == b[-1]:
293 293 a.pop()
294 294 b.pop()
295 295 b.reverse()
296 296 return os.sep.join((['..'] * len(a)) + b) or '.'
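
An illustrative call (editor's sketch; POSIX separators assumed, so os.sep == '/'):

>>> from mercurial.util import pathto
>>> pathto('/repo', 'src/main', 'docs/readme')
'../../docs/readme'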
297 297
298 298 def canonpath(root, cwd, myname, auditor=None):
299 299 """return the canonical path of myname, given cwd and root"""
300 300 if endswithsep(root):
301 301 rootsep = root
302 302 else:
303 303 rootsep = root + os.sep
304 304 name = myname
305 305 if not os.path.isabs(name):
306 306 name = os.path.join(root, cwd, name)
307 307 name = os.path.normpath(name)
308 308 if auditor is None:
309 309 auditor = path_auditor(root)
310 310 if name != rootsep and name.startswith(rootsep):
311 311 name = name[len(rootsep):]
312 312 auditor(name)
313 313 return pconvert(name)
314 314 elif name == root:
315 315 return ''
316 316 else:
317 317 # Determine whether `name' is in the hierarchy at or beneath `root',
318 318 # by iterating name=dirname(name) until that causes no change (can't
319 319 # check name == '/', because that doesn't work on windows). For each
320 320 # `name', compare dev/inode numbers. If they match, the list `rel'
321 321 # holds the reversed list of components making up the relative file
322 322 # name we want.
323 323 root_st = os.stat(root)
324 324 rel = []
325 325 while True:
326 326 try:
327 327 name_st = os.stat(name)
328 328 except OSError:
329 329 break
330 330 if samestat(name_st, root_st):
331 331 if not rel:
332 332 # name was actually the same as root (maybe a symlink)
333 333 return ''
334 334 rel.reverse()
335 335 name = os.path.join(*rel)
336 336 auditor(name)
337 337 return pconvert(name)
338 338 dirname, basename = os.path.split(name)
339 339 rel.append(basename)
340 340 if dirname == name:
341 341 break
342 342 name = dirname
343 343
344 344 raise Abort('%s not under root' % myname)
345 345
346 346 _hgexecutable = None
347 347
348 348 def main_is_frozen():
349 349 """return True if we are a frozen executable.
350 350
351 351 The code supports py2exe (most common, Windows only) and tools/freeze
352 352 (portable, not much used).
353 353 """
354 354 return (hasattr(sys, "frozen") or # new py2exe
355 355 hasattr(sys, "importers") or # old py2exe
356 356 imp.is_frozen("__main__")) # tools/freeze
357 357
358 358 def hgexecutable():
359 359 """return location of the 'hg' executable.
360 360
361 361 Defaults to $HG or 'hg' in the search path.
362 362 """
363 363 if _hgexecutable is None:
364 364 hg = os.environ.get('HG')
365 365 if hg:
366 366 set_hgexecutable(hg)
367 367 elif main_is_frozen():
368 368 set_hgexecutable(sys.executable)
369 369 else:
370 370 exe = find_exe('hg') or os.path.basename(sys.argv[0])
371 371 set_hgexecutable(exe)
372 372 return _hgexecutable
373 373
374 374 def set_hgexecutable(path):
375 375 """set location of the 'hg' executable"""
376 376 global _hgexecutable
377 377 _hgexecutable = path
378 378
379 379 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
380 380 '''enhanced shell command execution.
381 381 run with environment maybe modified, maybe in different dir.
382 382
383 383 if command fails and onerr is None, return status. if ui object,
384 384 print error message and return status, else raise onerr object as
385 385 exception.
386 386
387 387 if out is specified, it is assumed to be a file-like object that has a
388 388 write() method. stdout and stderr will be redirected to out.'''
389 389 try:
390 390 sys.stdout.flush()
391 391 except Exception:
392 392 pass
393 393 def py2shell(val):
394 394 'convert python object into string that is useful to shell'
395 395 if val is None or val is False:
396 396 return '0'
397 397 if val is True:
398 398 return '1'
399 399 return str(val)
400 400 origcmd = cmd
401 401 cmd = quotecommand(cmd)
402 402 env = dict(os.environ)
403 403 env.update((k, py2shell(v)) for k, v in environ.iteritems())
404 404 env['HG'] = hgexecutable()
405 405 if out is None:
406 406 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
407 407 env=env, cwd=cwd)
408 408 else:
409 409 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
410 410 env=env, cwd=cwd, stdout=subprocess.PIPE,
411 411 stderr=subprocess.STDOUT)
412 412 for line in proc.stdout:
413 413 out.write(line)
414 414 proc.wait()
415 415 rc = proc.returncode
416 416 if sys.platform == 'OpenVMS' and rc & 1:
417 417 rc = 0
418 418 if rc and onerr:
419 419 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
420 420 explain_exit(rc)[0])
421 421 if errprefix:
422 422 errmsg = '%s: %s' % (errprefix, errmsg)
423 423 try:
424 424 onerr.warn(errmsg + '\n')
425 425 except AttributeError:
426 426 raise onerr(errmsg)
427 427 return rc
428 428
429 429 def checksignature(func):
430 430 '''wrap a function with code to check for calling errors'''
431 431 def check(*args, **kwargs):
432 432 try:
433 433 return func(*args, **kwargs)
434 434 except TypeError:
435 435 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
436 436 raise error.SignatureError
437 437 raise
438 438
439 439 return check
440 440
441 441 def makedir(path, notindexed):
442 442 os.mkdir(path)
443 443
444 444 def unlinkpath(f):
445 445 """unlink and remove the directory if it is empty"""
446 446 os.unlink(f)
447 447 # try removing directories that might now be empty
448 448 try:
449 449 os.removedirs(os.path.dirname(f))
450 450 except OSError:
451 451 pass
452 452
453 453 def copyfile(src, dest):
454 454 "copy a file, preserving mode and atime/mtime"
455 455 if os.path.islink(src):
456 456 try:
457 457 os.unlink(dest)
458 458 except:
459 459 pass
460 460 os.symlink(os.readlink(src), dest)
461 461 else:
462 462 try:
463 463 shutil.copyfile(src, dest)
464 464 shutil.copymode(src, dest)
465 465 except shutil.Error, inst:
466 466 raise Abort(str(inst))
467 467
468 468 def copyfiles(src, dst, hardlink=None):
469 469 """Copy a directory tree using hardlinks if possible"""
470 470
471 471 if hardlink is None:
472 472 hardlink = (os.stat(src).st_dev ==
473 473 os.stat(os.path.dirname(dst)).st_dev)
474 474
475 475 num = 0
476 476 if os.path.isdir(src):
477 477 os.mkdir(dst)
478 478 for name, kind in osutil.listdir(src):
479 479 srcname = os.path.join(src, name)
480 480 dstname = os.path.join(dst, name)
481 481 hardlink, n = copyfiles(srcname, dstname, hardlink)
482 482 num += n
483 483 else:
484 484 if hardlink:
485 485 try:
486 486 os_link(src, dst)
487 487 except (IOError, OSError):
488 488 hardlink = False
489 489 shutil.copy(src, dst)
490 490 else:
491 491 shutil.copy(src, dst)
492 492 num += 1
493 493
494 494 return hardlink, num
495 495
496 496 class path_auditor(object):
497 497 '''ensure that a filesystem path contains no banned components.
498 498 the following properties of a path are checked:
499 499
500 500 - ends with a directory separator
501 501 - under top-level .hg
502 502 - starts at the root of a windows drive
503 503 - contains ".."
504 504 - traverses a symlink (e.g. a/symlink_here/b)
505 505 - inside a nested repository (a callback can be used to approve
506 506 some nested repositories, e.g., subrepositories)
507 507 '''
508 508
509 509 def __init__(self, root, callback=None):
510 510 self.audited = set()
511 511 self.auditeddir = set()
512 512 self.root = root
513 513 self.callback = callback
514 514
515 515 def __call__(self, path):
516 516 if path in self.audited:
517 517 return
518 518 # AIX ignores "/" at end of path, others raise EISDIR.
519 519 if endswithsep(path):
520 520 raise Abort(_("path ends in directory separator: %s") % path)
521 521 normpath = os.path.normcase(path)
522 522 parts = splitpath(normpath)
523 523 if (os.path.splitdrive(path)[0]
524 524 or parts[0].lower() in ('.hg', '.hg.', '')
525 525 or os.pardir in parts):
526 526 raise Abort(_("path contains illegal component: %s") % path)
527 527 if '.hg' in path.lower():
528 528 lparts = [p.lower() for p in parts]
529 529 for p in '.hg', '.hg.':
530 530 if p in lparts[1:]:
531 531 pos = lparts.index(p)
532 532 base = os.path.join(*parts[:pos])
533 533 raise Abort(_('path %r is inside repo %r') % (path, base))
534 534 def check(prefix):
535 535 curpath = os.path.join(self.root, prefix)
536 536 try:
537 537 st = os.lstat(curpath)
538 538 except OSError, err:
539 539 # EINVAL can be raised as invalid path syntax under win32.
540 540 # They must be ignored because patterns can be checked too.
541 541 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
542 542 raise
543 543 else:
544 544 if stat.S_ISLNK(st.st_mode):
545 545 raise Abort(_('path %r traverses symbolic link %r') %
546 546 (path, prefix))
547 547 elif (stat.S_ISDIR(st.st_mode) and
548 548 os.path.isdir(os.path.join(curpath, '.hg'))):
549 549 if not self.callback or not self.callback(curpath):
550 550 raise Abort(_('path %r is inside repo %r') %
551 551 (path, prefix))
552 552 parts.pop()
553 553 prefixes = []
554 554 while parts:
555 555 prefix = os.sep.join(parts)
556 556 if prefix in self.auditeddir:
557 557 break
558 558 check(prefix)
559 559 prefixes.append(prefix)
560 560 parts.pop()
561 561
562 562 self.audited.add(path)
563 563 # only add prefixes to the cache after checking everything: we don't
564 564 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
565 565 self.auditeddir.update(prefixes)
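
An illustrative session (editor's sketch; '/repo' is a made-up POSIX root and need not exist for these particular checks):

>>> from mercurial.util import path_auditor
>>> auditor = path_auditor('/repo')
>>> auditor('src/main.c') # no banned components: passes silently
>>> auditor('../etc/passwd')
Traceback (most recent call last):
    ...
Abort: path contains illegal component: ../etc/passwd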
566 566
567 567 def lookup_reg(key, name=None, scope=None):
568 568 return None
569 569
570 570 def hidewindow():
571 571 """Hide current shell window.
572 572
573 573 Used to hide the window opened when starting asynchronous
574 574 child process under Windows, unneeded on other systems.
575 575 """
576 576 pass
577 577
578 578 if os.name == 'nt':
579 579 from windows import *
580 580 else:
581 581 from posix import *
582 582
583 583 def makelock(info, pathname):
584 584 try:
585 585 return os.symlink(info, pathname)
586 586 except OSError, why:
587 587 if why.errno == errno.EEXIST:
588 588 raise
589 589 except AttributeError: # no symlink in os
590 590 pass
591 591
592 592 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
593 593 os.write(ld, info)
594 594 os.close(ld)
595 595
596 596 def readlock(pathname):
597 597 try:
598 598 return os.readlink(pathname)
599 599 except OSError, why:
600 600 if why.errno not in (errno.EINVAL, errno.ENOSYS):
601 601 raise
602 602 except AttributeError: # no symlink in os
603 603 pass
604 604 fp = posixfile(pathname)
605 605 r = fp.read()
606 606 fp.close()
607 607 return r
608 608
609 609 def fstat(fp):
610 610 '''stat file object that may not have fileno method.'''
611 611 try:
612 612 return os.fstat(fp.fileno())
613 613 except AttributeError:
614 614 return os.stat(fp.name)
615 615
616 616 # File system features
617 617
618 618 def checkcase(path):
619 619 """
620 620 Check whether the given path is on a case-sensitive filesystem
621 621
622 622 Requires a path (like /foo/.hg) ending with a foldable final
623 623 directory component.
624 624 """
625 625 s1 = os.stat(path)
626 626 d, b = os.path.split(path)
627 627 p2 = os.path.join(d, b.upper())
628 628 if path == p2:
629 629 p2 = os.path.join(d, b.lower())
630 630 try:
631 631 s2 = os.stat(p2)
632 632 if s2 == s1:
633 633 return False
634 634 return True
635 635 except:
636 636 return True
637 637
638 638 _fspathcache = {}
639 639 def fspath(name, root):
640 640 '''Get name in the case stored in the filesystem
641 641
642 642 The name is either relative to root, or it is an absolute path starting
643 643 with root. Note that this function is unnecessary, and should not be
644 644 called, for case-sensitive filesystems (simply because it's expensive).
645 645 '''
646 646 # If name is absolute, make it relative
647 647 if name.lower().startswith(root.lower()):
648 648 l = len(root)
649 649 if name[l] == os.sep or name[l] == os.altsep:
650 650 l = l + 1
651 651 name = name[l:]
652 652
653 653 if not os.path.lexists(os.path.join(root, name)):
654 654 return None
655 655
656 656 seps = os.sep
657 657 if os.altsep:
658 658 seps = seps + os.altsep
659 659 # Protect backslashes. This gets silly very quickly.
660 660 seps = seps.replace('\\', '\\\\')
661 661 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
662 662 dir = os.path.normcase(os.path.normpath(root))
663 663 result = []
664 664 for part, sep in pattern.findall(name):
665 665 if sep:
666 666 result.append(sep)
667 667 continue
668 668
669 669 if dir not in _fspathcache:
670 670 _fspathcache[dir] = os.listdir(dir)
671 671 contents = _fspathcache[dir]
672 672
673 673 lpart = part.lower()
674 674 lenp = len(part)
675 675 for n in contents:
676 676 if lenp == len(n) and n.lower() == lpart:
677 677 result.append(n)
678 678 break
679 679 else:
680 680 # Cannot happen, as the file exists!
681 681 result.append(part)
682 682 dir = os.path.join(dir, lpart)
683 683
684 684 return ''.join(result)
685 685
686 686 def checkexec(path):
687 687 """
688 688 Check whether the given path is on a filesystem with UNIX-like exec flags
689 689
690 690 Requires a directory (like /foo/.hg)
691 691 """
692 692
693 693 # VFAT on some Linux versions can flip mode but it doesn't persist
694 694 # a FS remount. Frequently we can detect it if files are created
695 695 # with exec bit on.
696 696
697 697 try:
698 698 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
699 699 fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-')
700 700 try:
701 701 os.close(fh)
702 702 m = os.stat(fn).st_mode & 0777
703 703 new_file_has_exec = m & EXECFLAGS
704 704 os.chmod(fn, m ^ EXECFLAGS)
705 705 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
706 706 finally:
707 707 os.unlink(fn)
708 708 except (IOError, OSError):
709 709 # we don't care, the user probably won't be able to commit anyway
710 710 return False
711 711 return not (new_file_has_exec or exec_flags_cannot_flip)
712 712
713 713 def checklink(path):
714 714 """check whether the given path is on a symlink-capable filesystem"""
715 715 # mktemp is not racy because symlink creation will fail if the
716 716 # file already exists
717 717 name = tempfile.mktemp(dir=path, prefix='hg-checklink-')
718 718 try:
719 719 os.symlink(".", name)
720 720 os.unlink(name)
721 721 return True
722 722 except (OSError, AttributeError):
723 723 return False
724 724
725 725 def checknlink(testfile):
726 726 '''check whether hardlink count reporting works properly'''
727 727
728 728 # testfile may be open, so we need a separate file for checking to
729 729 # work around issue2543 (or testfile may get lost on Samba shares)
730 730 f1 = testfile + ".hgtmp1"
731 731 if os.path.lexists(f1):
732 732 return False
733 733 try:
734 734 posixfile(f1, 'w').close()
735 735 except IOError:
736 736 return False
737 737
738 738 f2 = testfile + ".hgtmp2"
739 739 fd = None
740 740 try:
741 741 try:
742 742 os_link(f1, f2)
743 743 except OSError:
744 744 return False
745 745
746 746 # nlinks() may behave differently for files on Windows shares if
747 747 # the file is open.
748 748 fd = posixfile(f2)
749 749 return nlinks(f2) > 1
750 750 finally:
751 751 if fd is not None:
752 752 fd.close()
753 753 for f in (f1, f2):
754 754 try:
755 755 os.unlink(f)
756 756 except OSError:
757 757 pass
758 758
759 759 return False
760 760
761 761 def endswithsep(path):
762 762 '''Check path ends with os.sep or os.altsep.'''
763 763 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
764 764
765 765 def splitpath(path):
766 766 '''Split path by os.sep.
767 767 Note that this function does not use os.altsep because it is
768 768 meant as a drop-in alternative to a simple "xxx.split(os.sep)".
769 769 It is recommended to apply os.path.normpath() before using this
770 770 function if needed.'''
771 771 return path.split(os.sep)
772 772
773 773 def gui():
774 774 '''Are we running in a GUI?'''
775 775 if sys.platform == 'darwin':
776 776 if 'SSH_CONNECTION' in os.environ:
777 777 # handle SSH access to a box where the user is logged in
778 778 return False
779 779 elif getattr(osutil, 'isgui', None):
780 780 # check if a CoreGraphics session is available
781 781 return osutil.isgui()
782 782 else:
783 783 # pure build; use a safe default
784 784 return True
785 785 else:
786 786 return os.name == "nt" or os.environ.get("DISPLAY")
787 787
788 788 def mktempcopy(name, emptyok=False, createmode=None):
789 789 """Create a temporary file with the same contents from name
790 790
791 791 The permission bits are copied from the original file.
792 792
793 793 If the temporary file is going to be truncated immediately, you
794 794 can use emptyok=True as an optimization.
795 795
796 796 Returns the name of the temporary file.
797 797 """
798 798 d, fn = os.path.split(name)
799 799 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
800 800 os.close(fd)
801 801 # Temporary files are created with mode 0600, which is usually not
802 802 # what we want. If the original file already exists, just copy
803 803 # its mode. Otherwise, manually obey umask.
804 804 try:
805 805 st_mode = os.lstat(name).st_mode & 0777
806 806 except OSError, inst:
807 807 if inst.errno != errno.ENOENT:
808 808 raise
809 809 st_mode = createmode
810 810 if st_mode is None:
811 811 st_mode = ~umask
812 812 st_mode &= 0666
813 813 os.chmod(temp, st_mode)
814 814 if emptyok:
815 815 return temp
816 816 try:
817 817 try:
818 818 ifp = posixfile(name, "rb")
819 819 except IOError, inst:
820 820 if inst.errno == errno.ENOENT:
821 821 return temp
822 822 if not getattr(inst, 'filename', None):
823 823 inst.filename = name
824 824 raise
825 825 ofp = posixfile(temp, "wb")
826 826 for chunk in filechunkiter(ifp):
827 827 ofp.write(chunk)
828 828 ifp.close()
829 829 ofp.close()
830 830 except:
831 831 try: os.unlink(temp)
832 832 except: pass
833 833 raise
834 834 return temp
835 835
836 836 class atomictempfile(object):
837 837 """file-like object that atomically updates a file
838 838
839 839 All writes will be redirected to a temporary copy of the original
840 840 file. When rename is called, the copy is renamed to the original
841 841 name, making the changes visible.
842 842 """
843 843 def __init__(self, name, mode='w+b', createmode=None):
844 844 self.__name = name
845 845 self._fp = None
846 846 self.temp = mktempcopy(name, emptyok=('w' in mode),
847 847 createmode=createmode)
848 848 self._fp = posixfile(self.temp, mode)
849 849
850 850 def __getattr__(self, name):
851 851 return getattr(self._fp, name)
852 852
853 853 def rename(self):
854 854 if not self._fp.closed:
855 855 self._fp.close()
856 856 rename(self.temp, localpath(self.__name))
857 857
858 858 def close(self):
859 859 if not self._fp:
860 860 return
861 861 if not self._fp.closed:
862 862 try:
863 863 os.unlink(self.temp)
864 864 except: pass
865 865 self._fp.close()
866 866
867 867 def __del__(self):
868 868 self.close()
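
A minimal usage sketch (editor's illustration; 'somefile' is made up): writes go to a temporary copy, and only rename() makes them visible.

f = atomictempfile('somefile')
f.write('new contents')
f.rename() # atomic replace; close() without rename() discards the write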
869 869
870 870 def makedirs(name, mode=None):
871 871 """recursive directory creation with parent mode inheritance"""
872 872 parent = os.path.abspath(os.path.dirname(name))
873 873 try:
874 874 os.mkdir(name)
875 875 if mode is not None:
876 876 os.chmod(name, mode)
877 877 return
878 878 except OSError, err:
879 879 if err.errno == errno.EEXIST:
880 880 return
881 881 if not name or parent == name or err.errno != errno.ENOENT:
882 882 raise
883 883 makedirs(parent, mode)
884 884 makedirs(name, mode)
885 885
886 886 class opener(object):
887 887 """Open files relative to a base directory
888 888
889 889 This class is used to hide the details of COW semantics and
890 890 remote file access from higher level code.
891 891 """
892 892 def __init__(self, base, audit=True):
893 893 self.base = base
894 894 if audit:
895 895 self.auditor = path_auditor(base)
896 896 else:
897 897 self.auditor = always
898 898 self.createmode = None
899 899 self._trustnlink = None
900 900
901 901 @propertycache
902 902 def _can_symlink(self):
903 903 return checklink(self.base)
904 904
905 905 def _fixfilemode(self, name):
906 906 if self.createmode is None:
907 907 return
908 908 os.chmod(name, self.createmode & 0666)
909 909
910 910 def __call__(self, path, mode="r", text=False, atomictemp=False):
911 911 self.auditor(path)
912 912 f = os.path.join(self.base, path)
913 913
914 914 if not text and "b" not in mode:
915 915 mode += "b" # for that other OS
916 916
917 917 nlink = -1
918 918 dirname, basename = os.path.split(f)
919 919 # If basename is empty, then the path is malformed because it points
920 920 # to a directory. Let the posixfile() call below raise IOError.
921 921 if basename and mode not in ('r', 'rb'):
922 922 if atomictemp:
923 923 if not os.path.isdir(dirname):
924 924 makedirs(dirname, self.createmode)
925 925 return atomictempfile(f, mode, self.createmode)
926 926 try:
927 927 if 'w' in mode:
928 928 unlink(f)
929 929 nlink = 0
930 930 else:
931 931 # nlinks() may behave differently for files on Windows
932 932 # shares if the file is open.
933 933 fd = posixfile(f)
934 934 nlink = nlinks(f)
935 935 if nlink < 1:
936 936 nlink = 2 # force mktempcopy (issue1922)
937 937 fd.close()
938 938 except (OSError, IOError), e:
939 939 if e.errno != errno.ENOENT:
940 940 raise
941 941 nlink = 0
942 942 if not os.path.isdir(dirname):
943 943 makedirs(dirname, self.createmode)
944 944 if nlink > 0:
945 945 if self._trustnlink is None:
946 946 self._trustnlink = nlink > 1 or checknlink(f)
947 947 if nlink > 1 or not self._trustnlink:
948 948 rename(mktempcopy(f), f)
949 949 fp = posixfile(f, mode)
950 950 if nlink == 0:
951 951 self._fixfilemode(f)
952 952 return fp
953 953
954 954 def symlink(self, src, dst):
955 955 self.auditor(dst)
956 956 linkname = os.path.join(self.base, dst)
957 957 try:
958 958 os.unlink(linkname)
959 959 except OSError:
960 960 pass
961 961
962 962 dirname = os.path.dirname(linkname)
963 963 if not os.path.exists(dirname):
964 964 makedirs(dirname, self.createmode)
965 965
966 966 if self._can_symlink:
967 967 try:
968 968 os.symlink(src, linkname)
969 969 except OSError, err:
970 970 raise OSError(err.errno, _('could not symlink to %r: %s') %
971 971 (src, err.strerror), linkname)
972 972 else:
973 973 f = self(dst, "w")
974 974 f.write(src)
975 975 f.close()
976 976 self._fixfilemode(dst)
977 977
978 978 class chunkbuffer(object):
979 979 """Allow arbitrary sized chunks of data to be efficiently read from an
980 980 iterator over chunks of arbitrary size."""
981 981
982 982 def __init__(self, in_iter):
983 983 """in_iter is the iterator that's iterating over the input chunks.
984 984 targetsize is how big a buffer to try to maintain."""
985 985 def splitbig(chunks):
986 986 for chunk in chunks:
987 987 if len(chunk) > 2**20:
988 988 pos = 0
989 989 while pos < len(chunk):
990 990 end = pos + 2 ** 18
991 991 yield chunk[pos:end]
992 992 pos = end
993 993 else:
994 994 yield chunk
995 995 self.iter = splitbig(in_iter)
996 996 self._queue = []
997 997
998 998 def read(self, l):
999 999 """Read L bytes of data from the iterator of chunks of data.
1000 1000 Returns less than L bytes if the iterator runs dry."""
1001 1001 left = l
1002 1002 buf = ''
1003 1003 queue = self._queue
1004 1004 while left > 0:
1005 1005 # refill the queue
1006 1006 if not queue:
1007 1007 target = 2**18
1008 1008 for chunk in self.iter:
1009 1009 queue.append(chunk)
1010 1010 target -= len(chunk)
1011 1011 if target <= 0:
1012 1012 break
1013 1013 if not queue:
1014 1014 break
1015 1015
1016 1016 chunk = queue.pop(0)
1017 1017 left -= len(chunk)
1018 1018 if left < 0:
1019 1019 queue.insert(0, chunk[left:])
1020 1020 buf += chunk[:left]
1021 1021 else:
1022 1022 buf += chunk
1023 1023
1024 1024 return buf
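
An illustrative session (editor's sketch): read() re-slices arbitrary input chunks into exactly-sized reads until the iterator runs dry.

>>> from mercurial.util import chunkbuffer
>>> cb = chunkbuffer(iter(['abc', 'def', 'gh']))
>>> cb.read(4)
'abcd'
>>> cb.read(10) # shorter result: the iterator is exhausted
'efgh'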
1025 1025
1026 1026 def filechunkiter(f, size=65536, limit=None):
1027 1027 """Create a generator that produces the data in the file size
1028 1028 (default 65536) bytes at a time, up to optional limit (default is
1029 1029 to read all data). Chunks may be less than size bytes if the
1030 1030 chunk is the last chunk in the file, or the file is a socket or
1031 1031 some other type of file that sometimes reads less data than is
1032 1032 requested."""
1033 1033 assert size >= 0
1034 1034 assert limit is None or limit >= 0
1035 1035 while True:
1036 1036 if limit is None:
1037 1037 nbytes = size
1038 1038 else:
1039 1039 nbytes = min(limit, size)
1040 1040 s = nbytes and f.read(nbytes)
1041 1041 if not s:
1042 1042 break
1043 1043 if limit:
1044 1044 limit -= len(s)
1045 1045 yield s
1046 1046
1047 1047 def makedate():
1048 1048 lt = time.localtime()
1049 1049 if lt[8] == 1 and time.daylight:
1050 1050 tz = time.altzone
1051 1051 else:
1052 1052 tz = time.timezone
1053 1053 t = time.mktime(lt)
1054 1054 if t < 0:
1055 1055 hint = _("check your clock")
1056 1056 raise Abort(_("negative timestamp: %d") % t, hint=hint)
1057 1057 return t, tz
1058 1058
1059 1059 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1060 1060 """represent a (unixtime, offset) tuple as a localized time.
1061 1061 unixtime is seconds since the epoch, and offset is the time zone's
1062 1062 number of seconds away from UTC. "%1" and "%2" in the format are
1063 1063 replaced with the UTC offset's sign-and-hours and minutes parts."""
1064 1064 t, tz = date or makedate()
1065 1065 if t < 0:
1066 1066 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1067 1067 tz = 0
1068 1068 if "%1" in format or "%2" in format:
1069 1069 sign = (tz > 0) and "-" or "+"
1070 1070 minutes = abs(tz) // 60
1071 1071 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1072 1072 format = format.replace("%2", "%02d" % (minutes % 60))
1073 1073 s = time.strftime(format, time.gmtime(float(t) - tz))
1074 1074 return s
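
Illustrative calls (editor's sketch; the C locale is assumed for the day/month names):

>>> from mercurial.util import datestr
>>> datestr((0, 0))
'Thu Jan 01 00:00:00 1970 +0000'
>>> datestr((0, -3600), format='%Y-%m-%d %H:%M %1%2')
'1970-01-01 01:00 +0100'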
1075 1075
1076 1076 def shortdate(date=None):
1077 1077 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1078 1078 return datestr(date, format='%Y-%m-%d')
1079 1079
1080 1080 def strdate(string, format, defaults=[]):
1081 1081 """parse a localized time string and return a (unixtime, offset) tuple.
1082 1082 if the string cannot be parsed, ValueError is raised."""
1083 1083 def timezone(string):
1084 1084 tz = string.split()[-1]
1085 1085 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1086 1086 sign = (tz[0] == "+") and 1 or -1
1087 1087 hours = int(tz[1:3])
1088 1088 minutes = int(tz[3:5])
1089 1089 return -sign * (hours * 60 + minutes) * 60
1090 1090 if tz == "GMT" or tz == "UTC":
1091 1091 return 0
1092 1092 return None
1093 1093
1094 1094 # NOTE: unixtime = localunixtime + offset
1095 1095 offset, date = timezone(string), string
1096 1096 if offset is not None:
1097 1097 date = " ".join(string.split()[:-1])
1098 1098
1099 1099 # add missing elements from defaults
1100 1100 usenow = False # default to using biased defaults
1101 1101 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1102 1102 found = [True for p in part if ("%"+p) in format]
1103 1103 if not found:
1104 1104 date += "@" + defaults[part][usenow]
1105 1105 format += "@%" + part[0]
1106 1106 else:
1107 1107 # We've found a specific time element, less specific time
1108 1108 # elements are relative to today
1109 1109 usenow = True
1110 1110
1111 1111 timetuple = time.strptime(date, format)
1112 1112 localunixtime = int(calendar.timegm(timetuple))
1113 1113 if offset is None:
1114 1114 # local timezone
1115 1115 unixtime = int(time.mktime(timetuple))
1116 1116 offset = unixtime - localunixtime
1117 1117 else:
1118 1118 unixtime = localunixtime + offset
1119 1119 return unixtime, offset
1120 1120
1121 1121 def parsedate(date, formats=None, bias={}):
1122 1122 """parse a localized date/time and return a (unixtime, offset) tuple.
1123 1123
1124 1124 The date may be a "unixtime offset" string or in one of the specified
1125 1125 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1126 1126 """
1127 1127 if not date:
1128 1128 return 0, 0
1129 1129 if isinstance(date, tuple) and len(date) == 2:
1130 1130 return date
1131 1131 if not formats:
1132 1132 formats = defaultdateformats
1133 1133 date = date.strip()
1134 1134 try:
1135 1135 when, offset = map(int, date.split(' '))
1136 1136 except ValueError:
1137 1137 # fill out defaults
1138 1138 now = makedate()
1139 1139 defaults = {}
1140 1140 nowmap = {}
1141 1141 for part in ("d", "mb", "yY", "HI", "M", "S"):
1142 1142 # this piece is for rounding the specific end of unknowns
1143 1143 b = bias.get(part)
1144 1144 if b is None:
1145 1145 if part[0] in "HMS":
1146 1146 b = "00"
1147 1147 else:
1148 1148 b = "0"
1149 1149
1150 1150 # this piece is for matching the generic end to today's date
1151 1151 n = datestr(now, "%" + part[0])
1152 1152
1153 1153 defaults[part] = (b, n)
1154 1154
1155 1155 for format in formats:
1156 1156 try:
1157 1157 when, offset = strdate(date, format, defaults)
1158 1158 except (ValueError, OverflowError):
1159 1159 pass
1160 1160 else:
1161 1161 break
1162 1162 else:
1163 1163 raise Abort(_('invalid date: %r') % date)
1164 1164 # validate explicit (probably user-specified) date and
1165 1165 # time zone offset. values must fit in signed 32 bits for
1166 1166 # current 32-bit linux runtimes. timezones go from UTC-12
1167 1167 # to UTC+14
1168 1168 if abs(when) > 0x7fffffff:
1169 1169 raise Abort(_('date exceeds 32 bits: %d') % when)
1170 1170 if when < 0:
1171 1171 raise Abort(_('negative date value: %d') % when)
1172 1172 if offset < -50400 or offset > 43200:
1173 1173 raise Abort(_('impossible time zone offset: %d') % offset)
1174 1174 return when, offset
1175 1175
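# Illustrative example (not in the original source): a "unixtime offset"
# string takes the fast path and is returned as an integer pair:
#
#     >>> parsedate('1165432709 18000')
#     (1165432709, 18000)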
1176 1176 def matchdate(date):
1177 1177 """Return a function that matches a given date match specifier
1178 1178
1179 1179 Formats include:
1180 1180
1181 1181 '{date}' match a given date to the accuracy provided
1182 1182
1183 1183 '<{date}' on or before a given date
1184 1184
1185 1185 '>{date}' on or after a given date
1186 1186
1187 1187 >>> p1 = parsedate("10:29:59")
1188 1188 >>> p2 = parsedate("10:30:00")
1189 1189 >>> p3 = parsedate("10:30:59")
1190 1190 >>> p4 = parsedate("10:31:00")
1191 1191 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1192 1192 >>> f = matchdate("10:30")
1193 1193 >>> f(p1[0])
1194 1194 False
1195 1195 >>> f(p2[0])
1196 1196 True
1197 1197 >>> f(p3[0])
1198 1198 True
1199 1199 >>> f(p4[0])
1200 1200 False
1201 1201 >>> f(p5[0])
1202 1202 False
1203 1203 """
1204 1204
1205 1205 def lower(date):
1206 1206 d = dict(mb="1", d="1")
1207 1207 return parsedate(date, extendeddateformats, d)[0]
1208 1208
1209 1209 def upper(date):
1210 1210 d = dict(mb="12", HI="23", M="59", S="59")
1211 1211 for days in ("31", "30", "29"):
1212 1212 try:
1213 1213 d["d"] = days
1214 1214 return parsedate(date, extendeddateformats, d)[0]
1215 1215 except Abort:
1216 1216 pass
1217 1217 d["d"] = "28"
1218 1218 return parsedate(date, extendeddateformats, d)[0]
1219 1219
1220 1220 date = date.strip()
1221 1221
1222 1222 if not date:
1223 1223 raise Abort(_("dates cannot consist entirely of whitespace"))
1224 1224 elif date[0] == "<":
1225 1225 when = upper(date[1:])
1226 1226 return lambda x: x <= when
1227 1227 elif date[0] == ">":
1228 1228 when = lower(date[1:])
1229 1229 return lambda x: x >= when
1230 1230 elif date[0] == "-":
1231 1231 try:
1232 1232 days = int(date[1:])
1233 1233 except ValueError:
1234 1234 raise Abort(_("invalid day spec: %s") % date[1:])
1235 1235 when = makedate()[0] - days * 3600 * 24
1236 1236 return lambda x: x >= when
1237 1237 elif " to " in date:
1238 1238 a, b = date.split(" to ")
1239 1239 start, stop = lower(a), upper(b)
1240 1240 return lambda x: x >= start and x <= stop
1241 1241 else:
1242 1242 start, stop = lower(date), upper(date)
1243 1243 return lambda x: x >= start and x <= stop
1244 1244
1245 1245 def shortuser(user):
1246 1246 """Return a short representation of a user name or email address."""
1247 1247 f = user.find('@')
1248 1248 if f >= 0:
1249 1249 user = user[:f]
1250 1250 f = user.find('<')
1251 1251 if f >= 0:
1252 1252 user = user[f + 1:]
1253 1253 f = user.find(' ')
1254 1254 if f >= 0:
1255 1255 user = user[:f]
1256 1256 f = user.find('.')
1257 1257 if f >= 0:
1258 1258 user = user[:f]
1259 1259 return user
1260 1260
1261 1261 def email(author):
1262 1262 '''get email of author.'''
1263 1263 r = author.find('>')
1264 1264 if r == -1:
1265 1265 r = None
1266 1266 return author[author.find('<') + 1:r]
1267 1267
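# Examples (an editorial sketch; the address is made up):
#
#     >>> shortuser('John Doe <john.doe@example.com>')
#     'john'
#     >>> email('John Doe <john.doe@example.com>')
#     'john.doe@example.com'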
1268 1268 def _ellipsis(text, maxlength):
1269 1269 if len(text) <= maxlength:
1270 1270 return text, False
1271 1271 else:
1272 1272 return "%s..." % (text[:maxlength - 3]), True
1273 1273
1274 1274 def ellipsis(text, maxlength=400):
1275 1275 """Trim string to at most maxlength (default: 400) characters."""
1276 1276 try:
1277 1277 # use unicode not to split at intermediate multi-byte sequence
1278 1278 utext, truncated = _ellipsis(text.decode(encoding.encoding),
1279 1279 maxlength)
1280 1280 if not truncated:
1281 1281 return text
1282 1282 return utext.encode(encoding.encoding)
1283 1283 except (UnicodeDecodeError, UnicodeEncodeError):
1284 1284 return _ellipsis(text, maxlength)[0]
1285 1285
1286 1286 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
1287 1287 '''yield every hg repository under path, recursively.'''
1288 1288 def errhandler(err):
1289 1289 if err.filename == path:
1290 1290 raise err
1291 1291 if followsym and hasattr(os.path, 'samestat'):
1292 1292 def _add_dir_if_not_there(dirlst, dirname):
1293 1293 match = False
1294 1294 samestat = os.path.samestat
1295 1295 dirstat = os.stat(dirname)
1296 1296 for lstdirstat in dirlst:
1297 1297 if samestat(dirstat, lstdirstat):
1298 1298 match = True
1299 1299 break
1300 1300 if not match:
1301 1301 dirlst.append(dirstat)
1302 1302 return not match
1303 1303 else:
1304 1304 followsym = False
1305 1305
1306 1306 if (seen_dirs is None) and followsym:
1307 1307 seen_dirs = []
1308 1308 _add_dir_if_not_there(seen_dirs, path)
1309 1309 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1310 1310 dirs.sort()
1311 1311 if '.hg' in dirs:
1312 1312 yield root # found a repository
1313 1313 qroot = os.path.join(root, '.hg', 'patches')
1314 1314 if os.path.isdir(os.path.join(qroot, '.hg')):
1315 1315 yield qroot # we have a patch queue repo here
1316 1316 if recurse:
1317 1317 # avoid recursing inside the .hg directory
1318 1318 dirs.remove('.hg')
1319 1319 else:
1320 1320 dirs[:] = [] # don't descend further
1321 1321 elif followsym:
1322 1322 newdirs = []
1323 1323 for d in dirs:
1324 1324 fname = os.path.join(root, d)
1325 1325 if _add_dir_if_not_there(seen_dirs, fname):
1326 1326 if os.path.islink(fname):
1327 1327 for hgname in walkrepos(fname, True, seen_dirs):
1328 1328 yield hgname
1329 1329 else:
1330 1330 newdirs.append(d)
1331 1331 dirs[:] = newdirs
1332 1332
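# Hypothetical usage (a sketch; the path is invented): list every
# repository below a directory, following symlinks without looping:
#
#     for repo in walkrepos('/srv/repos', followsym=True):
#         print repo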
1333 1333 _rcpath = None
1334 1334
1335 1335 def os_rcpath():
1336 1336 '''return default os-specific hgrc search path'''
1337 1337 path = system_rcpath()
1338 1338 path.extend(user_rcpath())
1339 1339 path = [os.path.normpath(f) for f in path]
1340 1340 return path
1341 1341
1342 1342 def rcpath():
1343 1343 '''return hgrc search path. if env var HGRCPATH is set, use it.
1344 1344 for each item in path, if directory, use files ending in .rc,
1345 1345 else use item.
1346 1346 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1347 1347 if no HGRCPATH, use default os-specific path.'''
1348 1348 global _rcpath
1349 1349 if _rcpath is None:
1350 1350 if 'HGRCPATH' in os.environ:
1351 1351 _rcpath = []
1352 1352 for p in os.environ['HGRCPATH'].split(os.pathsep):
1353 1353 if not p:
1354 1354 continue
1355 1355 p = expandpath(p)
1356 1356 if os.path.isdir(p):
1357 1357 for f, kind in osutil.listdir(p):
1358 1358 if f.endswith('.rc'):
1359 1359 _rcpath.append(os.path.join(p, f))
1360 1360 else:
1361 1361 _rcpath.append(p)
1362 1362 else:
1363 1363 _rcpath = os_rcpath()
1364 1364 return _rcpath
1365 1365
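# Illustrative behavior (hedged; the paths are hypothetical): with
# HGRCPATH='/etc/mercurial/hgrc.d:/home/user/.hgrc', rcpath() yields every
# '*.rc' file under /etc/mercurial/hgrc.d followed by /home/user/.hgrc.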
1366 1366 def bytecount(nbytes):
1367 1367 '''return byte count formatted as readable string, with units'''
1368 1368
1369 1369 units = (
1370 1370 (100, 1 << 30, _('%.0f GB')),
1371 1371 (10, 1 << 30, _('%.1f GB')),
1372 1372 (1, 1 << 30, _('%.2f GB')),
1373 1373 (100, 1 << 20, _('%.0f MB')),
1374 1374 (10, 1 << 20, _('%.1f MB')),
1375 1375 (1, 1 << 20, _('%.2f MB')),
1376 1376 (100, 1 << 10, _('%.0f KB')),
1377 1377 (10, 1 << 10, _('%.1f KB')),
1378 1378 (1, 1 << 10, _('%.2f KB')),
1379 1379 (1, 1, _('%.0f bytes')),
1380 1380 )
1381 1381
1382 1382 for multiplier, divisor, format in units:
1383 1383 if nbytes >= divisor * multiplier:
1384 1384 return format % (nbytes / float(divisor))
1385 1385 return units[-1][2] % nbytes
1386 1386
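# Examples (a sketch, not part of the original module):
#
#     >>> bytecount(2048)
#     '2.00 KB'
#     >>> bytecount(123456789)
#     '118 MB'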
1387 def drop_scheme(scheme, path):
1388 sc = scheme + ':'
1389 if path.startswith(sc):
1390 path = path[len(sc):]
1391 if path.startswith('//'):
1392 if scheme == 'file':
1393 i = path.find('/', 2)
1394 if i == -1:
1395 return ''
1396 # On Windows, absolute paths are rooted at the current drive
1397 # root. On POSIX they are rooted at the file system root.
1398 if os.name == 'nt':
1399 droot = os.path.splitdrive(os.getcwd())[0] + '/'
1400 path = os.path.join(droot, path[i + 1:])
1401 else:
1402 path = path[i:]
1403 else:
1404 path = path[2:]
1405 return path
1406
1407 1387 def uirepr(s):
1408 1388 # Avoid double backslash in Windows path repr()
1409 1389 return repr(s).replace('\\\\', '\\')
1410 1390
1411 1391 # delay import of textwrap
1412 1392 def MBTextWrapper(**kwargs):
1413 1393 class tw(textwrap.TextWrapper):
1414 1394 """
1415 1395 Extend TextWrapper for double-width characters.
1416 1396
1417 1397 Some Asian characters use two terminal columns instead of one.
1418 1398 A good example of this behavior can be seen with u'\u65e5\u672c',
1419 1399 the two Japanese characters for "Japan":
1420 1400 len() returns 2, but when printed to a terminal, they eat 4 columns.
1421 1401
1422 1402 (Note that this has nothing whatsoever to do with unicode
1423 1403 representation or the encoding of the underlying string.)
1424 1404 """
1425 1405 def __init__(self, **kwargs):
1426 1406 textwrap.TextWrapper.__init__(self, **kwargs)
1427 1407
1428 1408 def _cutdown(self, str, space_left):
1429 1409 l = 0
1430 1410 ucstr = unicode(str, encoding.encoding)
1431 1411 colwidth = unicodedata.east_asian_width
1432 1412 for i in xrange(len(ucstr)):
1433 1413 l += colwidth(ucstr[i]) in 'WFA' and 2 or 1
1434 1414 if space_left < l:
1435 1415 return (ucstr[:i].encode(encoding.encoding),
1436 1416 ucstr[i:].encode(encoding.encoding))
1437 1417 return str, ''
1438 1418
1439 1419 # overriding of base class
1440 1420 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1441 1421 space_left = max(width - cur_len, 1)
1442 1422
1443 1423 if self.break_long_words:
1444 1424 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1445 1425 cur_line.append(cut)
1446 1426 reversed_chunks[-1] = res
1447 1427 elif not cur_line:
1448 1428 cur_line.append(reversed_chunks.pop())
1449 1429
1450 1430 global MBTextWrapper
1451 1431 MBTextWrapper = tw
1452 1432 return tw(**kwargs)
1453 1433
1454 1434 def wrap(line, width, initindent='', hangindent=''):
1455 1435 maxindent = max(len(hangindent), len(initindent))
1456 1436 if width <= maxindent:
1457 1437 # adjust for weird terminal size
1458 1438 width = max(78, maxindent + 1)
1459 1439 wrapper = MBTextWrapper(width=width,
1460 1440 initial_indent=initindent,
1461 1441 subsequent_indent=hangindent)
1462 1442 return wrapper.fill(line)
1463 1443
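# Hypothetical usage (illustrative only; the text and widths are made up):
# wrap option help with a hanging indent, multi-byte aware via the
# MBTextWrapper defined above.
#
#     print wrap('a fairly long option description that will not fit',
#                width=40, initindent='  -o  ', hangindent='        ')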
1464 1444 def iterlines(iterator):
1465 1445 for chunk in iterator:
1466 1446 for line in chunk.splitlines():
1467 1447 yield line
1468 1448
1469 1449 def expandpath(path):
1470 1450 return os.path.expanduser(os.path.expandvars(path))
1471 1451
1472 1452 def hgcmd():
1473 1453 """Return the command used to execute current hg
1474 1454
1475 1455 This is different from hgexecutable() because on Windows we want
1476 1456 to avoid things like batch files that open new shell windows, so we
1477 1457 get either the python call or current executable.
1478 1458 """
1479 1459 if main_is_frozen():
1480 1460 return [sys.executable]
1481 1461 return gethgcmd()
1482 1462
1483 1463 def rundetached(args, condfn):
1484 1464 """Execute the argument list in a detached process.
1485 1465
1486 1466 condfn is a callable which is called repeatedly and should return
1487 1467 True once the child process is known to have started successfully.
1488 1468 At this point, the child process PID is returned. If the child
1489 1469 process fails to start or finishes before condfn() evaluates to
1490 1470 True, return -1.
1491 1471 """
1492 1472 # Windows case is easier because the child process is either
1493 1473 # successfully starting and validating the condition or exiting
1494 1474 # on failure. We just poll on its PID. On Unix, if the child
1495 1475 # process fails to start, it will be left in a zombie state until
1496 1476 the parent waits on it, which we cannot do since we expect a long
1497 1477 # running process on success. Instead we listen for SIGCHLD telling
1498 1478 # us our child process terminated.
1499 1479 terminated = set()
1500 1480 def handler(signum, frame):
1501 1481 terminated.add(os.wait())
1502 1482 prevhandler = None
1503 1483 if hasattr(signal, 'SIGCHLD'):
1504 1484 prevhandler = signal.signal(signal.SIGCHLD, handler)
1505 1485 try:
1506 1486 pid = spawndetached(args)
1507 1487 while not condfn():
1508 1488 if ((pid in terminated or not testpid(pid))
1509 1489 and not condfn()):
1510 1490 return -1
1511 1491 time.sleep(0.1)
1512 1492 return pid
1513 1493 finally:
1514 1494 if prevhandler is not None:
1515 1495 signal.signal(signal.SIGCHLD, prevhandler)
1516 1496
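# Hypothetical invocation (a sketch; the command and pid file are made up):
# start a server detached and wait until its pid file appears.
#
#     pid = rundetached(['hg', 'serve', '--pid-file', 'hg.pid'],
#                       lambda: os.path.exists('hg.pid'))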
1517 1497 try:
1518 1498 any, all = any, all
1519 1499 except NameError:
1520 1500 def any(iterable):
1521 1501 for i in iterable:
1522 1502 if i:
1523 1503 return True
1524 1504 return False
1525 1505
1526 1506 def all(iterable):
1527 1507 for i in iterable:
1528 1508 if not i:
1529 1509 return False
1530 1510 return True
1531 1511
1532 1512 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1533 1513 """Return the result of interpolating items in the mapping into string s.
1534 1514
1535 1515 prefix is a single character string, or a two character string with
1536 1516 a backslash as the first character if the prefix needs to be escaped in
1537 1517 a regular expression.
1538 1518
1539 1519 fn is an optional function that will be applied to the replacement text
1540 1520 just before replacement.
1541 1521
1542 1522 escape_prefix is an optional flag that allows using doubled prefix for
1543 1523 its escaping.
1544 1524 """
1545 1525 fn = fn or (lambda s: s)
1546 1526 patterns = '|'.join(mapping.keys())
1547 1527 if escape_prefix:
1548 1528 patterns += '|' + prefix
1549 1529 if len(prefix) > 1:
1550 1530 prefix_char = prefix[1:]
1551 1531 else:
1552 1532 prefix_char = prefix
1553 1533 mapping[prefix_char] = prefix_char
1554 1534 r = re.compile(r'%s(%s)' % (prefix, patterns))
1555 1535 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1556 1536
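# Example (illustrative, not original code):
#
#     >>> interpolate('%', {'foo': 'bar'}, 'say %foo')
#     'say bar'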
1557 1537 def getport(port):
1558 1538 """Return the port for a given network service.
1559 1539
1560 1540 If port is an integer, it's returned as is. If it's a string, it's
1561 1541 looked up using socket.getservbyname(). If there's no matching
1562 1542 service, util.Abort is raised.
1563 1543 """
1564 1544 try:
1565 1545 return int(port)
1566 1546 except ValueError:
1567 1547 pass
1568 1548
1569 1549 try:
1570 1550 return socket.getservbyname(port)
1571 1551 except socket.error:
1572 1552 raise Abort(_("no port number associated with service '%s'") % port)
1573 1553
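# Examples (a sketch; the 'http' lookup assumes a standard services
# database mapping it to port 80):
#
#     >>> getport(8080)
#     8080
#     >>> getport('http')
#     80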
1574 1554 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1575 1555 '0': False, 'no': False, 'false': False, 'off': False,
1576 1556 'never': False}
1577 1557
1578 1558 def parsebool(s):
1579 1559 """Parse s into a boolean.
1580 1560
1581 1561 If s is not a valid boolean, returns None.
1582 1562 """
1583 1563 return _booleans.get(s.lower(), None)
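# Examples (illustrative only):
#
#     >>> parsebool('on')
#     True
#     >>> parsebool('Never')
#     False
#     >>> print parsebool('maybe')
#     None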
@@ -1,560 +1,576 b''
1 1 Setting up test
2 2
3 3 $ hg init test
4 4 $ cd test
5 5 $ echo 0 > afile
6 6 $ hg add afile
7 7 $ hg commit -m "0.0"
8 8 $ echo 1 >> afile
9 9 $ hg commit -m "0.1"
10 10 $ echo 2 >> afile
11 11 $ hg commit -m "0.2"
12 12 $ echo 3 >> afile
13 13 $ hg commit -m "0.3"
14 14 $ hg update -C 0
15 15 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
16 16 $ echo 1 >> afile
17 17 $ hg commit -m "1.1"
18 18 created new head
19 19 $ echo 2 >> afile
20 20 $ hg commit -m "1.2"
21 21 $ echo "a line" > fred
22 22 $ echo 3 >> afile
23 23 $ hg add fred
24 24 $ hg commit -m "1.3"
25 25 $ hg mv afile adifferentfile
26 26 $ hg commit -m "1.3m"
27 27 $ hg update -C 3
28 28 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
29 29 $ hg mv afile anotherfile
30 30 $ hg commit -m "0.3m"
31 31 $ hg verify
32 32 checking changesets
33 33 checking manifests
34 34 crosschecking files in changesets and manifests
35 35 checking files
36 36 4 files, 9 changesets, 7 total revisions
37 37 $ cd ..
38 38 $ hg init empty
39 39
40 40 Bundle --all
41 41
42 42 $ hg -R test bundle --all all.hg
43 43 9 changesets found
44 44
45 45 Bundle test to full.hg
46 46
47 47 $ hg -R test bundle full.hg empty
48 48 searching for changes
49 49 9 changesets found
50 50
51 51 Unbundle full.hg in test
52 52
53 53 $ hg -R test unbundle full.hg
54 54 adding changesets
55 55 adding manifests
56 56 adding file changes
57 57 added 0 changesets with 0 changes to 4 files
58 58 (run 'hg update' to get a working copy)
59 59
60 60 Verify empty
61 61
62 62 $ hg -R empty heads
63 63 [1]
64 64 $ hg -R empty verify
65 65 checking changesets
66 66 checking manifests
67 67 crosschecking files in changesets and manifests
68 68 checking files
69 69 0 files, 0 changesets, 0 total revisions
70 70
71 71 Pull full.hg into test (using --cwd)
72 72
73 73 $ hg --cwd test pull ../full.hg
74 74 pulling from ../full.hg
75 75 searching for changes
76 76 no changes found
77 77
78 78 Pull full.hg into empty (using --cwd)
79 79
80 80 $ hg --cwd empty pull ../full.hg
81 81 pulling from ../full.hg
82 82 requesting all changes
83 83 adding changesets
84 84 adding manifests
85 85 adding file changes
86 86 added 9 changesets with 7 changes to 4 files (+1 heads)
87 87 (run 'hg heads' to see heads, 'hg merge' to merge)
88 88
89 89 Rollback empty
90 90
91 91 $ hg -R empty rollback
92 92 repository tip rolled back to revision -1 (undo pull)
93 93 working directory now based on revision -1
94 94
95 95 Pull full.hg into empty again (using --cwd)
96 96
97 97 $ hg --cwd empty pull ../full.hg
98 98 pulling from ../full.hg
99 99 requesting all changes
100 100 adding changesets
101 101 adding manifests
102 102 adding file changes
103 103 added 9 changesets with 7 changes to 4 files (+1 heads)
104 104 (run 'hg heads' to see heads, 'hg merge' to merge)
105 105
106 106 Pull full.hg into test (using -R)
107 107
108 108 $ hg -R test pull full.hg
109 109 pulling from full.hg
110 110 searching for changes
111 111 no changes found
112 112
113 113 Pull full.hg into empty (using -R)
114 114
115 115 $ hg -R empty pull full.hg
116 116 pulling from full.hg
117 117 searching for changes
118 118 no changes found
119 119
120 120 Rollback empty
121 121
122 122 $ hg -R empty rollback
123 123 repository tip rolled back to revision -1 (undo pull)
124 124 working directory now based on revision -1
125 125
126 126 Pull full.hg into empty again (using -R)
127 127
128 128 $ hg -R empty pull full.hg
129 129 pulling from full.hg
130 130 requesting all changes
131 131 adding changesets
132 132 adding manifests
133 133 adding file changes
134 134 added 9 changesets with 7 changes to 4 files (+1 heads)
135 135 (run 'hg heads' to see heads, 'hg merge' to merge)
136 136
137 137 Log -R full.hg in fresh empty
138 138
139 139 $ rm -r empty
140 140 $ hg init empty
141 141 $ cd empty
142 142 $ hg -R bundle://../full.hg log
143 143 changeset: 8:aa35859c02ea
144 144 tag: tip
145 145 parent: 3:eebf5a27f8ca
146 146 user: test
147 147 date: Thu Jan 01 00:00:00 1970 +0000
148 148 summary: 0.3m
149 149
150 150 changeset: 7:a6a34bfa0076
151 151 user: test
152 152 date: Thu Jan 01 00:00:00 1970 +0000
153 153 summary: 1.3m
154 154
155 155 changeset: 6:7373c1169842
156 156 user: test
157 157 date: Thu Jan 01 00:00:00 1970 +0000
158 158 summary: 1.3
159 159
160 160 changeset: 5:1bb50a9436a7
161 161 user: test
162 162 date: Thu Jan 01 00:00:00 1970 +0000
163 163 summary: 1.2
164 164
165 165 changeset: 4:095197eb4973
166 166 parent: 0:f9ee2f85a263
167 167 user: test
168 168 date: Thu Jan 01 00:00:00 1970 +0000
169 169 summary: 1.1
170 170
171 171 changeset: 3:eebf5a27f8ca
172 172 user: test
173 173 date: Thu Jan 01 00:00:00 1970 +0000
174 174 summary: 0.3
175 175
176 176 changeset: 2:e38ba6f5b7e0
177 177 user: test
178 178 date: Thu Jan 01 00:00:00 1970 +0000
179 179 summary: 0.2
180 180
181 181 changeset: 1:34c2bf6b0626
182 182 user: test
183 183 date: Thu Jan 01 00:00:00 1970 +0000
184 184 summary: 0.1
185 185
186 186 changeset: 0:f9ee2f85a263
187 187 user: test
188 188 date: Thu Jan 01 00:00:00 1970 +0000
189 189 summary: 0.0
190 190
191 191 Make sure bundlerepo doesn't leak tempfiles (issue2491)
192 192
193 193 $ ls .hg
194 194 00changelog.i
195 195 cache
196 196 requires
197 197 store
198 198
199 199 Pull ../full.hg into empty (with hook)
200 200
201 201 $ echo '[hooks]' >> .hg/hgrc
202 202 $ echo 'changegroup = python "$TESTDIR"/printenv.py changegroup' >> .hg/hgrc
203 203
204 204 doesn't work (yet?)
205 205
206 206 hg -R bundle://../full.hg verify
207 207
208 208 $ hg pull bundle://../full.hg
209 209 pulling from bundle:../full.hg
210 210 requesting all changes
211 211 adding changesets
212 212 adding manifests
213 213 adding file changes
214 214 added 9 changesets with 7 changes to 4 files (+1 heads)
215 215 changegroup hook: HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_SOURCE=pull HG_URL=bundle:../full.hg
216 216 (run 'hg heads' to see heads, 'hg merge' to merge)
217 217
218 218 Rollback empty
219 219
220 220 $ hg rollback
221 221 repository tip rolled back to revision -1 (undo pull)
222 222 working directory now based on revision -1
223 223 $ cd ..
224 224
225 225 Log -R bundle:empty+full.hg
226 226
227 227 $ hg -R bundle:empty+full.hg log --template="{rev} "; echo ""
228 228 8 7 6 5 4 3 2 1 0
229 229
230 230 Pull full.hg into empty again (using -R; with hook)
231 231
232 232 $ hg -R empty pull full.hg
233 233 pulling from full.hg
234 234 requesting all changes
235 235 adding changesets
236 236 adding manifests
237 237 adding file changes
238 238 added 9 changesets with 7 changes to 4 files (+1 heads)
239 239 changegroup hook: HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_SOURCE=pull HG_URL=bundle:empty+full.hg
240 240 (run 'hg heads' to see heads, 'hg merge' to merge)
241 241
242 242 Create partial clones
243 243
244 244 $ rm -r empty
245 245 $ hg init empty
246 246 $ hg clone -r 3 test partial
247 247 adding changesets
248 248 adding manifests
249 249 adding file changes
250 250 added 4 changesets with 4 changes to 1 files
251 251 updating to branch default
252 252 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
253 253 $ hg clone partial partial2
254 254 updating to branch default
255 255 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
256 256 $ cd partial
257 257
258 258 Log -R full.hg in partial
259 259
260 260 $ hg -R bundle://../full.hg log
261 261 changeset: 8:aa35859c02ea
262 262 tag: tip
263 263 parent: 3:eebf5a27f8ca
264 264 user: test
265 265 date: Thu Jan 01 00:00:00 1970 +0000
266 266 summary: 0.3m
267 267
268 268 changeset: 7:a6a34bfa0076
269 269 user: test
270 270 date: Thu Jan 01 00:00:00 1970 +0000
271 271 summary: 1.3m
272 272
273 273 changeset: 6:7373c1169842
274 274 user: test
275 275 date: Thu Jan 01 00:00:00 1970 +0000
276 276 summary: 1.3
277 277
278 278 changeset: 5:1bb50a9436a7
279 279 user: test
280 280 date: Thu Jan 01 00:00:00 1970 +0000
281 281 summary: 1.2
282 282
283 283 changeset: 4:095197eb4973
284 284 parent: 0:f9ee2f85a263
285 285 user: test
286 286 date: Thu Jan 01 00:00:00 1970 +0000
287 287 summary: 1.1
288 288
289 289 changeset: 3:eebf5a27f8ca
290 290 user: test
291 291 date: Thu Jan 01 00:00:00 1970 +0000
292 292 summary: 0.3
293 293
294 294 changeset: 2:e38ba6f5b7e0
295 295 user: test
296 296 date: Thu Jan 01 00:00:00 1970 +0000
297 297 summary: 0.2
298 298
299 299 changeset: 1:34c2bf6b0626
300 300 user: test
301 301 date: Thu Jan 01 00:00:00 1970 +0000
302 302 summary: 0.1
303 303
304 304 changeset: 0:f9ee2f85a263
305 305 user: test
306 306 date: Thu Jan 01 00:00:00 1970 +0000
307 307 summary: 0.0
308 308
309 309
310 310 Incoming full.hg in partial
311 311
312 312 $ hg incoming bundle://../full.hg
313 313 comparing with bundle:../full.hg
314 314 searching for changes
315 315 changeset: 4:095197eb4973
316 316 parent: 0:f9ee2f85a263
317 317 user: test
318 318 date: Thu Jan 01 00:00:00 1970 +0000
319 319 summary: 1.1
320 320
321 321 changeset: 5:1bb50a9436a7
322 322 user: test
323 323 date: Thu Jan 01 00:00:00 1970 +0000
324 324 summary: 1.2
325 325
326 326 changeset: 6:7373c1169842
327 327 user: test
328 328 date: Thu Jan 01 00:00:00 1970 +0000
329 329 summary: 1.3
330 330
331 331 changeset: 7:a6a34bfa0076
332 332 user: test
333 333 date: Thu Jan 01 00:00:00 1970 +0000
334 334 summary: 1.3m
335 335
336 336 changeset: 8:aa35859c02ea
337 337 tag: tip
338 338 parent: 3:eebf5a27f8ca
339 339 user: test
340 340 date: Thu Jan 01 00:00:00 1970 +0000
341 341 summary: 0.3m
342 342
343 343
344 344 Outgoing -R full.hg vs partial2 in partial
345 345
346 346 $ hg -R bundle://../full.hg outgoing ../partial2
347 347 comparing with ../partial2
348 348 searching for changes
349 349 changeset: 4:095197eb4973
350 350 parent: 0:f9ee2f85a263
351 351 user: test
352 352 date: Thu Jan 01 00:00:00 1970 +0000
353 353 summary: 1.1
354 354
355 355 changeset: 5:1bb50a9436a7
356 356 user: test
357 357 date: Thu Jan 01 00:00:00 1970 +0000
358 358 summary: 1.2
359 359
360 360 changeset: 6:7373c1169842
361 361 user: test
362 362 date: Thu Jan 01 00:00:00 1970 +0000
363 363 summary: 1.3
364 364
365 365 changeset: 7:a6a34bfa0076
366 366 user: test
367 367 date: Thu Jan 01 00:00:00 1970 +0000
368 368 summary: 1.3m
369 369
370 370 changeset: 8:aa35859c02ea
371 371 tag: tip
372 372 parent: 3:eebf5a27f8ca
373 373 user: test
374 374 date: Thu Jan 01 00:00:00 1970 +0000
375 375 summary: 0.3m
376 376
377 377
378 378 Outgoing -R does-not-exist.hg vs partial2 in partial
379 379
380 380 $ hg -R bundle://../does-not-exist.hg outgoing ../partial2
381 381 abort: No such file or directory: ../does-not-exist.hg
382 382 [255]
383 383 $ cd ..
384 384
385 385 Direct clone from bundle (all-history)
386 386
387 387 $ hg clone full.hg full-clone
388 388 requesting all changes
389 389 adding changesets
390 390 adding manifests
391 391 adding file changes
392 392 added 9 changesets with 7 changes to 4 files (+1 heads)
393 393 updating to branch default
394 394 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
395 395 $ hg -R full-clone heads
396 396 changeset: 8:aa35859c02ea
397 397 tag: tip
398 398 parent: 3:eebf5a27f8ca
399 399 user: test
400 400 date: Thu Jan 01 00:00:00 1970 +0000
401 401 summary: 0.3m
402 402
403 403 changeset: 7:a6a34bfa0076
404 404 user: test
405 405 date: Thu Jan 01 00:00:00 1970 +0000
406 406 summary: 1.3m
407 407
408 408 $ rm -r full-clone
409 409
410 410 When cloning from a non-copiable repository into '', do not
411 411 recurse infinitely (issue 2528)
412 412
413 413 $ hg clone full.hg ''
414 414 abort: No such file or directory
415 415 [255]
416 416
417 417 test for http://mercurial.selenic.com/bts/issue216
418 418
419 419 Unbundle incremental bundles into fresh empty in one go
420 420
421 421 $ rm -r empty
422 422 $ hg init empty
423 423 $ hg -R test bundle --base null -r 0 ../0.hg
424 424 1 changesets found
425 425 $ hg -R test bundle --base 0 -r 1 ../1.hg
426 426 1 changesets found
427 427 $ hg -R empty unbundle -u ../0.hg ../1.hg
428 428 adding changesets
429 429 adding manifests
430 430 adding file changes
431 431 added 1 changesets with 1 changes to 1 files
432 432 adding changesets
433 433 adding manifests
434 434 adding file changes
435 435 added 1 changesets with 1 changes to 1 files
436 436 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
437 437
440 440 test for 540d1059c802
441 441
442 442 $ hg init orig
443 443 $ cd orig
444 444 $ echo foo > foo
445 445 $ hg add foo
446 446 $ hg ci -m 'add foo'
447 447
448 448 $ hg clone . ../copy
449 449 updating to branch default
450 450 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
451 451 $ hg tag foo
452 452
453 453 $ cd ../copy
454 454 $ echo >> foo
455 455 $ hg ci -m 'change foo'
456 456 $ hg bundle ../bundle.hg ../orig
457 457 searching for changes
458 458 1 changesets found
459 459
460 460 $ cd ../orig
461 461 $ hg incoming ../bundle.hg
462 462 comparing with ../bundle.hg
463 463 searching for changes
464 464 changeset: 2:ed1b79f46b9a
465 465 tag: tip
466 466 parent: 0:bbd179dfa0a7
467 467 user: test
468 468 date: Thu Jan 01 00:00:00 1970 +0000
469 469 summary: change foo
470 470
471 471 $ cd ..
472 472
473 test bundle with # in the filename (issue2154):
474
475 $ cp bundle.hg 'test#bundle.hg'
476 $ cd orig
477 $ hg incoming '../test#bundle.hg'
478 comparing with ../test
479 abort: unknown revision 'bundle.hg'!
480 [255]
481
482 note that percent encoding is not handled:
483
484 $ hg incoming ../test%23bundle.hg
485 abort: repository ../test%23bundle.hg not found!
486 [255]
487 $ cd ..
488
473 489 test for http://mercurial.selenic.com/bts/issue1144
474 490
475 491 test that verify bundle does not traceback
476 492
477 493 partial history bundle, fails with unknown parent
478 494
479 495 $ hg -R bundle.hg verify
480 496 abort: 00changelog.i@bbd179dfa0a7: unknown parent!
481 497 [255]
482 498
483 499 full history bundle, refuses to verify non-local repo
484 500
485 501 $ hg -R all.hg verify
486 502 abort: cannot verify bundle or remote repos
487 503 [255]
488 504
489 505 but, regular verify must continue to work
490 506
491 507 $ hg -R orig verify
492 508 checking changesets
493 509 checking manifests
494 510 crosschecking files in changesets and manifests
495 511 checking files
496 512 2 files, 2 changesets, 2 total revisions
497 513
498 514 diff against bundle
499 515
500 516 $ hg init b
501 517 $ cd b
502 518 $ hg -R ../all.hg diff -r tip
503 519 diff -r aa35859c02ea anotherfile
504 520 --- a/anotherfile Thu Jan 01 00:00:00 1970 +0000
505 521 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
506 522 @@ -1,4 +0,0 @@
507 523 -0
508 524 -1
509 525 -2
510 526 -3
511 527 $ cd ..
512 528
513 529 bundle single branch
514 530
515 531 $ hg init branchy
516 532 $ cd branchy
517 533 $ echo a >a
518 534 $ hg ci -Ama
519 535 adding a
520 536 $ echo b >b
521 537 $ hg ci -Amb
522 538 adding b
523 539 $ echo b1 >b1
524 540 $ hg ci -Amb1
525 541 adding b1
526 542 $ hg up 0
527 543 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
528 544 $ echo c >c
529 545 $ hg ci -Amc
530 546 adding c
531 547 created new head
532 548 $ echo c1 >c1
533 549 $ hg ci -Amc1
534 550 adding c1
535 551 $ hg clone -q .#tip part
536 552
537 553 == bundling via incoming
538 554
539 555 $ hg in -R part --bundle incoming.hg --template "{node}\n" .
540 556 comparing with .
541 557 searching for changes
542 558 d2ae7f538514cd87c17547b0de4cea71fe1af9fb
543 559 5ece8e77363e2b5269e27c66828b72da29e4341a
544 560
545 561 == bundling
546 562
547 563 $ hg bundle bundle.hg part --debug
548 564 searching for changes
549 565 common changesets up to c0025332f9ed
550 566 2 changesets found
551 567 list of changesets:
552 568 d2ae7f538514cd87c17547b0de4cea71fe1af9fb
553 569 5ece8e77363e2b5269e27c66828b72da29e4341a
554 570 bundling: 1 changesets
555 571 bundling: 2 changesets
556 572 bundling: 1/2 manifests (50.00%)
557 573 bundling: 2/2 manifests (100.00%)
558 574 bundling: b 0/2 files (0.00%)
559 575 bundling: b1 1/2 files (50.00%)
560 576
@@ -1,85 +1,89 b''
1 1 $ mkdir test
2 2 $ cd test
3 3
4 4 $ echo foo>foo
5 5 $ hg init
6 6 $ hg addremove
7 7 adding foo
8 8 $ hg commit -m 1
9 9
10 10 $ hg verify
11 11 checking changesets
12 12 checking manifests
13 13 crosschecking files in changesets and manifests
14 14 checking files
15 15 1 files, 1 changesets, 1 total revisions
16 16
17 17 $ hg serve -p $HGPORT -d --pid-file=hg.pid
18 18 $ cat hg.pid >> $DAEMON_PIDS
19 19 $ cd ..
20 20
21 21 $ hg clone --pull http://foo:bar@localhost:$HGPORT/ copy
22 22 requesting all changes
23 23 adding changesets
24 24 adding manifests
25 25 adding file changes
26 26 added 1 changesets with 1 changes to 1 files
27 27 updating to branch default
28 28 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 29
30 30 $ cd copy
31 31 $ hg verify
32 32 checking changesets
33 33 checking manifests
34 34 crosschecking files in changesets and manifests
35 35 checking files
36 36 1 files, 1 changesets, 1 total revisions
37 37
38 38 $ hg co
39 39 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
40 40 $ cat foo
41 41 foo
42 42
43 43 $ hg manifest --debug
44 44 2ed2a3912a0b24502043eae84ee4b279c18b90dd 644 foo
45 45
46 46 $ hg pull
47 47 pulling from http://foo:***@localhost:$HGPORT/
48 48 searching for changes
49 49 no changes found
50 50
51 51 $ hg rollback --dry-run --verbose
52 52 repository tip rolled back to revision -1 (undo pull: http://foo:***@localhost:$HGPORT/)
53 53
54 54 Issue622: hg init && hg pull -u URL doesn't checkout default branch
55 55
56 56 $ cd ..
57 57 $ hg init empty
58 58 $ cd empty
59 59 $ hg pull -u ../test
60 60 pulling from ../test
61 61 requesting all changes
62 62 adding changesets
63 63 adding manifests
64 64 adding file changes
65 65 added 1 changesets with 1 changes to 1 files
66 66 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
67 67
68 68 Test 'file:' uri handling:
69 69
70 70 $ hg pull -q file://../test-doesnt-exist
71 71 abort: file:// URLs can only refer to localhost
72 72 [255]
73 73
74 $ hg pull -q file://../test
75 abort: file:// URLs can only refer to localhost
76 [255]
77
74 78 $ hg pull -q file:../test
75 79
76 80 It's tricky to make file:// URLs working on every platform with
77 81 regular shell commands.
78 82
79 83 $ URL=`python -c "import os; print 'file://foobar' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test'"`
80 84 $ hg pull -q "$URL"
81 85 abort: file:// URLs can only refer to localhost
82 86 [255]
83 87
84 88 $ URL=`python -c "import os; print 'file://localhost' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test'"`
85 89 $ hg pull -q "$URL"