expand paths to local repository or bundle in appropriate classes...
Alexander Solovyov
r11154:17031fea stable
@@ -1,305 +1,305 @@ b'mercurial/bundlerepo.py'
1 1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 2 #
3 3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Repository class for viewing uncompressed bundles.
9 9
10 10 This provides a read-only repository interface to bundles as if they
11 11 were part of the actual repository.
12 12 """
13 13
14 14 from node import nullid
15 15 from i18n import _
16 16 import os, struct, bz2, zlib, tempfile, shutil
17 17 import changegroup, util, mdiff
18 18 import localrepo, changelog, manifest, filelog, revlog, error
19 19
20 20 class bundlerevlog(revlog.revlog):
21 21 def __init__(self, opener, indexfile, bundlefile,
22 22 linkmapper=None):
23 23 # How it works:
24 24 # to retrieve a revision, we need to know the offset of
25 25 # the revision in the bundlefile (an opened file).
26 26 #
27 27 # We store this offset in the index (start). To differentiate a
28 28 # rev in the bundle from a rev in the revlog, we check
29 29 # len(index[r]): if the tuple is bigger than 7, it is from the bundle
30 30 # (it is bigger because we also store the node the delta is against)
31 31 #
32 32 revlog.revlog.__init__(self, opener, indexfile)
33 33 self.bundlefile = bundlefile
34 34 self.basemap = {}
35 35 def chunkpositer():
36 36 for chunk in changegroup.chunkiter(bundlefile):
37 37 pos = bundlefile.tell()
38 38 yield chunk, pos - len(chunk)
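        # tell() is taken after chunkiter has consumed the chunk, so
        # pos - len(chunk) is the file offset where this chunk's data begins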
39 39 n = len(self)
40 40 prev = None
41 41 for chunk, start in chunkpositer():
42 42 size = len(chunk)
43 43 if size < 80:
44 44 raise util.Abort(_("invalid changegroup"))
45 45 start += 80
46 46 size -= 80
47 47 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
48 48 if node in self.nodemap:
49 49 prev = node
50 50 continue
51 51 for p in (p1, p2):
52 52 if p not in self.nodemap:
53 53 raise error.LookupError(p, self.indexfile,
54 54 _("unknown parent"))
55 55 if linkmapper is None:
56 56 link = n
57 57 else:
58 58 link = linkmapper(cs)
59 59
60 60 if not prev:
61 61 prev = p1
62 62 # start, size, full unc. size, base (unused), link, p1, p2, node
63 63 e = (revlog.offset_type(start, 0), size, -1, -1, link,
64 64 self.rev(p1), self.rev(p2), node)
65 65 self.basemap[n] = prev
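        # insert before the sentinel null entry that revlog keeps at the
        # end of the index, so the new rev becomes the last real entry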
66 66 self.index.insert(-1, e)
67 67 self.nodemap[node] = n
68 68 prev = node
69 69 n += 1
70 70
71 71 def bundle(self, rev):
72 72 """return True if rev is from the bundle"""
73 73 if rev < 0:
74 74 return False
75 75 return rev in self.basemap
76 76 def bundlebase(self, rev):
77 77 return self.basemap[rev]
78 78 def _chunk(self, rev):
79 79 # Warning: in case of bundle, the diff is against bundlebase,
80 80 # not against rev - 1
81 81 # XXX: could use some caching
82 82 if not self.bundle(rev):
83 83 return revlog.revlog._chunk(self, rev)
84 84 self.bundlefile.seek(self.start(rev))
85 85 return self.bundlefile.read(self.length(rev))
86 86
87 87 def revdiff(self, rev1, rev2):
88 88 """return or calculate a delta between two revisions"""
89 89 if self.bundle(rev1) and self.bundle(rev2):
90 90 # hot path for bundle
91 91 revb = self.rev(self.bundlebase(rev2))
92 92 if revb == rev1:
93 93 return self._chunk(rev2)
94 94 elif not self.bundle(rev1) and not self.bundle(rev2):
95 95 return revlog.revlog.revdiff(self, rev1, rev2)
96 96
97 97 return mdiff.textdiff(self.revision(self.node(rev1)),
98 98 self.revision(self.node(rev2)))
99 99
100 100 def revision(self, node):
101 101 """return an uncompressed revision of a given node"""
102 102 if node == nullid:
103 103 return ""
104 104
105 105 text = None
106 106 chain = []
107 107 iter_node = node
108 108 rev = self.rev(iter_node)
109 109 # reconstruct the revision if it is from a changegroup
110 110 while self.bundle(rev):
111 111 if self._cache and self._cache[0] == iter_node:
112 112 text = self._cache[2]
113 113 break
114 114 chain.append(rev)
115 115 iter_node = self.bundlebase(rev)
116 116 rev = self.rev(iter_node)
117 117 if text is None:
118 118 text = revlog.revlog.revision(self, iter_node)
119 119
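        # replay the bundle deltas on top of the base text; chain was built
        # walking from the requested node back toward the base, so pop()
        # applies the delta closest to the base first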
120 120 while chain:
121 121 delta = self._chunk(chain.pop())
122 122 text = mdiff.patches(text, [delta])
123 123
124 124 p1, p2 = self.parents(node)
125 125 if node != revlog.hash(text, p1, p2):
126 126 raise error.RevlogError(_("integrity check failed on %s:%d")
127 127 % (self.datafile, self.rev(node)))
128 128
129 129 self._cache = (node, self.rev(node), text)
130 130 return text
131 131
132 132 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
133 133 raise NotImplementedError
134 134 def addgroup(self, revs, linkmapper, transaction):
135 135 raise NotImplementedError
136 136 def strip(self, rev, minlink):
137 137 raise NotImplementedError
138 138 def checksize(self):
139 139 raise NotImplementedError
140 140
141 141 class bundlechangelog(bundlerevlog, changelog.changelog):
142 142 def __init__(self, opener, bundlefile):
143 143 changelog.changelog.__init__(self, opener)
144 144 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile)
145 145
146 146 class bundlemanifest(bundlerevlog, manifest.manifest):
147 147 def __init__(self, opener, bundlefile, linkmapper):
148 148 manifest.manifest.__init__(self, opener)
149 149 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
150 150 linkmapper)
151 151
152 152 class bundlefilelog(bundlerevlog, filelog.filelog):
153 153 def __init__(self, opener, path, bundlefile, linkmapper):
154 154 filelog.filelog.__init__(self, opener, path)
155 155 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
156 156 linkmapper)
157 157
158 158 class bundlerepository(localrepo.localrepository):
159 159 def __init__(self, ui, path, bundlename):
160 160 self._tempparent = None
161 161 try:
162 162 localrepo.localrepository.__init__(self, ui, path)
163 163 except error.RepoError:
164 164 self._tempparent = tempfile.mkdtemp()
165 165 localrepo.instance(ui, self._tempparent, 1)
166 166 localrepo.localrepository.__init__(self, ui, self._tempparent)
167 167
168 168 if path:
169 self._url = 'bundle:' + path + '+' + bundlename
169 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
170 170 else:
171 171 self._url = 'bundle:' + bundlename
172 172
173 173 self.tempfile = None
174 174 self.bundlefile = open(bundlename, "rb")
175 175 header = self.bundlefile.read(6)
176 176 if not header.startswith("HG"):
177 177 raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
178 178 elif not header.startswith("HG10"):
179 179 raise util.Abort(_("%s: unknown bundle version") % bundlename)
180 180 elif (header == "HG10BZ") or (header == "HG10GZ"):
181 181 fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
182 182 suffix=".hg10un", dir=self.path)
183 183 self.tempfile = temp
184 184 fptemp = os.fdopen(fdtemp, 'wb')
185 185 def generator(f):
186 186 if header == "HG10BZ":
187 187 zd = bz2.BZ2Decompressor()
188 188 zd.decompress("BZ")
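        # the 6-byte bundle header ends in "BZ", which is also the start
        # of the bz2 magic, so the decompressor is primed with those two
        # bytes before the rest of the stream is fed in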
189 189 elif header == "HG10GZ":
190 190 zd = zlib.decompressobj()
191 191 for chunk in f:
192 192 yield zd.decompress(chunk)
193 193 gen = generator(util.filechunkiter(self.bundlefile, 4096))
194 194
195 195 try:
196 196 fptemp.write("HG10UN")
197 197 for chunk in gen:
198 198 fptemp.write(chunk)
199 199 finally:
200 200 fptemp.close()
201 201 self.bundlefile.close()
202 202
203 203 self.bundlefile = open(self.tempfile, "rb")
204 204 # seek right after the header
205 205 self.bundlefile.seek(6)
206 206 elif header == "HG10UN":
207 207 # nothing to do
208 208 pass
209 209 else:
210 210 raise util.Abort(_("%s: unknown bundle compression type")
211 211 % bundlename)
212 212 # dict with the mapping 'filename' -> position in the bundle
213 213 self.bundlefilespos = {}
214 214
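    # The propertycaches below bootstrap one another: manstart is set as a
    # side effect of reading the changelog, and filestart as a side effect
    # of reading the manifest, so the bundle is always consumed in stream
    # order (changelog group, then manifest group, then file groups).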
215 215 @util.propertycache
216 216 def changelog(self):
217 217 c = bundlechangelog(self.sopener, self.bundlefile)
218 218 self.manstart = self.bundlefile.tell()
219 219 return c
220 220
221 221 @util.propertycache
222 222 def manifest(self):
223 223 self.bundlefile.seek(self.manstart)
224 224 m = bundlemanifest(self.sopener, self.bundlefile, self.changelog.rev)
225 225 self.filestart = self.bundlefile.tell()
226 226 return m
227 227
228 228 @util.propertycache
229 229 def manstart(self):
230 230 self.changelog
231 231 return self.manstart
232 232
233 233 @util.propertycache
234 234 def filestart(self):
235 235 self.manifest
236 236 return self.filestart
237 237
238 238 def url(self):
239 239 return self._url
240 240
241 241 def file(self, f):
242 242 if not self.bundlefilespos:
243 243 self.bundlefile.seek(self.filestart)
244 244 while True:
245 245 chunk = changegroup.getchunk(self.bundlefile)
246 246 if not chunk:
247 247 break
248 248 self.bundlefilespos[chunk] = self.bundlefile.tell()
249 249 for c in changegroup.chunkiter(self.bundlefile):
250 250 pass
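            # each file in the bundle is a filename chunk followed by its
            # delta chunks; the empty loop above drains the deltas, so the
            # next getchunk() yields the next filename (or "" at the end)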
251 251
252 252 if f[0] == '/':
253 253 f = f[1:]
254 254 if f in self.bundlefilespos:
255 255 self.bundlefile.seek(self.bundlefilespos[f])
256 256 return bundlefilelog(self.sopener, f, self.bundlefile,
257 257 self.changelog.rev)
258 258 else:
259 259 return filelog.filelog(self.sopener, f)
260 260
261 261 def close(self):
262 262 """Close assigned bundle file immediately."""
263 263 self.bundlefile.close()
264 264
265 265 def __del__(self):
266 266 bundlefile = getattr(self, 'bundlefile', None)
267 267 if bundlefile and not bundlefile.closed:
268 268 bundlefile.close()
269 269 tempfile = getattr(self, 'tempfile', None)
270 270 if tempfile is not None:
271 271 os.unlink(tempfile)
272 272 if self._tempparent:
273 273 shutil.rmtree(self._tempparent, True)
274 274
275 275 def cancopy(self):
276 276 return False
277 277
278 278 def getcwd(self):
279 279 return os.getcwd() # always outside the repo
280 280
281 281 def instance(ui, path, create):
282 282 if create:
283 283 raise util.Abort(_('cannot create new bundle repository'))
284 284 parentpath = ui.config("bundle", "mainreporoot", "")
285 285 if parentpath:
286 286 # Try to make the full path relative so we get a nice, short URL.
287 287 # In particular, we don't want temp dir names in test outputs.
288 288 cwd = os.getcwd()
289 289 if parentpath == cwd:
290 290 parentpath = ''
291 291 else:
292 292 cwd = os.path.join(cwd,'')
293 293 if parentpath.startswith(cwd):
294 294 parentpath = parentpath[len(cwd):]
295 295 path = util.drop_scheme('file', path)
296 296 if path.startswith('bundle:'):
297 297 path = util.drop_scheme('bundle', path)
298 298 s = path.split("+", 1)
299 299 if len(s) == 1:
300 300 repopath, bundlename = parentpath, s[0]
301 301 else:
302 302 repopath, bundlename = s
303 303 else:
304 304 repopath, bundlename = parentpath, path
305 305 return bundlerepository(ui, repopath, bundlename)
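# Illustrative example: with no [bundle] mainreporoot configured,
# instance(ui, 'bundle:/home/u/repo+/tmp/incoming.hg', False) drops the
# scheme, splits once on '+', and returns
# bundlerepository(ui, '/home/u/repo', '/tmp/incoming.hg').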
@@ -1,395 +1,395 @@ b'mercurial/hg.py'
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from i18n import _
10 10 from lock import release
11 11 import localrepo, bundlerepo, httprepo, sshrepo, statichttprepo
12 12 import lock, util, extensions, error, encoding, node
13 13 import merge as _merge
14 14 import verify as _verify
15 15 import errno, os, shutil
16 16
17 17 def _local(path):
18 return (os.path.isfile(util.drop_scheme('file', path)) and
19 bundlerepo or localrepo)
18 path = util.expandpath(util.drop_scheme('file', path))
19 return (os.path.isfile(path) and bundlerepo or localrepo)
20 20
21 21 def addbranchrevs(lrepo, repo, branches, revs):
22 22 if not branches:
23 23 return revs or None, revs and revs[0] or None
24 24 revs = revs and list(revs) or []
25 25 if not repo.capable('branchmap'):
26 26 revs.extend(branches)
27 27 return revs, revs[0]
28 28 branchmap = repo.branchmap()
29 29 for branch in branches:
30 30 if branch == '.':
31 31 if not lrepo or not lrepo.local():
32 32 raise util.Abort(_("dirstate branch not accessible"))
33 33 revs.append(lrepo.dirstate.branch())
34 34 else:
35 35 butf8 = encoding.fromlocal(branch)
36 36 if butf8 in branchmap:
37 37 revs.extend(node.hex(r) for r in reversed(branchmap[butf8]))
38 38 else:
39 39 revs.append(branch)
40 40 return revs, revs[0]
41 41
42 42 def parseurl(url, branches=None):
43 43 '''parse url#branch, returning url, branches+[branch]'''
44 44
45 45 if '#' not in url:
46 46 return url, branches or []
47 47 url, branch = url.split('#', 1)
48 48 return url, (branches or []) + [branch]
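    # e.g. parseurl('http://host/repo#stable', ['default']) returns
    # ('http://host/repo', ['default', 'stable'])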
49 49
50 50 schemes = {
51 51 'bundle': bundlerepo,
52 52 'file': _local,
53 53 'http': httprepo,
54 54 'https': httprepo,
55 55 'ssh': sshrepo,
56 56 'static-http': statichttprepo,
57 57 }
58 58
59 59 def _lookup(path):
60 60 scheme = 'file'
61 61 if path:
62 62 c = path.find(':')
63 63 if c > 0:
64 64 scheme = path[:c]
65 65 thing = schemes.get(scheme) or schemes['file']
66 66 try:
67 67 return thing(path)
68 68 except TypeError:
69 69 return thing
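    # values in schemes are either repo modules or, for 'file', a function
    # of the path; calling a module raises TypeError, in which case the
    # module itself is the answer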
70 70
71 71 def islocal(repo):
72 72 '''return true if repo or path is local'''
73 73 if isinstance(repo, str):
74 74 try:
75 75 return _lookup(repo).islocal(repo)
76 76 except AttributeError:
77 77 return False
78 78 return repo.local()
79 79
80 80 def repository(ui, path='', create=False):
81 81 """return a repository object for the specified path"""
82 82 repo = _lookup(path).instance(ui, path, create)
83 83 ui = getattr(repo, "ui", ui)
84 84 for name, module in extensions.extensions():
85 85 hook = getattr(module, 'reposetup', None)
86 86 if hook:
87 87 hook(ui, repo)
88 88 return repo
89 89
90 90 def defaultdest(source):
91 91 '''return default destination of clone if none is given'''
92 92 return os.path.basename(os.path.normpath(source))
93 93
94 94 def localpath(path):
95 95 if path.startswith('file://localhost/'):
96 96 return path[16:]
97 97 if path.startswith('file://'):
98 98 return path[7:]
99 99 if path.startswith('file:'):
100 100 return path[5:]
101 101 return path
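    # e.g. localpath('file://localhost/tmp/repo') -> '/tmp/repo',
    #      localpath('file:///tmp/repo')          -> '/tmp/repo',
    #      localpath('file:/tmp/repo')            -> '/tmp/repo'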
102 102
103 103 def share(ui, source, dest=None, update=True):
104 104 '''create a shared repository'''
105 105
106 106 if not islocal(source):
107 107 raise util.Abort(_('can only share local repositories'))
108 108
109 109 if not dest:
110 110 dest = defaultdest(source)
111 111 else:
112 112 dest = ui.expandpath(dest)
113 113
114 114 if isinstance(source, str):
115 115 origsource = ui.expandpath(source)
116 116 source, branches = parseurl(origsource)
117 117 srcrepo = repository(ui, source)
118 118 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
119 119 else:
120 120 srcrepo = source
121 121 origsource = source = srcrepo.url()
122 122 checkout = None
123 123
124 124 sharedpath = srcrepo.sharedpath # if our source is already sharing
125 125
126 126 root = os.path.realpath(dest)
127 127 roothg = os.path.join(root, '.hg')
128 128
129 129 if os.path.exists(roothg):
130 130 raise util.Abort(_('destination already exists'))
131 131
132 132 if not os.path.isdir(root):
133 133 os.mkdir(root)
134 134 os.mkdir(roothg)
135 135
136 136 requirements = ''
137 137 try:
138 138 requirements = srcrepo.opener('requires').read()
139 139 except IOError, inst:
140 140 if inst.errno != errno.ENOENT:
141 141 raise
142 142
143 143 requirements += 'shared\n'
144 144 file(os.path.join(roothg, 'requires'), 'w').write(requirements)
145 145 file(os.path.join(roothg, 'sharedpath'), 'w').write(sharedpath)
146 146
147 147 default = srcrepo.ui.config('paths', 'default')
148 148 if default:
149 149 f = file(os.path.join(roothg, 'hgrc'), 'w')
150 150 f.write('[paths]\ndefault = %s\n' % default)
151 151 f.close()
152 152
153 153 r = repository(ui, root)
154 154
155 155 if update:
156 156 r.ui.status(_("updating working directory\n"))
157 157 if update is not True:
158 158 checkout = update
159 159 for test in (checkout, 'default', 'tip'):
160 160 if test is None:
161 161 continue
162 162 try:
163 163 uprev = r.lookup(test)
164 164 break
165 165 except error.RepoLookupError:
166 166 continue
167 167 _update(r, uprev)
168 168
169 169 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
170 170 stream=False, branch=None):
171 171 """Make a copy of an existing repository.
172 172
173 173 Create a copy of an existing repository in a new directory. The
174 174 source and destination are URLs, as passed to the repository
175 175 function. Returns a pair of repository objects, the source and
176 176 newly created destination.
177 177
178 178 The location of the source is added to the new repository's
179 179 .hg/hgrc file, as the default to be used for future pulls and
180 180 pushes.
181 181
182 182 If an exception is raised, the partly cloned/updated destination
183 183 repository will be deleted.
184 184
185 185 Arguments:
186 186
187 187 source: repository object or URL
188 188
189 189 dest: URL of destination repository to create (defaults to base
190 190 name of source repository)
191 191
192 192 pull: always pull from source repository, even in local case
193 193
194 194 stream: stream raw data uncompressed from repository (fast over
195 195 LAN, slow over WAN)
196 196
197 197 rev: revision to clone up to (implies pull=True)
198 198
199 199 update: update working directory after clone completes, if
200 200 destination is local repository (True means update to default rev,
201 201 anything else is treated as a revision)
202 202
203 203 branch: branches to clone
204 204 """
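    # Illustrative call: clone(ui, 'http://host/repo', 'copy', rev=['tip'])
    # pulls up to tip into ./copy and records default = http://host/repo
    # in copy/.hg/hgrc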
205 205
206 206 if isinstance(source, str):
207 207 origsource = ui.expandpath(source)
208 208 source, branch = parseurl(origsource, branch)
209 209 src_repo = repository(ui, source)
210 210 else:
211 211 src_repo = source
212 212 branch = None
213 213 origsource = source = src_repo.url()
214 214 rev, checkout = addbranchrevs(src_repo, src_repo, branch, rev)
215 215
216 216 if dest is None:
217 217 dest = defaultdest(source)
218 218 ui.status(_("destination directory: %s\n") % dest)
219 219 else:
220 220 dest = ui.expandpath(dest)
221 221
222 222 dest = localpath(dest)
223 223 source = localpath(source)
224 224
225 225 if os.path.exists(dest):
226 226 if not os.path.isdir(dest):
227 227 raise util.Abort(_("destination '%s' already exists") % dest)
228 228 elif os.listdir(dest):
229 229 raise util.Abort(_("destination '%s' is not empty") % dest)
230 230
231 231 class DirCleanup(object):
232 232 def __init__(self, dir_):
233 233 self.rmtree = shutil.rmtree
234 234 self.dir_ = dir_
235 235 def close(self):
236 236 self.dir_ = None
237 237 def cleanup(self):
238 238 if self.dir_:
239 239 self.rmtree(self.dir_, True)
240 240
241 241 src_lock = dest_lock = dir_cleanup = None
242 242 try:
243 243 if islocal(dest):
244 244 dir_cleanup = DirCleanup(dest)
245 245
246 246 abspath = origsource
247 247 copy = False
248 248 if src_repo.cancopy() and islocal(dest):
249 249 abspath = os.path.abspath(util.drop_scheme('file', origsource))
250 250 copy = not pull and not rev
251 251
252 252 if copy:
253 253 try:
254 254 # we use a lock here because if we race with commit, we
255 255 # can end up with extra data in the cloned revlogs that's
256 256 # not pointed to by changesets, thus causing verify to
257 257 # fail
258 258 src_lock = src_repo.lock(wait=False)
259 259 except error.LockError:
260 260 copy = False
261 261
262 262 if copy:
263 263 src_repo.hook('preoutgoing', throw=True, source='clone')
264 264 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
265 265 if not os.path.exists(dest):
266 266 os.mkdir(dest)
267 267 else:
268 268 # only clean up directories we create ourselves
269 269 dir_cleanup.dir_ = hgdir
270 270 try:
271 271 dest_path = hgdir
272 272 os.mkdir(dest_path)
273 273 except OSError, inst:
274 274 if inst.errno == errno.EEXIST:
275 275 dir_cleanup.close()
276 276 raise util.Abort(_("destination '%s' already exists")
277 277 % dest)
278 278 raise
279 279
280 280 for f in src_repo.store.copylist():
281 281 src = os.path.join(src_repo.sharedpath, f)
282 282 dst = os.path.join(dest_path, f)
283 283 dstbase = os.path.dirname(dst)
284 284 if dstbase and not os.path.exists(dstbase):
285 285 os.mkdir(dstbase)
286 286 if os.path.exists(src):
287 287 if dst.endswith('data'):
288 288 # lock to avoid premature writing to the target
289 289 dest_lock = lock.lock(os.path.join(dstbase, "lock"))
290 290 util.copyfiles(src, dst)
291 291
292 292 # we need to re-init the repo after manually copying the data
293 293 # into it
294 294 dest_repo = repository(ui, dest)
295 295 src_repo.hook('outgoing', source='clone', node='0'*40)
296 296 else:
297 297 try:
298 298 dest_repo = repository(ui, dest, create=True)
299 299 except OSError, inst:
300 300 if inst.errno == errno.EEXIST:
301 301 dir_cleanup.close()
302 302 raise util.Abort(_("destination '%s' already exists")
303 303 % dest)
304 304 raise
305 305
306 306 revs = None
307 307 if rev:
308 308 if 'lookup' not in src_repo.capabilities:
309 309 raise util.Abort(_("src repository does not support "
310 310 "revision lookup and so doesn't "
311 311 "support clone by revision"))
312 312 revs = [src_repo.lookup(r) for r in rev]
313 313 checkout = revs[0]
314 314 if dest_repo.local():
315 315 dest_repo.clone(src_repo, heads=revs, stream=stream)
316 316 elif src_repo.local():
317 317 src_repo.push(dest_repo, revs=revs)
318 318 else:
319 319 raise util.Abort(_("clone from remote to remote not supported"))
320 320
321 321 if dir_cleanup:
322 322 dir_cleanup.close()
323 323
324 324 if dest_repo.local():
325 325 fp = dest_repo.opener("hgrc", "w", text=True)
326 326 fp.write("[paths]\n")
327 327 fp.write("default = %s\n" % abspath)
328 328 fp.close()
329 329
330 330 dest_repo.ui.setconfig('paths', 'default', abspath)
331 331
332 332 if update:
333 333 if update is not True:
334 334 checkout = update
335 335 if src_repo.local():
336 336 checkout = src_repo.lookup(update)
337 337 for test in (checkout, 'default', 'tip'):
338 338 if test is None:
339 339 continue
340 340 try:
341 341 uprev = dest_repo.lookup(test)
342 342 break
343 343 except error.RepoLookupError:
344 344 continue
345 345 bn = dest_repo[uprev].branch()
346 346 dest_repo.ui.status(_("updating to branch %s\n")
347 347 % encoding.tolocal(bn))
348 348 _update(dest_repo, uprev)
349 349
350 350 return src_repo, dest_repo
351 351 finally:
352 352 release(src_lock, dest_lock)
353 353 if dir_cleanup is not None:
354 354 dir_cleanup.cleanup()
355 355
356 356 def _showstats(repo, stats):
357 357 repo.ui.status(_("%d files updated, %d files merged, "
358 358 "%d files removed, %d files unresolved\n") % stats)
359 359
360 360 def update(repo, node):
361 361 """update the working directory to node, merging linear changes"""
362 362 stats = _merge.update(repo, node, False, False, None)
363 363 _showstats(repo, stats)
364 364 if stats[3]:
365 365 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
366 366 return stats[3] > 0
367 367
368 368 # naming conflict in clone()
369 369 _update = update
370 370
371 371 def clean(repo, node, show_stats=True):
372 372 """forcibly switch the working directory to node, clobbering changes"""
373 373 stats = _merge.update(repo, node, False, True, None)
374 374 if show_stats:
375 375 _showstats(repo, stats)
376 376 return stats[3] > 0
377 377
378 378 def merge(repo, node, force=None, remind=True):
379 379 """branch merge with node, resolving changes"""
380 380 stats = _merge.update(repo, node, True, force, False)
381 381 _showstats(repo, stats)
382 382 if stats[3]:
383 383 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
384 384 "or 'hg update -C' to abandon\n"))
385 385 elif remind:
386 386 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
387 387 return stats[3] > 0
388 388
389 389 def revert(repo, node, choose):
390 390 """revert changes to revision in node without updating dirstate"""
391 391 return _merge.update(repo, node, False, True, choose)[3] > 0
392 392
393 393 def verify(repo):
394 394 """verify the consistency of a repository"""
395 395 return _verify.verify(repo)
@@ -1,2224 +1,2224 @@ b'mercurial/localrepo.py'
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as match_
15 15 import merge as merge_
16 16 import tags as tags_
17 17 from lock import release
18 18 import weakref, stat, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20
21 21 class localrepository(repo.repository):
22 22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 23 supported = set('revlogv1 store fncache shared'.split())
24 24
25 25 def __init__(self, baseui, path=None, create=0):
26 26 repo.repository.__init__(self)
27 self.root = os.path.realpath(path)
27 self.root = os.path.realpath(util.expandpath(path))
28 28 self.path = os.path.join(self.root, ".hg")
29 29 self.origroot = path
30 30 self.opener = util.opener(self.path)
31 31 self.wopener = util.opener(self.root)
32 32 self.baseui = baseui
33 33 self.ui = baseui.copy()
34 34
35 35 try:
36 36 self.ui.readconfig(self.join("hgrc"), self.root)
37 37 extensions.loadall(self.ui)
38 38 except IOError:
39 39 pass
40 40
41 41 if not os.path.isdir(self.path):
42 42 if create:
43 43 if not os.path.exists(path):
44 44 os.mkdir(path)
45 45 os.mkdir(self.path)
46 46 requirements = ["revlogv1"]
47 47 if self.ui.configbool('format', 'usestore', True):
48 48 os.mkdir(os.path.join(self.path, "store"))
49 49 requirements.append("store")
50 50 if self.ui.configbool('format', 'usefncache', True):
51 51 requirements.append("fncache")
52 52 # create an invalid changelog
53 53 self.opener("00changelog.i", "a").write(
54 54 '\0\0\0\2' # represents revlogv2
55 55 ' dummy changelog to prevent using the old repo layout'
56 56 )
57 57 reqfile = self.opener("requires", "w")
58 58 for r in requirements:
59 59 reqfile.write("%s\n" % r)
60 60 reqfile.close()
61 61 else:
62 62 raise error.RepoError(_("repository %s not found") % path)
63 63 elif create:
64 64 raise error.RepoError(_("repository %s already exists") % path)
65 65 else:
66 66 # find requirements
67 67 requirements = set()
68 68 try:
69 69 requirements = set(self.opener("requires").read().splitlines())
70 70 except IOError, inst:
71 71 if inst.errno != errno.ENOENT:
72 72 raise
73 73 for r in requirements - self.supported:
74 74 raise error.RepoError(_("requirement '%s' not supported") % r)
75 75
76 76 self.sharedpath = self.path
77 77 try:
78 78 s = os.path.realpath(self.opener("sharedpath").read())
79 79 if not os.path.exists(s):
80 80 raise error.RepoError(
81 81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 82 self.sharedpath = s
83 83 except IOError, inst:
84 84 if inst.errno != errno.ENOENT:
85 85 raise
86 86
87 87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 88 self.spath = self.store.path
89 89 self.sopener = self.store.opener
90 90 self.sjoin = self.store.join
91 91 self.opener.createmode = self.store.createmode
92 92 self.sopener.options = {}
93 93
94 94 # These two define the set of tags for this repository. _tags
95 95 # maps tag name to node; _tagtypes maps tag name to 'global' or
96 96 # 'local'. (Global tags are defined by .hgtags across all
97 97 # heads, and local tags are defined in .hg/localtags.) They
98 98 # constitute the in-memory cache of tags.
99 99 self._tags = None
100 100 self._tagtypes = None
101 101
102 102 self._branchcache = None # in UTF-8
103 103 self._branchcachetip = None
104 104 self.nodetagscache = None
105 105 self.filterpats = {}
106 106 self._datafilters = {}
107 107 self._transref = self._lockref = self._wlockref = None
108 108
109 109 @propertycache
110 110 def changelog(self):
111 111 c = changelog.changelog(self.sopener)
112 112 if 'HG_PENDING' in os.environ:
113 113 p = os.environ['HG_PENDING']
114 114 if p.startswith(self.root):
115 115 c.readpending('00changelog.i.a')
116 116 self.sopener.options['defversion'] = c.version
117 117 return c
118 118
119 119 @propertycache
120 120 def manifest(self):
121 121 return manifest.manifest(self.sopener)
122 122
123 123 @propertycache
124 124 def dirstate(self):
125 125 return dirstate.dirstate(self.opener, self.ui, self.root)
126 126
127 127 def __getitem__(self, changeid):
128 128 if changeid is None:
129 129 return context.workingctx(self)
130 130 return context.changectx(self, changeid)
131 131
132 132 def __contains__(self, changeid):
133 133 try:
134 134 return bool(self.lookup(changeid))
135 135 except error.RepoLookupError:
136 136 return False
137 137
138 138 def __nonzero__(self):
139 139 return True
140 140
141 141 def __len__(self):
142 142 return len(self.changelog)
143 143
144 144 def __iter__(self):
145 145 for i in xrange(len(self)):
146 146 yield i
147 147
148 148 def url(self):
149 149 return 'file:' + self.root
150 150
151 151 def hook(self, name, throw=False, **args):
152 152 return hook.hook(self.ui, self, name, throw, **args)
153 153
154 154 tag_disallowed = ':\r\n'
155 155
156 156 def _tag(self, names, node, message, local, user, date, extra={}):
157 157 if isinstance(names, str):
158 158 allchars = names
159 159 names = (names,)
160 160 else:
161 161 allchars = ''.join(names)
162 162 for c in self.tag_disallowed:
163 163 if c in allchars:
164 164 raise util.Abort(_('%r cannot be used in a tag name') % c)
165 165
166 166 for name in names:
167 167 self.hook('pretag', throw=True, node=hex(node), tag=name,
168 168 local=local)
169 169
170 170 def writetags(fp, names, munge, prevtags):
171 171 fp.seek(0, 2)
172 172 if prevtags and prevtags[-1] != '\n':
173 173 fp.write('\n')
174 174 for name in names:
175 175 m = munge and munge(name) or name
176 176 if self._tagtypes and name in self._tagtypes:
177 177 old = self._tags.get(name, nullid)
178 178 fp.write('%s %s\n' % (hex(old), m))
179 179 fp.write('%s %s\n' % (hex(node), m))
180 180 fp.close()
181 181
182 182 prevtags = ''
183 183 if local:
184 184 try:
185 185 fp = self.opener('localtags', 'r+')
186 186 except IOError:
187 187 fp = self.opener('localtags', 'a')
188 188 else:
189 189 prevtags = fp.read()
190 190
191 191 # local tags are stored in the current charset
192 192 writetags(fp, names, None, prevtags)
193 193 for name in names:
194 194 self.hook('tag', node=hex(node), tag=name, local=local)
195 195 return
196 196
197 197 try:
198 198 fp = self.wfile('.hgtags', 'rb+')
199 199 except IOError:
200 200 fp = self.wfile('.hgtags', 'ab')
201 201 else:
202 202 prevtags = fp.read()
203 203
204 204 # committed tags are stored in UTF-8
205 205 writetags(fp, names, encoding.fromlocal, prevtags)
206 206
207 207 if '.hgtags' not in self.dirstate:
208 208 self.add(['.hgtags'])
209 209
210 210 m = match_.exact(self.root, '', ['.hgtags'])
211 211 tagnode = self.commit(message, user, date, extra=extra, match=m)
212 212
213 213 for name in names:
214 214 self.hook('tag', node=hex(node), tag=name, local=local)
215 215
216 216 return tagnode
217 217
218 218 def tag(self, names, node, message, local, user, date):
219 219 '''tag a revision with one or more symbolic names.
220 220
221 221 names is a list of strings or, when adding a single tag, a
222 222 single string.
223 223
224 224 if local is True, the tags are stored in a per-repository file.
225 225 otherwise, they are stored in the .hgtags file, and a new
226 226 changeset is committed with the change.
227 227
228 228 keyword arguments:
229 229
230 230 local: whether to store tags in non-version-controlled file
231 231 (default False)
232 232
233 233 message: commit message to use if committing
234 234
235 235 user: name of user to use if committing
236 236
237 237 date: date tuple to use if committing'''
238 238
239 239 for x in self.status()[:5]:
240 240 if '.hgtags' in x:
241 241 raise util.Abort(_('working copy of .hgtags is changed '
242 242 '(please commit .hgtags manually)'))
243 243
244 244 self.tags() # instantiate the cache
245 245 self._tag(names, node, message, local, user, date)
246 246
247 247 def tags(self):
248 248 '''return a mapping of tag to node'''
249 249 if self._tags is None:
250 250 (self._tags, self._tagtypes) = self._findtags()
251 251
252 252 return self._tags
253 253
254 254 def _findtags(self):
255 255 '''Do the hard work of finding tags. Return a pair of dicts
256 256 (tags, tagtypes) where tags maps tag name to node, and tagtypes
257 257 maps tag name to a string like \'global\' or \'local\'.
258 258 Subclasses or extensions are free to add their own tags, but
259 259 should be aware that the returned dicts will be retained for the
260 260 duration of the localrepo object.'''
261 261
262 262 # XXX what tagtype should subclasses/extensions use? Currently
263 263 # mq and bookmarks add tags, but do not set the tagtype at all.
264 264 # Should each extension invent its own tag type? Should there
265 265 # be one tagtype for all such "virtual" tags? Or is the status
266 266 # quo fine?
267 267
268 268 alltags = {} # map tag name to (node, hist)
269 269 tagtypes = {}
270 270
271 271 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
272 272 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
273 273
274 274 # Build the return dicts. Have to re-encode tag names because
275 275 # the tags module always uses UTF-8 (in order not to lose info
276 276 # writing to the cache), but the rest of Mercurial wants them in
277 277 # local encoding.
278 278 tags = {}
279 279 for (name, (node, hist)) in alltags.iteritems():
280 280 if node != nullid:
281 281 tags[encoding.tolocal(name)] = node
282 282 tags['tip'] = self.changelog.tip()
283 283 tagtypes = dict([(encoding.tolocal(name), value)
284 284 for (name, value) in tagtypes.iteritems()])
285 285 return (tags, tagtypes)
286 286
287 287 def tagtype(self, tagname):
288 288 '''
289 289 return the type of the given tag. result can be:
290 290
291 291 'local' : a local tag
292 292 'global' : a global tag
293 293 None : tag does not exist
294 294 '''
295 295
296 296 self.tags()
297 297
298 298 return self._tagtypes.get(tagname)
299 299
300 300 def tagslist(self):
301 301 '''return a list of tags ordered by revision'''
302 302 l = []
303 303 for t, n in self.tags().iteritems():
304 304 try:
305 305 r = self.changelog.rev(n)
306 306 except:
307 307 r = -2 # sort to the beginning of the list if unknown
308 308 l.append((r, t, n))
309 309 return [(t, n) for r, t, n in sorted(l)]
310 310
311 311 def nodetags(self, node):
312 312 '''return the tags associated with a node'''
313 313 if not self.nodetagscache:
314 314 self.nodetagscache = {}
315 315 for t, n in self.tags().iteritems():
316 316 self.nodetagscache.setdefault(n, []).append(t)
317 317 return self.nodetagscache.get(node, [])
318 318
319 319 def _branchtags(self, partial, lrev):
320 320 # TODO: rename this function?
321 321 tiprev = len(self) - 1
322 322 if lrev != tiprev:
323 323 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
324 324 self._updatebranchcache(partial, ctxgen)
325 325 self._writebranchcache(partial, self.changelog.tip(), tiprev)
326 326
327 327 return partial
328 328
329 329 def branchmap(self):
330 330 '''returns a dictionary {branch: [branchheads]}'''
331 331 tip = self.changelog.tip()
332 332 if self._branchcache is not None and self._branchcachetip == tip:
333 333 return self._branchcache
334 334
335 335 oldtip = self._branchcachetip
336 336 self._branchcachetip = tip
337 337 if oldtip is None or oldtip not in self.changelog.nodemap:
338 338 partial, last, lrev = self._readbranchcache()
339 339 else:
340 340 lrev = self.changelog.rev(oldtip)
341 341 partial = self._branchcache
342 342
343 343 self._branchtags(partial, lrev)
344 344 # this private cache holds all heads (not just tips)
345 345 self._branchcache = partial
346 346
347 347 return self._branchcache
348 348
349 349 def branchtags(self):
350 350 '''return a dict where branch names map to the tipmost head of
351 351 the branch, open heads come before closed'''
352 352 bt = {}
353 353 for bn, heads in self.branchmap().iteritems():
354 354 tip = heads[-1]
355 355 for h in reversed(heads):
356 356 if 'close' not in self.changelog.read(h)[5]:
357 357 tip = h
358 358 break
359 359 bt[bn] = tip
360 360 return bt
361 361
362 362
363 363 def _readbranchcache(self):
364 364 partial = {}
365 365 try:
366 366 f = self.opener("branchheads.cache")
367 367 lines = f.read().split('\n')
368 368 f.close()
369 369 except (IOError, OSError):
370 370 return {}, nullid, nullrev
371 371
372 372 try:
373 373 last, lrev = lines.pop(0).split(" ", 1)
374 374 last, lrev = bin(last), int(lrev)
375 375 if lrev >= len(self) or self[lrev].node() != last:
376 376 # invalidate the cache
377 377 raise ValueError('invalidating branch cache (tip differs)')
378 378 for l in lines:
379 379 if not l:
380 380 continue
381 381 node, label = l.split(" ", 1)
382 382 partial.setdefault(label.strip(), []).append(bin(node))
383 383 except KeyboardInterrupt:
384 384 raise
385 385 except Exception, inst:
386 386 if self.ui.debugflag:
387 387 self.ui.warn(str(inst), '\n')
388 388 partial, last, lrev = {}, nullid, nullrev
389 389 return partial, last, lrev
390 390
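    # branchheads.cache format (read above, written below): one
    # "<tip-hex> <tip-rev>" header line, then one "<node-hex> <label>"
    # line per head of each branch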
391 391 def _writebranchcache(self, branches, tip, tiprev):
392 392 try:
393 393 f = self.opener("branchheads.cache", "w", atomictemp=True)
394 394 f.write("%s %s\n" % (hex(tip), tiprev))
395 395 for label, nodes in branches.iteritems():
396 396 for node in nodes:
397 397 f.write("%s %s\n" % (hex(node), label))
398 398 f.rename()
399 399 except (IOError, OSError):
400 400 pass
401 401
402 402 def _updatebranchcache(self, partial, ctxgen):
403 403 # collect new branch entries
404 404 newbranches = {}
405 405 for c in ctxgen:
406 406 newbranches.setdefault(c.branch(), []).append(c.node())
407 407 # if older branchheads are reachable from new ones, they aren't
408 408 # really branchheads. Note checking parents is insufficient:
409 409 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
410 410 for branch, newnodes in newbranches.iteritems():
411 411 bheads = partial.setdefault(branch, [])
412 412 bheads.extend(newnodes)
413 413 if len(bheads) < 2:
414 414 continue
415 415 newbheads = []
416 416 # starting from tip means fewer passes over reachable
417 417 while newnodes:
418 418 latest = newnodes.pop()
419 419 if latest not in bheads:
420 420 continue
421 421 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
422 422 reachable = self.changelog.reachable(latest, minbhrev)
423 423 bheads = [b for b in bheads if b not in reachable]
424 424 newbheads.insert(0, latest)
425 425 bheads.extend(newbheads)
426 426 partial[branch] = bheads
427 427
428 428 def lookup(self, key):
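        # resolution order: integer rev, '.', 'null', 'tip', exact node or
        # rev string, tag, branch tip, then unambiguous node prefix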
429 429 if isinstance(key, int):
430 430 return self.changelog.node(key)
431 431 elif key == '.':
432 432 return self.dirstate.parents()[0]
433 433 elif key == 'null':
434 434 return nullid
435 435 elif key == 'tip':
436 436 return self.changelog.tip()
437 437 n = self.changelog._match(key)
438 438 if n:
439 439 return n
440 440 if key in self.tags():
441 441 return self.tags()[key]
442 442 if key in self.branchtags():
443 443 return self.branchtags()[key]
444 444 n = self.changelog._partialmatch(key)
445 445 if n:
446 446 return n
447 447
448 448 # can't find key, check if it might have come from damaged dirstate
449 449 if key in self.dirstate.parents():
450 450 raise error.Abort(_("working directory has unknown parent '%s'!")
451 451 % short(key))
452 452 try:
453 453 if len(key) == 20:
454 454 key = hex(key)
455 455 except:
456 456 pass
457 457 raise error.RepoLookupError(_("unknown revision '%s'") % key)
458 458
459 459 def local(self):
460 460 return True
461 461
462 462 def join(self, f):
463 463 return os.path.join(self.path, f)
464 464
465 465 def wjoin(self, f):
466 466 return os.path.join(self.root, f)
467 467
468 468 def rjoin(self, f):
469 469 return os.path.join(self.root, util.pconvert(f))
470 470
471 471 def file(self, f):
472 472 if f[0] == '/':
473 473 f = f[1:]
474 474 return filelog.filelog(self.sopener, f)
475 475
476 476 def changectx(self, changeid):
477 477 return self[changeid]
478 478
479 479 def parents(self, changeid=None):
480 480 '''get list of changectxs for parents of changeid'''
481 481 return self[changeid].parents()
482 482
483 483 def filectx(self, path, changeid=None, fileid=None):
484 484 """changeid can be a changeset revision, node, or tag.
485 485 fileid can be a file revision or node."""
486 486 return context.filectx(self, path, changeid, fileid)
487 487
488 488 def getcwd(self):
489 489 return self.dirstate.getcwd()
490 490
491 491 def pathto(self, f, cwd=None):
492 492 return self.dirstate.pathto(f, cwd)
493 493
494 494 def wfile(self, f, mode='r'):
495 495 return self.wopener(f, mode)
496 496
497 497 def _link(self, f):
498 498 return os.path.islink(self.wjoin(f))
499 499
500 500 def _filter(self, filter, filename, data):
501 501 if filter not in self.filterpats:
502 502 l = []
503 503 for pat, cmd in self.ui.configitems(filter):
504 504 if cmd == '!':
505 505 continue
506 506 mf = match_.match(self.root, '', [pat])
507 507 fn = None
508 508 params = cmd
509 509 for name, filterfn in self._datafilters.iteritems():
510 510 if cmd.startswith(name):
511 511 fn = filterfn
512 512 params = cmd[len(name):].lstrip()
513 513 break
514 514 if not fn:
515 515 fn = lambda s, c, **kwargs: util.filter(s, c)
516 516 # Wrap old filters not supporting keyword arguments
517 517 if not inspect.getargspec(fn)[2]:
518 518 oldfn = fn
519 519 fn = lambda s, c, **kwargs: oldfn(s, c)
520 520 l.append((mf, fn, params))
521 521 self.filterpats[filter] = l
522 522
523 523 for mf, fn, cmd in self.filterpats[filter]:
524 524 if mf(filename):
525 525 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
526 526 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
527 527 break
528 528
529 529 return data
530 530
531 531 def adddatafilter(self, name, filter):
532 532 self._datafilters[name] = filter
533 533
534 534 def wread(self, filename):
535 535 if self._link(filename):
536 536 data = os.readlink(self.wjoin(filename))
537 537 else:
538 538 data = self.wopener(filename, 'r').read()
539 539 return self._filter("encode", filename, data)
540 540
541 541 def wwrite(self, filename, data, flags):
542 542 data = self._filter("decode", filename, data)
543 543 try:
544 544 os.unlink(self.wjoin(filename))
545 545 except OSError:
546 546 pass
547 547 if 'l' in flags:
548 548 self.wopener.symlink(data, filename)
549 549 else:
550 550 self.wopener(filename, 'w').write(data)
551 551 if 'x' in flags:
552 552 util.set_flags(self.wjoin(filename), False, True)
553 553
554 554 def wwritedata(self, filename, data):
555 555 return self._filter("decode", filename, data)
556 556
557 557 def transaction(self):
558 558 tr = self._transref and self._transref() or None
559 559 if tr and tr.running():
560 560 return tr.nest()
561 561
562 562 # abort here if the journal already exists
563 563 if os.path.exists(self.sjoin("journal")):
564 564 raise error.RepoError(
565 565 _("abandoned transaction found - run hg recover"))
566 566
567 567 # save dirstate for rollback
568 568 try:
569 569 ds = self.opener("dirstate").read()
570 570 except IOError:
571 571 ds = ""
572 572 self.opener("journal.dirstate", "w").write(ds)
573 573 self.opener("journal.branch", "w").write(self.dirstate.branch())
574 574
575 575 renames = [(self.sjoin("journal"), self.sjoin("undo")),
576 576 (self.join("journal.dirstate"), self.join("undo.dirstate")),
577 577 (self.join("journal.branch"), self.join("undo.branch"))]
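        # aftertrans renames journal* to undo* once the transaction
        # closes, which is the state rollback() later restores from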
578 578 tr = transaction.transaction(self.ui.warn, self.sopener,
579 579 self.sjoin("journal"),
580 580 aftertrans(renames),
581 581 self.store.createmode)
582 582 self._transref = weakref.ref(tr)
583 583 return tr
584 584
585 585 def recover(self):
586 586 lock = self.lock()
587 587 try:
588 588 if os.path.exists(self.sjoin("journal")):
589 589 self.ui.status(_("rolling back interrupted transaction\n"))
590 590 transaction.rollback(self.sopener, self.sjoin("journal"),
591 591 self.ui.warn)
592 592 self.invalidate()
593 593 return True
594 594 else:
595 595 self.ui.warn(_("no interrupted transaction available\n"))
596 596 return False
597 597 finally:
598 598 lock.release()
599 599
600 600 def rollback(self):
601 601 wlock = lock = None
602 602 try:
603 603 wlock = self.wlock()
604 604 lock = self.lock()
605 605 if os.path.exists(self.sjoin("undo")):
606 606 self.ui.status(_("rolling back last transaction\n"))
607 607 transaction.rollback(self.sopener, self.sjoin("undo"),
608 608 self.ui.warn)
609 609 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
610 610 try:
611 611 branch = self.opener("undo.branch").read()
612 612 self.dirstate.setbranch(branch)
613 613 except IOError:
614 614 self.ui.warn(_("Named branch could not be reset, "
615 615 "current branch still is: %s\n")
616 616 % encoding.tolocal(self.dirstate.branch()))
617 617 self.invalidate()
618 618 self.dirstate.invalidate()
619 619 self.destroyed()
620 620 else:
621 621 self.ui.warn(_("no rollback information available\n"))
622 622 finally:
623 623 release(lock, wlock)
624 624
625 625 def invalidatecaches(self):
626 626 self._tags = None
627 627 self._tagtypes = None
628 628 self.nodetagscache = None
629 629 self._branchcache = None # in UTF-8
630 630 self._branchcachetip = None
631 631
632 632 def invalidate(self):
633 633 for a in "changelog manifest".split():
634 634 if a in self.__dict__:
635 635 delattr(self, a)
636 636 self.invalidatecaches()
637 637
638 638 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
639 639 try:
640 640 l = lock.lock(lockname, 0, releasefn, desc=desc)
641 641 except error.LockHeld, inst:
642 642 if not wait:
643 643 raise
644 644 self.ui.warn(_("waiting for lock on %s held by %r\n") %
645 645 (desc, inst.locker))
646 646 # default to 600 seconds timeout
647 647 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
648 648 releasefn, desc=desc)
649 649 if acquirefn:
650 650 acquirefn()
651 651 return l
652 652
653 653 def lock(self, wait=True):
654 654 '''Lock the repository store (.hg/store) and return a weak reference
655 655 to the lock. Use this before modifying the store (e.g. committing or
656 656 stripping). If you are opening a transaction, get a lock as well.'''
657 657 l = self._lockref and self._lockref()
658 658 if l is not None and l.held:
659 659 l.lock()
660 660 return l
661 661
662 662 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
663 663 _('repository %s') % self.origroot)
664 664 self._lockref = weakref.ref(l)
665 665 return l
666 666
667 667 def wlock(self, wait=True):
668 668 '''Lock the non-store parts of the repository (everything under
669 669 .hg except .hg/store) and return a weak reference to the lock.
670 670 Use this before modifying files in .hg.'''
671 671 l = self._wlockref and self._wlockref()
672 672 if l is not None and l.held:
673 673 l.lock()
674 674 return l
675 675
676 676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
677 677 self.dirstate.invalidate, _('working directory of %s') %
678 678 self.origroot)
679 679 self._wlockref = weakref.ref(l)
680 680 return l
681 681
682 682 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
683 683 """
684 684 commit an individual file as part of a larger transaction
685 685 """
686 686
687 687 fname = fctx.path()
688 688 text = fctx.data()
689 689 flog = self.file(fname)
690 690 fparent1 = manifest1.get(fname, nullid)
691 691 fparent2 = fparent2o = manifest2.get(fname, nullid)
692 692
693 693 meta = {}
694 694 copy = fctx.renamed()
695 695 if copy and copy[0] != fname:
696 696 # Mark the new revision of this file as a copy of another
697 697 # file. This copy data will effectively act as a parent
698 698 # of this new revision. If this is a merge, the first
699 699 # parent will be the nullid (meaning "look up the copy data")
700 700 # and the second one will be the other parent. For example:
701 701 #
702 702 # 0 --- 1 --- 3 rev1 changes file foo
703 703 # \ / rev2 renames foo to bar and changes it
704 704 # \- 2 -/ rev3 should have bar with all changes and
705 705 # should record that bar descends from
706 706 # bar in rev2 and foo in rev1
707 707 #
708 708 # this allows this merge to succeed:
709 709 #
710 710 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
711 711 # \ / merging rev3 and rev4 should use bar@rev2
712 712 # \- 2 --- 4 as the merge base
713 713 #
714 714
715 715 cfname = copy[0]
716 716 crev = manifest1.get(cfname)
717 717 newfparent = fparent2
718 718
719 719 if manifest2: # branch merge
720 720 if fparent2 == nullid or crev is None: # copied on remote side
721 721 if cfname in manifest2:
722 722 crev = manifest2[cfname]
723 723 newfparent = fparent1
724 724
725 725 # find source in nearest ancestor if we've lost track
726 726 if not crev:
727 727 self.ui.debug(" %s: searching for copy revision for %s\n" %
728 728 (fname, cfname))
729 729 for ancestor in self['.'].ancestors():
730 730 if cfname in ancestor:
731 731 crev = ancestor[cfname].filenode()
732 732 break
733 733
734 734 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
735 735 meta["copy"] = cfname
736 736 meta["copyrev"] = hex(crev)
737 737 fparent1, fparent2 = nullid, newfparent
738 738 elif fparent2 != nullid:
739 739 # is one parent an ancestor of the other?
740 740 fparentancestor = flog.ancestor(fparent1, fparent2)
741 741 if fparentancestor == fparent1:
742 742 fparent1, fparent2 = fparent2, nullid
743 743 elif fparentancestor == fparent2:
744 744 fparent2 = nullid
745 745
746 746 # is the file changed?
747 747 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
748 748 changelist.append(fname)
749 749 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
750 750
751 751 # are just the flags changed during merge?
752 752 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
753 753 changelist.append(fname)
754 754
755 755 return fparent1
756 756
757 757 def commit(self, text="", user=None, date=None, match=None, force=False,
758 758 editor=False, extra={}):
759 759 """Add a new revision to current repository.
760 760
761 761 Revision information is gathered from the working directory,
762 762 match can be used to filter the committed files. If editor is
763 763 supplied, it is called to get a commit message.
764 764 """
765 765
766 766 def fail(f, msg):
767 767 raise util.Abort('%s: %s' % (f, msg))
768 768
769 769 if not match:
770 770 match = match_.always(self.root, '')
771 771
772 772 if not force:
773 773 vdirs = []
774 774 match.dir = vdirs.append
775 775 match.bad = fail
776 776
777 777 wlock = self.wlock()
778 778 try:
779 779 p1, p2 = self.dirstate.parents()
780 780 wctx = self[None]
781 781
782 782 if (not force and p2 != nullid and match and
783 783 (match.files() or match.anypats())):
784 784 raise util.Abort(_('cannot partially commit a merge '
785 785 '(do not specify files or patterns)'))
786 786
787 787 changes = self.status(match=match, clean=force)
788 788 if force:
789 789 changes[0].extend(changes[6]) # mq may commit unchanged files
790 790
791 791 # check subrepos
792 792 subs = []
793 793 removedsubs = set()
794 794 for p in wctx.parents():
795 795 removedsubs.update(s for s in p.substate if match(s))
796 796 for s in wctx.substate:
797 797 removedsubs.discard(s)
798 798 if match(s) and wctx.sub(s).dirty():
799 799 subs.append(s)
800 800 if (subs or removedsubs) and '.hgsubstate' not in changes[0]:
801 801 changes[0].insert(0, '.hgsubstate')
802 802
803 803 # make sure all explicit patterns are matched
804 804 if not force and match.files():
805 805 matched = set(changes[0] + changes[1] + changes[2])
806 806
807 807 for f in match.files():
808 808 if f == '.' or f in matched or f in wctx.substate:
809 809 continue
810 810 if f in changes[3]: # missing
811 811 fail(f, _('file not found!'))
812 812 if f in vdirs: # visited directory
813 813 d = f + '/'
814 814 for mf in matched:
815 815 if mf.startswith(d):
816 816 break
817 817 else:
818 818 fail(f, _("no match under directory!"))
819 819 elif f not in self.dirstate:
820 820 fail(f, _("file not tracked!"))
821 821
822 822 if (not force and not extra.get("close") and p2 == nullid
823 823 and not (changes[0] or changes[1] or changes[2])
824 824 and self[None].branch() == self['.'].branch()):
825 825 return None
826 826
827 827 ms = merge_.mergestate(self)
828 828 for f in changes[0]:
829 829 if f in ms and ms[f] == 'u':
830 830 raise util.Abort(_("unresolved merge conflicts "
831 831 "(see hg resolve)"))
832 832
833 833 cctx = context.workingctx(self, (p1, p2), text, user, date,
834 834 extra, changes)
835 835 if editor:
836 836 cctx._text = editor(self, cctx, subs)
837 837 edited = (text != cctx._text)
838 838
839 839 # commit subs
840 840 if subs or removedsubs:
841 841 state = wctx.substate.copy()
842 842 for s in subs:
843 843 self.ui.status(_('committing subrepository %s\n') % s)
844 844 sr = wctx.sub(s).commit(cctx._text, user, date)
845 845 state[s] = (state[s][0], sr)
846 846 subrepo.writestate(self, state)
847 847
848 848 # Save commit message in case this transaction gets rolled back
849 849 # (e.g. by a pretxncommit hook). Leave the content alone on
850 850 # the assumption that the user will use the same editor again.
851 851 msgfile = self.opener('last-message.txt', 'wb')
852 852 msgfile.write(cctx._text)
853 853 msgfile.close()
854 854
855 855 try:
856 856 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
857 857 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
858 858 ret = self.commitctx(cctx, True)
859 859 except:
860 860 if edited:
861 861 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
862 862 self.ui.write(
863 863 _('note: commit message saved in %s\n') % msgfn)
864 864 raise
865 865
866 866 # update dirstate and mergestate
867 867 for f in changes[0] + changes[1]:
868 868 self.dirstate.normal(f)
869 869 for f in changes[2]:
870 870 self.dirstate.forget(f)
871 871 self.dirstate.setparents(ret)
872 872 ms.reset()
873 873 finally:
874 874 wlock.release()
875 875
876 876 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
877 877 return ret
878 878
879 879 def commitctx(self, ctx, error=False):
880 880 """Add a new revision to current repository.
881 881 Revision information is passed via the context argument.
882 882 """
883 883
884 884 tr = lock = None
885 885 removed = ctx.removed()
886 886 p1, p2 = ctx.p1(), ctx.p2()
887 887 m1 = p1.manifest().copy()
888 888 m2 = p2.manifest()
889 889 user = ctx.user()
890 890
891 891 lock = self.lock()
892 892 try:
893 893 tr = self.transaction()
894 894 trp = weakref.proxy(tr)
895 895
896 896 # check in files
897 897 new = {}
898 898 changed = []
899 899 linkrev = len(self)
900 900 for f in sorted(ctx.modified() + ctx.added()):
901 901 self.ui.note(f + "\n")
902 902 try:
903 903 fctx = ctx[f]
904 904 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
905 905 changed)
906 906 m1.set(f, fctx.flags())
907 907 except OSError, inst:
908 908 self.ui.warn(_("trouble committing %s!\n") % f)
909 909 raise
910 910 except IOError, inst:
911 911 errcode = getattr(inst, 'errno', errno.ENOENT)
912 912 if error or errcode and errcode != errno.ENOENT:
913 913 self.ui.warn(_("trouble committing %s!\n") % f)
914 914 raise
915 915 else:
916 916 removed.append(f)
917 917
918 918 # update manifest
919 919 m1.update(new)
920 920 removed = [f for f in sorted(removed) if f in m1 or f in m2]
921 921 drop = [f for f in removed if f in m1]
922 922 for f in drop:
923 923 del m1[f]
924 924 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
925 925 p2.manifestnode(), (new, drop))
926 926
927 927 # update changelog
928 928 self.changelog.delayupdate()
929 929 n = self.changelog.add(mn, changed + removed, ctx.description(),
930 930 trp, p1.node(), p2.node(),
931 931 user, ctx.date(), ctx.extra().copy())
932 932 p = lambda: self.changelog.writepending() and self.root or ""
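            # writepending() dumps the appended changelog entries to
            # 00changelog.i.a so the pretxncommit hook can see the pending
            # changeset; the changelog propertycache above picks that file
            # up when HG_PENDING is set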
933 933 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
934 934 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
935 935 parent2=xp2, pending=p)
936 936 self.changelog.finalize(trp)
937 937 tr.close()
938 938
939 939 if self._branchcache:
940 940 self.branchtags()
941 941 return n
942 942 finally:
943 943 del tr
944 944 lock.release()
945 945
946 946 def destroyed(self):
947 947 '''Inform the repository that nodes have been destroyed.
948 948 Intended for use by strip and rollback, so there's a common
949 949 place for anything that has to be done after destroying history.'''
950 950 # XXX it might be nice if we could take the list of destroyed
951 951 # nodes, but I don't see an easy way for rollback() to do that
952 952
953 953 # Ensure the persistent tag cache is updated. Doing it now
954 954 # means that the tag cache only has to worry about destroyed
955 955 # heads immediately after a strip/rollback. That in turn
956 956 # guarantees that "cachetip == currenttip" (comparing both rev
957 957 # and node) always means no nodes have been added or destroyed.
958 958
959 959 # XXX this is suboptimal when qrefresh'ing: we strip the current
960 960 # head, refresh the tag cache, then immediately add a new head.
961 961 # But I think doing it this way is necessary for the "instant
962 962 # tag cache retrieval" case to work.
963 963 self.invalidatecaches()
964 964
965 965 def walk(self, match, node=None):
966 966 '''
967 967 walk recursively through the directory tree or a given
968 968 changeset, finding all files matched by the match
969 969 function
970 970 '''
971 971 return self[node].walk(match)
972 972
973 973 def status(self, node1='.', node2=None, match=None,
974 974 ignored=False, clean=False, unknown=False):
975 975         """return status of files between two nodes, or a node and the working directory
976 976
977 977 If node1 is None, use the first dirstate parent instead.
978 978 If node2 is None, compare node1 with working directory.
979 979 """
980 980
981 981 def mfmatches(ctx):
982 982 mf = ctx.manifest().copy()
983 983 for fn in mf.keys():
984 984 if not match(fn):
985 985 del mf[fn]
986 986 return mf
987 987
988 988 if isinstance(node1, context.changectx):
989 989 ctx1 = node1
990 990 else:
991 991 ctx1 = self[node1]
992 992 if isinstance(node2, context.changectx):
993 993 ctx2 = node2
994 994 else:
995 995 ctx2 = self[node2]
996 996
997 997 working = ctx2.rev() is None
998 998 parentworking = working and ctx1 == self['.']
999 999 match = match or match_.always(self.root, self.getcwd())
1000 1000 listignored, listclean, listunknown = ignored, clean, unknown
1001 1001
1002 1002 # load earliest manifest first for caching reasons
1003 1003 if not working and ctx2.rev() < ctx1.rev():
1004 1004 ctx2.manifest()
1005 1005
1006 1006 if not parentworking:
1007 1007 def bad(f, msg):
1008 1008 if f not in ctx1:
1009 1009 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1010 1010 match.bad = bad
1011 1011
1012 1012 if working: # we need to scan the working dir
1013 1013 subrepos = ctx1.substate.keys()
1014 1014 s = self.dirstate.status(match, subrepos, listignored,
1015 1015 listclean, listunknown)
1016 1016 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1017 1017
1018 1018 # check for any possibly clean files
1019 1019 if parentworking and cmp:
1020 1020 fixup = []
1021 1021 # do a full compare of any files that might have changed
1022 1022 for f in sorted(cmp):
1023 1023 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1024 1024 or ctx1[f].cmp(ctx2[f].data())):
1025 1025 modified.append(f)
1026 1026 else:
1027 1027 fixup.append(f)
1028 1028
1029 1029 if listclean:
1030 1030 clean += fixup
1031 1031
1032 1032 # update dirstate for files that are actually clean
1033 1033 if fixup:
1034 1034 try:
1035 1035 # updating the dirstate is optional
1036 1036 # so we don't wait on the lock
1037 1037 wlock = self.wlock(False)
1038 1038 try:
1039 1039 for f in fixup:
1040 1040 self.dirstate.normal(f)
1041 1041 finally:
1042 1042 wlock.release()
1043 1043 except error.LockError:
1044 1044 pass
1045 1045
1046 1046 if not parentworking:
1047 1047 mf1 = mfmatches(ctx1)
1048 1048 if working:
1049 1049 # we are comparing working dir against non-parent
1050 1050 # generate a pseudo-manifest for the working dir
1051 1051 mf2 = mfmatches(self['.'])
1052 1052 for f in cmp + modified + added:
1053 1053 mf2[f] = None
1054 1054 mf2.set(f, ctx2.flags(f))
1055 1055 for f in removed:
1056 1056 if f in mf2:
1057 1057 del mf2[f]
1058 1058 else:
1059 1059 # we are comparing two revisions
1060 1060 deleted, unknown, ignored = [], [], []
1061 1061 mf2 = mfmatches(ctx2)
1062 1062
1063 1063 modified, added, clean = [], [], []
1064 1064 for fn in mf2:
1065 1065 if fn in mf1:
1066 1066 if (mf1.flags(fn) != mf2.flags(fn) or
1067 1067 (mf1[fn] != mf2[fn] and
1068 1068 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1069 1069 modified.append(fn)
1070 1070 elif listclean:
1071 1071 clean.append(fn)
1072 1072 del mf1[fn]
1073 1073 else:
1074 1074 added.append(fn)
1075 1075 removed = mf1.keys()
1076 1076
1077 1077 r = modified, added, removed, deleted, unknown, ignored, clean
1078 1078 [l.sort() for l in r]
1079 1079 return r
1080 1080
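status() always returns the same seven sorted lists in a fixed order, and callers unpack them positionally. A usage sketch, assuming repo is an existing localrepository instance:

# 'repo' is assumed to be an existing localrepository instance.
st = repo.status(ignored=True, clean=True, unknown=True)
modified, added, removed, deleted, unknown, ignored, clean = st
for label, files in zip('MAR!?IC',
                        (modified, added, removed, deleted,
                         unknown, ignored, clean)):
    for f in files:
        print('%s %s' % (label, f))
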
1081 1081 def add(self, list):
1082 1082 wlock = self.wlock()
1083 1083 try:
1084 1084 rejected = []
1085 1085 for f in list:
1086 1086 p = self.wjoin(f)
1087 1087 try:
1088 1088 st = os.lstat(p)
1089 1089 except:
1090 1090 self.ui.warn(_("%s does not exist!\n") % f)
1091 1091 rejected.append(f)
1092 1092 continue
1093 1093 if st.st_size > 10000000:
1094 1094 self.ui.warn(_("%s: up to %d MB of RAM may be required "
1095 1095 "to manage this file\n"
1096 1096 "(use 'hg revert %s' to cancel the "
1097 1097 "pending addition)\n")
1098 1098 % (f, 3 * st.st_size // 1000000, f))
1099 1099 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1100 1100 self.ui.warn(_("%s not added: only files and symlinks "
1101 1101 "supported currently\n") % f)
1102 1102 rejected.append(p)
1103 1103 elif self.dirstate[f] in 'amn':
1104 1104 self.ui.warn(_("%s already tracked!\n") % f)
1105 1105 elif self.dirstate[f] == 'r':
1106 1106 self.dirstate.normallookup(f)
1107 1107 else:
1108 1108 self.dirstate.add(f)
1109 1109 return rejected
1110 1110 finally:
1111 1111 wlock.release()
1112 1112
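add() branches on the dirstate's one-character state for each file: 'a'dded, 'm'erged and 'n'ormal files are already tracked, 'r'emoved files are resurrected via normallookup(), and anything else gets a plain add. A small standalone sketch of that dispatch:

def classify_add(state):
    # Mirrors the dirstate-state dispatch in add() above; 'state' is a
    # single dirstate character ('n', 'a', 'r', 'm' or '?').
    if state in 'amn':
        return 'already tracked'
    elif state == 'r':
        return 'removed: will be re-added via normallookup'
    return 'newly added'

for s in 'narm?':
    print('%s -> %s' % (s, classify_add(s)))
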
1113 1113 def forget(self, list):
1114 1114 wlock = self.wlock()
1115 1115 try:
1116 1116 for f in list:
1117 1117 if self.dirstate[f] != 'a':
1118 1118 self.ui.warn(_("%s not added!\n") % f)
1119 1119 else:
1120 1120 self.dirstate.forget(f)
1121 1121 finally:
1122 1122 wlock.release()
1123 1123
1124 1124 def remove(self, list, unlink=False):
1125 1125 if unlink:
1126 1126 for f in list:
1127 1127 try:
1128 1128 util.unlink(self.wjoin(f))
1129 1129 except OSError, inst:
1130 1130 if inst.errno != errno.ENOENT:
1131 1131 raise
1132 1132 wlock = self.wlock()
1133 1133 try:
1134 1134 for f in list:
1135 1135 if unlink and os.path.exists(self.wjoin(f)):
1136 1136 self.ui.warn(_("%s still exists!\n") % f)
1137 1137 elif self.dirstate[f] == 'a':
1138 1138 self.dirstate.forget(f)
1139 1139 elif f not in self.dirstate:
1140 1140 self.ui.warn(_("%s not tracked!\n") % f)
1141 1141 else:
1142 1142 self.dirstate.remove(f)
1143 1143 finally:
1144 1144 wlock.release()
1145 1145
1146 1146 def undelete(self, list):
1147 1147 manifests = [self.manifest.read(self.changelog.read(p)[0])
1148 1148 for p in self.dirstate.parents() if p != nullid]
1149 1149 wlock = self.wlock()
1150 1150 try:
1151 1151 for f in list:
1152 1152 if self.dirstate[f] != 'r':
1153 1153 self.ui.warn(_("%s not removed!\n") % f)
1154 1154 else:
1155 1155 m = f in manifests[0] and manifests[0] or manifests[1]
1156 1156 t = self.file(f).read(m[f])
1157 1157 self.wwrite(f, t, m.flags(f))
1158 1158 self.dirstate.normal(f)
1159 1159 finally:
1160 1160 wlock.release()
1161 1161
1162 1162 def copy(self, source, dest):
1163 1163 p = self.wjoin(dest)
1164 1164 if not (os.path.exists(p) or os.path.islink(p)):
1165 1165 self.ui.warn(_("%s does not exist!\n") % dest)
1166 1166 elif not (os.path.isfile(p) or os.path.islink(p)):
1167 1167 self.ui.warn(_("copy failed: %s is not a file or a "
1168 1168 "symbolic link\n") % dest)
1169 1169 else:
1170 1170 wlock = self.wlock()
1171 1171 try:
1172 1172 if self.dirstate[dest] in '?r':
1173 1173 self.dirstate.add(dest)
1174 1174 self.dirstate.copy(source, dest)
1175 1175 finally:
1176 1176 wlock.release()
1177 1177
1178 1178 def heads(self, start=None):
1179 1179 heads = self.changelog.heads(start)
1180 1180 # sort the output in rev descending order
1181 1181 heads = [(-self.changelog.rev(h), h) for h in heads]
1182 1182 return [n for (r, n) in sorted(heads)]
1183 1183
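heads() gets descending-revision order out of an ascending sort by negating the revision in the sort key. The same idiom in isolation, with a hypothetical node-to-rev table:

revof = {'n1': 5, 'n2': 12, 'n3': 7}    # hypothetical node -> rev table
heads = ['n1', 'n2', 'n3']
pairs = [(-revof[h], h) for h in heads]
print([n for (r, n) in sorted(pairs)])  # ['n2', 'n3', 'n1']: newest first
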
1184 1184 def branchheads(self, branch=None, start=None, closed=False):
1185 1185 '''return a (possibly filtered) list of heads for the given branch
1186 1186
1187 1187 Heads are returned in topological order, from newest to oldest.
1188 1188 If branch is None, use the dirstate branch.
1189 1189 If start is not None, return only heads reachable from start.
1190 1190 If closed is True, return heads that are marked as closed as well.
1191 1191 '''
1192 1192 if branch is None:
1193 1193 branch = self[None].branch()
1194 1194 branches = self.branchmap()
1195 1195 if branch not in branches:
1196 1196 return []
1197 1197 # the cache returns heads ordered lowest to highest
1198 1198 bheads = list(reversed(branches[branch]))
1199 1199 if start is not None:
1200 1200 # filter out the heads that cannot be reached from startrev
1201 1201 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1202 1202 bheads = [h for h in bheads if h in fbheads]
1203 1203 if not closed:
1204 1204 bheads = [h for h in bheads if
1205 1205 ('close' not in self.changelog.read(h)[5])]
1206 1206 return bheads
1207 1207
1208 1208 def branches(self, nodes):
1209 1209 if not nodes:
1210 1210 nodes = [self.changelog.tip()]
1211 1211 b = []
1212 1212 for n in nodes:
1213 1213 t = n
1214 1214 while 1:
1215 1215 p = self.changelog.parents(n)
1216 1216 if p[1] != nullid or p[0] == nullid:
1217 1217 b.append((t, n, p[0], p[1]))
1218 1218 break
1219 1219 n = p[0]
1220 1220 return b
1221 1221
1222 1222 def between(self, pairs):
1223 1223 r = []
1224 1224
1225 1225 for top, bottom in pairs:
1226 1226 n, l, i = top, [], 0
1227 1227 f = 1
1228 1228
1229 1229 while n != bottom and n != nullid:
1230 1230 p = self.changelog.parents(n)[0]
1231 1231 if i == f:
1232 1232 l.append(n)
1233 1233 f = f * 2
1234 1234 n = p
1235 1235 i += 1
1236 1236
1237 1237 r.append(l)
1238 1238
1239 1239 return r
1240 1240
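between() walks first parents from each top toward its bottom and records nodes at exponentially growing distances (1, 2, 4, 8, ...), which is what lets the discovery code below bisect a whole linear segment with few round trips. A toy sketch on a chain of integer nodes where parent(n) == n - 1:

def sample(top, bottom=0):
    # Toy version of between() for one (top, bottom) pair on a linear
    # chain of ints; records nodes at distances 1, 2, 4, 8, ... from top.
    n, l, i, f = top, [], 0, 1
    while n != bottom:
        if i == f:             # distance hit: record, then double the gap
            l.append(n)
            f *= 2
        n -= 1                 # step to the first parent
        i += 1
    return l

print(sample(20))              # [19, 18, 16, 12, 4]
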
1241 1241 def findincoming(self, remote, base=None, heads=None, force=False):
1242 1242 """Return list of roots of the subsets of missing nodes from remote
1243 1243
1244 1244 If base dict is specified, assume that these nodes and their parents
1245 1245 exist on the remote side and that no child of a node of base exists
1246 1246 in both remote and self.
1247 1247         Furthermore, base will be updated to include the nodes that exist
1248 1248         in both self and remote but none of whose children exist in both.
1249 1249 If a list of heads is specified, return only nodes which are heads
1250 1250 or ancestors of these heads.
1251 1251
1252 1252 All the ancestors of base are in self and in remote.
1253 1253 All the descendants of the list returned are missing in self.
1254 1254 (and so we know that the rest of the nodes are missing in remote, see
1255 1255 outgoing)
1256 1256 """
1257 1257 return self.findcommonincoming(remote, base, heads, force)[1]
1258 1258
1259 1259 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1260 1260 """Return a tuple (common, missing roots, heads) used to identify
1261 1261 missing nodes from remote.
1262 1262
1263 1263 If base dict is specified, assume that these nodes and their parents
1264 1264 exist on the remote side and that no child of a node of base exists
1265 1265 in both remote and self.
1266 1266         Furthermore, base will be updated to include the nodes that exist
1267 1267         in both self and remote but none of whose children exist in both.
1268 1268 If a list of heads is specified, return only nodes which are heads
1269 1269 or ancestors of these heads.
1270 1270
1271 1271 All the ancestors of base are in self and in remote.
1272 1272 """
1273 1273 m = self.changelog.nodemap
1274 1274 search = []
1275 1275 fetch = set()
1276 1276 seen = set()
1277 1277 seenbranch = set()
1278 1278 if base is None:
1279 1279 base = {}
1280 1280
1281 1281 if not heads:
1282 1282 heads = remote.heads()
1283 1283
1284 1284 if self.changelog.tip() == nullid:
1285 1285 base[nullid] = 1
1286 1286 if heads != [nullid]:
1287 1287 return [nullid], [nullid], list(heads)
1288 1288 return [nullid], [], []
1289 1289
1290 1290 # assume we're closer to the tip than the root
1291 1291 # and start by examining the heads
1292 1292 self.ui.status(_("searching for changes\n"))
1293 1293
1294 1294 unknown = []
1295 1295 for h in heads:
1296 1296 if h not in m:
1297 1297 unknown.append(h)
1298 1298 else:
1299 1299 base[h] = 1
1300 1300
1301 1301 heads = unknown
1302 1302 if not unknown:
1303 1303 return base.keys(), [], []
1304 1304
1305 1305 req = set(unknown)
1306 1306 reqcnt = 0
1307 1307
1308 1308 # search through remote branches
1309 1309 # a 'branch' here is a linear segment of history, with four parts:
1310 1310 # head, root, first parent, second parent
1311 1311 # (a branch always has two parents (or none) by definition)
1312 1312 unknown = remote.branches(unknown)
1313 1313 while unknown:
1314 1314 r = []
1315 1315 while unknown:
1316 1316 n = unknown.pop(0)
1317 1317 if n[0] in seen:
1318 1318 continue
1319 1319
1320 1320 self.ui.debug("examining %s:%s\n"
1321 1321 % (short(n[0]), short(n[1])))
1322 1322 if n[0] == nullid: # found the end of the branch
1323 1323 pass
1324 1324 elif n in seenbranch:
1325 1325 self.ui.debug("branch already found\n")
1326 1326 continue
1327 1327 elif n[1] and n[1] in m: # do we know the base?
1328 1328 self.ui.debug("found incomplete branch %s:%s\n"
1329 1329 % (short(n[0]), short(n[1])))
1330 1330 search.append(n[0:2]) # schedule branch range for scanning
1331 1331 seenbranch.add(n)
1332 1332 else:
1333 1333 if n[1] not in seen and n[1] not in fetch:
1334 1334 if n[2] in m and n[3] in m:
1335 1335 self.ui.debug("found new changeset %s\n" %
1336 1336 short(n[1]))
1337 1337 fetch.add(n[1]) # earliest unknown
1338 1338 for p in n[2:4]:
1339 1339 if p in m:
1340 1340 base[p] = 1 # latest known
1341 1341
1342 1342 for p in n[2:4]:
1343 1343 if p not in req and p not in m:
1344 1344 r.append(p)
1345 1345 req.add(p)
1346 1346 seen.add(n[0])
1347 1347
1348 1348 if r:
1349 1349 reqcnt += 1
1350 1350 self.ui.progress(_('searching'), reqcnt, unit=_('queries'))
1351 1351 self.ui.debug("request %d: %s\n" %
1352 1352 (reqcnt, " ".join(map(short, r))))
1353 1353 for p in xrange(0, len(r), 10):
1354 1354 for b in remote.branches(r[p:p + 10]):
1355 1355 self.ui.debug("received %s:%s\n" %
1356 1356 (short(b[0]), short(b[1])))
1357 1357 unknown.append(b)
1358 1358
1359 1359 # do binary search on the branches we found
1360 1360 while search:
1361 1361 newsearch = []
1362 1362 reqcnt += 1
1363 1363 self.ui.progress(_('searching'), reqcnt, unit=_('queries'))
1364 1364 for n, l in zip(search, remote.between(search)):
1365 1365 l.append(n[1])
1366 1366 p = n[0]
1367 1367 f = 1
1368 1368 for i in l:
1369 1369 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1370 1370 if i in m:
1371 1371 if f <= 2:
1372 1372 self.ui.debug("found new branch changeset %s\n" %
1373 1373 short(p))
1374 1374 fetch.add(p)
1375 1375 base[i] = 1
1376 1376 else:
1377 1377 self.ui.debug("narrowed branch search to %s:%s\n"
1378 1378 % (short(p), short(i)))
1379 1379 newsearch.append((p, i))
1380 1380 break
1381 1381 p, f = i, f * 2
1382 1382 search = newsearch
1383 1383
1384 1384 # sanity check our fetch list
1385 1385 for f in fetch:
1386 1386 if f in m:
1387 1387 raise error.RepoError(_("already have changeset ")
1388 1388                                     + short(f))
1389 1389
1390 1390 if base.keys() == [nullid]:
1391 1391 if force:
1392 1392 self.ui.warn(_("warning: repository is unrelated\n"))
1393 1393 else:
1394 1394 raise util.Abort(_("repository is unrelated"))
1395 1395
1396 1396 self.ui.debug("found new changesets starting at " +
1397 1397 " ".join([short(f) for f in fetch]) + "\n")
1398 1398
1399 1399 self.ui.progress(_('searching'), None, unit=_('queries'))
1400 1400 self.ui.debug("%d total queries\n" % reqcnt)
1401 1401
1402 1402 return base.keys(), list(fetch), heads
1403 1403
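The narrowing loop above is a binary search driven by between()'s exponential samples: walking a segment's samples until one is known locally either pins the earliest unknown node (once the remaining gap is at most 2) or yields a smaller (unknown, known) pair to rescan. A toy sketch on a linear chain of integer nodes where everything up to known_max is already known:

def find_boundary(head, bottom, known_max):
    # Toy analogue of the 'while search' loop above; nodes are ints on
    # a linear chain, and "known locally" means node <= known_max.
    def between(top, bot):     # stand-in for remote.between()
        l, n, i, f = [], top, 0, 1
        while n != bot:
            if i == f:
                l.append(n)
                f *= 2
            n -= 1
            i += 1
        return l
    search = [(head, bottom)]
    while search:
        top, bot = search.pop()
        p, f = top, 1
        for i in between(top, bot) + [bot]:
            if i <= known_max:         # first locally known sample
                if f <= 2:
                    return p           # gap closed: earliest unknown
                search.append((p, i))  # rescan the narrowed segment
                break
            p, f = i, f * 2

print(find_boundary(100, 0, 37))       # 38: the earliest unknown node
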
1404 1404 def findoutgoing(self, remote, base=None, heads=None, force=False):
1405 1405 """Return list of nodes that are roots of subsets not in remote
1406 1406
1407 1407 If base dict is specified, assume that these nodes and their parents
1408 1408 exist on the remote side.
1409 1409 If a list of heads is specified, return only nodes which are heads
1410 1410 or ancestors of these heads, and return a second element which
1411 1411 contains all remote heads which get new children.
1412 1412 """
1413 1413 if base is None:
1414 1414 base = {}
1415 1415 self.findincoming(remote, base, heads, force=force)
1416 1416
1417 1417 self.ui.debug("common changesets up to "
1418 1418 + " ".join(map(short, base.keys())) + "\n")
1419 1419
1420 1420 remain = set(self.changelog.nodemap)
1421 1421
1422 1422 # prune everything remote has from the tree
1423 1423 remain.remove(nullid)
1424 1424 remove = base.keys()
1425 1425 while remove:
1426 1426 n = remove.pop(0)
1427 1427 if n in remain:
1428 1428 remain.remove(n)
1429 1429 for p in self.changelog.parents(n):
1430 1430 remove.append(p)
1431 1431
1432 1432 # find every node whose parents have been pruned
1433 1433 subset = []
1434 1434 # find every remote head that will get new children
1435 1435 updated_heads = set()
1436 1436 for n in remain:
1437 1437 p1, p2 = self.changelog.parents(n)
1438 1438 if p1 not in remain and p2 not in remain:
1439 1439 subset.append(n)
1440 1440 if heads:
1441 1441 if p1 in heads:
1442 1442 updated_heads.add(p1)
1443 1443 if p2 in heads:
1444 1444 updated_heads.add(p2)
1445 1445
1446 1446 # this is the set of all roots we have to push
1447 1447 if heads:
1448 1448 return subset, list(updated_heads)
1449 1449 else:
1450 1450 return subset
1451 1451
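The pruning pass above removes everything the remote already has by walking parents from base, then keeps exactly those remaining nodes whose parents were both pruned: the roots of the outgoing subset. The same idea on a hypothetical four-node chain:

parents = {                     # hypothetical DAG: node -> (p1, p2)
    'a': (None, None), 'b': ('a', None),
    'c': ('b', None), 'd': ('c', None),
}
remain = set(parents)
remove = ['b']                  # remote is known to have 'b' and ancestors
while remove:
    n = remove.pop()
    if n in remain:
        remain.remove(n)
        remove.extend(p for p in parents[n] if p)
roots = [n for n in remain
         if all(p not in remain for p in parents[n])]
print(sorted(remain), roots)    # ['c', 'd'] ['c']
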
1452 1452 def pull(self, remote, heads=None, force=False):
1453 1453 lock = self.lock()
1454 1454 try:
1455 1455 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1456 1456 force=force)
1457 1457 if not fetch:
1458 1458 self.ui.status(_("no changes found\n"))
1459 1459 return 0
1460 1460
1461 1461 if fetch == [nullid]:
1462 1462 self.ui.status(_("requesting all changes\n"))
1463 1463 elif heads is None and remote.capable('changegroupsubset'):
1464 1464 # issue1320, avoid a race if remote changed after discovery
1465 1465 heads = rheads
1466 1466
1467 1467 if heads is None:
1468 1468 cg = remote.changegroup(fetch, 'pull')
1469 1469 else:
1470 1470 if not remote.capable('changegroupsubset'):
1471 1471 raise util.Abort(_("Partial pull cannot be done because "
1472 1472 "other repository doesn't support "
1473 1473 "changegroupsubset."))
1474 1474 cg = remote.changegroupsubset(fetch, heads, 'pull')
1475 1475 return self.addchangegroup(cg, 'pull', remote.url())
1476 1476 finally:
1477 1477 lock.release()
1478 1478
1479 1479 def push(self, remote, force=False, revs=None):
1480 1480 # there are two ways to push to remote repo:
1481 1481 #
1482 1482 # addchangegroup assumes local user can lock remote
1483 1483 # repo (local filesystem, old ssh servers).
1484 1484 #
1485 1485 # unbundle assumes local user cannot lock remote repo (new ssh
1486 1486 # servers, http servers).
1487 1487
1488 1488 if remote.capable('unbundle'):
1489 1489 return self.push_unbundle(remote, force, revs)
1490 1490 return self.push_addchangegroup(remote, force, revs)
1491 1491
1492 1492 def prepush(self, remote, force, revs):
1493 1493 '''Analyze the local and remote repositories and determine which
1494 1494 changesets need to be pushed to the remote. Return a tuple
1495 1495 (changegroup, remoteheads). changegroup is a readable file-like
1496 1496 object whose read() returns successive changegroup chunks ready to
1497 1497 be sent over the wire. remoteheads is the list of remote heads.
1498 1498 '''
1499 1499 common = {}
1500 1500 remote_heads = remote.heads()
1501 1501 inc = self.findincoming(remote, common, remote_heads, force=force)
1502 1502
1503 1503 cl = self.changelog
1504 1504 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1505 1505 outg, bases, heads = cl.nodesbetween(update, revs)
1506 1506
1507 1507 if not bases:
1508 1508 self.ui.status(_("no changes found\n"))
1509 1509 return None, 1
1510 1510
1511 1511 if not force and remote_heads != [nullid]:
1512 1512
1513 1513 def fail_multiple_heads(unsynced, branch=None):
1514 1514 if branch:
1515 1515 msg = _("abort: push creates new remote heads"
1516 1516 " on branch '%s'!\n") % branch
1517 1517 else:
1518 1518 msg = _("abort: push creates new remote heads!\n")
1519 1519 self.ui.warn(msg)
1520 1520 if unsynced:
1521 1521 self.ui.status(_("(you should pull and merge or"
1522 1522 " use push -f to force)\n"))
1523 1523 else:
1524 1524 self.ui.status(_("(did you forget to merge?"
1525 1525 " use push -f to force)\n"))
1526 1526 return None, 0
1527 1527
1528 1528 if remote.capable('branchmap'):
1529 1529 # Check for each named branch if we're creating new remote heads.
1530 1530 # To be a remote head after push, node must be either:
1531 1531 # - unknown locally
1532 1532 # - a local outgoing head descended from update
1533 1533 # - a remote head that's known locally and not
1534 1534 # ancestral to an outgoing head
1535 1535 #
1536 1536 # New named branches cannot be created without --force.
1537 1537
1538 1538 # 1. Create set of branches involved in the push.
1539 1539 branches = set(self[n].branch() for n in outg)
1540 1540
1541 1541 # 2. Check for new branches on the remote.
1542 1542 remotemap = remote.branchmap()
1543 1543 newbranches = branches - set(remotemap)
1544 1544 if newbranches: # new branch requires --force
1545 1545 branchnames = ', '.join("%s" % b for b in newbranches)
1546 1546 self.ui.warn(_("abort: push creates "
1547 1547 "new remote branches: %s!\n")
1548 1548 % branchnames)
1549 1549 self.ui.status(_("(use 'hg push -f' to force)\n"))
1550 1550 return None, 0
1551 1551
1552 1552 # 3. Construct the initial oldmap and newmap dicts.
1553 1553 # They contain information about the remote heads before and
1554 1554 # after the push, respectively.
1555 1555 # Heads not found locally are not included in either dict,
1556 1556 # since they won't be affected by the push.
1557 1557 # unsynced contains all branches with incoming changesets.
1558 1558 oldmap = {}
1559 1559 newmap = {}
1560 1560 unsynced = set()
1561 1561 for branch in branches:
1562 1562 remoteheads = remotemap[branch]
1563 1563 prunedheads = [h for h in remoteheads if h in cl.nodemap]
1564 1564 oldmap[branch] = prunedheads
1565 1565 newmap[branch] = list(prunedheads)
1566 1566 if len(remoteheads) > len(prunedheads):
1567 1567 unsynced.add(branch)
1568 1568
1569 1569 # 4. Update newmap with outgoing changes.
1570 1570 # This will possibly add new heads and remove existing ones.
1571 1571 ctxgen = (self[n] for n in outg)
1572 1572 self._updatebranchcache(newmap, ctxgen)
1573 1573
1574 1574 # 5. Check for new heads.
1575 1575 # If there are more heads after the push than before, a suitable
1576 1576 # warning, depending on unsynced status, is displayed.
1577 1577 for branch in branches:
1578 1578 if len(newmap[branch]) > len(oldmap[branch]):
1579 1579 return fail_multiple_heads(branch in unsynced, branch)
1580 1580
1581 1581 # 6. Check for unsynced changes on involved branches.
1582 1582 if unsynced:
1583 1583 self.ui.warn(_("note: unsynced remote changes!\n"))
1584 1584
1585 1585 else:
1586 1586 # Old servers: Check for new topological heads.
1587 1587 # Code based on _updatebranchcache.
1588 1588 newheads = set(h for h in remote_heads if h in cl.nodemap)
1589 1589 oldheadcnt = len(newheads)
1590 1590 newheads.update(outg)
1591 1591 if len(newheads) > 1:
1592 1592 for latest in reversed(outg):
1593 1593 if latest not in newheads:
1594 1594 continue
1595 1595 minhrev = min(cl.rev(h) for h in newheads)
1596 1596 reachable = cl.reachable(latest, cl.node(minhrev))
1597 1597 reachable.remove(latest)
1598 1598 newheads.difference_update(reachable)
1599 1599 if len(newheads) > oldheadcnt:
1600 1600 return fail_multiple_heads(inc)
1601 1601 if inc:
1602 1602 self.ui.warn(_("note: unsynced remote changes!\n"))
1603 1603
1604 1604 if revs is None:
1605 1605 # use the fast path, no race possible on push
1606 1606 nodes = self.changelog.findmissing(common.keys())
1607 1607 cg = self._changegroup(nodes, 'push')
1608 1608 else:
1609 1609 cg = self.changegroupsubset(update, revs, 'push')
1610 1610 return cg, remote_heads
1611 1611
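For servers with the branchmap capability, the new-head check in prepush() reduces to comparing per-branch head counts before and after the outgoing changesets are applied. A minimal sketch of that comparison (map contents are hypothetical):

def branches_gaining_heads(oldmap, newmap):
    # oldmap/newmap: branch -> list of head nodes, before/after the push.
    return [b for b in oldmap if len(newmap.get(b, ())) > len(oldmap[b])]

oldmap = {'default': ['h1']}            # remote heads also known locally
newmap = {'default': ['h2', 'h3']}      # heads after adding outgoing csets
print(branches_gaining_heads(oldmap, newmap))   # ['default'] -> abort
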
1612 1612 def push_addchangegroup(self, remote, force, revs):
1613 1613 lock = remote.lock()
1614 1614 try:
1615 1615 ret = self.prepush(remote, force, revs)
1616 1616 if ret[0] is not None:
1617 1617 cg, remote_heads = ret
1618 1618 return remote.addchangegroup(cg, 'push', self.url())
1619 1619 return ret[1]
1620 1620 finally:
1621 1621 lock.release()
1622 1622
1623 1623 def push_unbundle(self, remote, force, revs):
1624 1624 # local repo finds heads on server, finds out what revs it
1625 1625 # must push. once revs transferred, if server finds it has
1626 1626 # different heads (someone else won commit/push race), server
1627 1627 # aborts.
1628 1628
1629 1629 ret = self.prepush(remote, force, revs)
1630 1630 if ret[0] is not None:
1631 1631 cg, remote_heads = ret
1632 1632 if force:
1633 1633 remote_heads = ['force']
1634 1634 return remote.unbundle(cg, remote_heads, 'push')
1635 1635 return ret[1]
1636 1636
1637 1637 def changegroupinfo(self, nodes, source):
1638 1638 if self.ui.verbose or source == 'bundle':
1639 1639 self.ui.status(_("%d changesets found\n") % len(nodes))
1640 1640 if self.ui.debugflag:
1641 1641 self.ui.debug("list of changesets:\n")
1642 1642 for node in nodes:
1643 1643 self.ui.debug("%s\n" % hex(node))
1644 1644
1645 1645 def changegroupsubset(self, bases, heads, source, extranodes=None):
1646 1646 """Compute a changegroup consisting of all the nodes that are
1647 1647         descendants of any of the bases and ancestors of any of the heads.
1648 1648 Return a chunkbuffer object whose read() method will return
1649 1649 successive changegroup chunks.
1650 1650
1651 1651 It is fairly complex as determining which filenodes and which
1652 1652 manifest nodes need to be included for the changeset to be complete
1653 1653 is non-trivial.
1654 1654
1655 1655 Another wrinkle is doing the reverse, figuring out which changeset in
1656 1656 the changegroup a particular filenode or manifestnode belongs to.
1657 1657
1658 1658 The caller can specify some nodes that must be included in the
1659 1659 changegroup using the extranodes argument. It should be a dict
1660 1660 where the keys are the filenames (or 1 for the manifest), and the
1661 1661 values are lists of (node, linknode) tuples, where node is a wanted
1662 1662 node and linknode is the changelog node that should be transmitted as
1663 1663 the linkrev.
1664 1664 """
1665 1665
1666 1666 # Set up some initial variables
1667 1667 # Make it easy to refer to self.changelog
1668 1668 cl = self.changelog
1669 1669 # msng is short for missing - compute the list of changesets in this
1670 1670 # changegroup.
1671 1671 if not bases:
1672 1672 bases = [nullid]
1673 1673 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1674 1674
1675 1675 if extranodes is None:
1676 1676 # can we go through the fast path ?
1677 1677 heads.sort()
1678 1678 allheads = self.heads()
1679 1679 allheads.sort()
1680 1680 if heads == allheads:
1681 1681 return self._changegroup(msng_cl_lst, source)
1682 1682
1683 1683 # slow path
1684 1684 self.hook('preoutgoing', throw=True, source=source)
1685 1685
1686 1686 self.changegroupinfo(msng_cl_lst, source)
1687 1687 # Some bases may turn out to be superfluous, and some heads may be
1688 1688 # too. nodesbetween will return the minimal set of bases and heads
1689 1689 # necessary to re-create the changegroup.
1690 1690
1691 1691 # Known heads are the list of heads that it is assumed the recipient
1692 1692 # of this changegroup will know about.
1693 1693 knownheads = set()
1694 1694 # We assume that all parents of bases are known heads.
1695 1695 for n in bases:
1696 1696 knownheads.update(cl.parents(n))
1697 1697 knownheads.discard(nullid)
1698 1698 knownheads = list(knownheads)
1699 1699 if knownheads:
1700 1700 # Now that we know what heads are known, we can compute which
1701 1701 # changesets are known. The recipient must know about all
1702 1702 # changesets required to reach the known heads from the null
1703 1703 # changeset.
1704 1704 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1705 1705 junk = None
1706 1706 # Transform the list into a set.
1707 1707 has_cl_set = set(has_cl_set)
1708 1708 else:
1709 1709 # If there were no known heads, the recipient cannot be assumed to
1710 1710 # know about any changesets.
1711 1711 has_cl_set = set()
1712 1712
1713 1713 # Make it easy to refer to self.manifest
1714 1714 mnfst = self.manifest
1715 1715 # We don't know which manifests are missing yet
1716 1716 msng_mnfst_set = {}
1717 1717 # Nor do we know which filenodes are missing.
1718 1718 msng_filenode_set = {}
1719 1719
1720 1720 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1721 1721 junk = None
1722 1722
1723 1723 # A changeset always belongs to itself, so the changenode lookup
1724 1724 # function for a changenode is identity.
1725 1725 def identity(x):
1726 1726 return x
1727 1727
1728 1728 # If we determine that a particular file or manifest node must be a
1729 1729 # node that the recipient of the changegroup will already have, we can
1730 1730 # also assume the recipient will have all the parents. This function
1731 1731 # prunes them from the set of missing nodes.
1732 1732 def prune_parents(revlog, hasset, msngset):
1733 1733 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1734 1734 msngset.pop(revlog.node(r), None)
1735 1735
1736 1736         # Use the information collected by the changegroup collector to
1737 1737         # say which changenode any manifestnode belongs to.
1738 1738 def lookup_manifest_link(mnfstnode):
1739 1739 return msng_mnfst_set[mnfstnode]
1740 1740
1741 1741         # A function-generating function that sets up the initial environment
1742 1742         # for the inner function.
1743 1743 def filenode_collector(changedfiles):
1744 1744 # This gathers information from each manifestnode included in the
1745 1745 # changegroup about which filenodes the manifest node references
1746 1746 # so we can include those in the changegroup too.
1747 1747 #
1748 1748         # It also remembers which changenode each filenode belongs to. It
1749 1749         # does this by assuming that a filenode belongs to the same
1750 1750         # changenode as the first manifest that references it.
1751 1751 def collect_msng_filenodes(mnfstnode):
1752 1752 r = mnfst.rev(mnfstnode)
1753 1753 if r - 1 in mnfst.parentrevs(r):
1754 1754 # If the previous rev is one of the parents,
1755 1755 # we only need to see a diff.
1756 1756 deltamf = mnfst.readdelta(mnfstnode)
1757 1757 # For each line in the delta
1758 1758 for f, fnode in deltamf.iteritems():
1759 1759 f = changedfiles.get(f, None)
1760 1760 # And if the file is in the list of files we care
1761 1761 # about.
1762 1762 if f is not None:
1763 1763 # Get the changenode this manifest belongs to
1764 1764 clnode = msng_mnfst_set[mnfstnode]
1765 1765 # Create the set of filenodes for the file if
1766 1766 # there isn't one already.
1767 1767 ndset = msng_filenode_set.setdefault(f, {})
1768 1768 # And set the filenode's changelog node to the
1769 1769 # manifest's if it hasn't been set already.
1770 1770 ndset.setdefault(fnode, clnode)
1771 1771 else:
1772 1772 # Otherwise we need a full manifest.
1773 1773 m = mnfst.read(mnfstnode)
1774 1774                 # For every file we care about.
1775 1775 for f in changedfiles:
1776 1776 fnode = m.get(f, None)
1777 1777 # If it's in the manifest
1778 1778 if fnode is not None:
1779 1779 # See comments above.
1780 1780 clnode = msng_mnfst_set[mnfstnode]
1781 1781 ndset = msng_filenode_set.setdefault(f, {})
1782 1782 ndset.setdefault(fnode, clnode)
1783 1783 return collect_msng_filenodes
1784 1784
1785 1785         # We have a list of filenodes we think we need for a file; let's remove
1786 1786 # all those we know the recipient must have.
1787 1787 def prune_filenodes(f, filerevlog):
1788 1788 msngset = msng_filenode_set[f]
1789 1789 hasset = set()
1790 1790 # If a 'missing' filenode thinks it belongs to a changenode we
1791 1791 # assume the recipient must have, then the recipient must have
1792 1792 # that filenode.
1793 1793 for n in msngset:
1794 1794 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1795 1795 if clnode in has_cl_set:
1796 1796 hasset.add(n)
1797 1797 prune_parents(filerevlog, hasset, msngset)
1798 1798
1799 1799         # A function-generating function that sets up a context for the
1800 1800         # inner function.
1801 1801 def lookup_filenode_link_func(fname):
1802 1802 msngset = msng_filenode_set[fname]
1803 1803 # Lookup the changenode the filenode belongs to.
1804 1804 def lookup_filenode_link(fnode):
1805 1805 return msngset[fnode]
1806 1806 return lookup_filenode_link
1807 1807
1808 1808 # Add the nodes that were explicitly requested.
1809 1809 def add_extra_nodes(name, nodes):
1810 1810 if not extranodes or name not in extranodes:
1811 1811 return
1812 1812
1813 1813 for node, linknode in extranodes[name]:
1814 1814 if node not in nodes:
1815 1815 nodes[node] = linknode
1816 1816
1817 1817         # Now that we have all these utility functions to help out and
1818 1818 # logically divide up the task, generate the group.
1819 1819 def gengroup():
1820 1820 # The set of changed files starts empty.
1821 1821 changedfiles = {}
1822 1822 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1823 1823
1824 1824 # Create a changenode group generator that will call our functions
1825 1825 # back to lookup the owning changenode and collect information.
1826 1826 group = cl.group(msng_cl_lst, identity, collect)
1827 1827 cnt = 0
1828 1828 for chnk in group:
1829 1829 yield chnk
1830 1830 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1831 1831 cnt += 1
1832 1832 self.ui.progress(_('bundling changes'), None, unit=_('chunks'))
1833 1833
1834 1834
1835 1835 # Figure out which manifest nodes (of the ones we think might be
1836 1836 # part of the changegroup) the recipient must know about and
1837 1837 # remove them from the changegroup.
1838 1838 has_mnfst_set = set()
1839 1839 for n in msng_mnfst_set:
1840 1840 # If a 'missing' manifest thinks it belongs to a changenode
1841 1841 # the recipient is assumed to have, obviously the recipient
1842 1842 # must have that manifest.
1843 1843 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1844 1844 if linknode in has_cl_set:
1845 1845 has_mnfst_set.add(n)
1846 1846 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1847 1847 add_extra_nodes(1, msng_mnfst_set)
1848 1848 msng_mnfst_lst = msng_mnfst_set.keys()
1849 1849 # Sort the manifestnodes by revision number.
1850 1850 msng_mnfst_lst.sort(key=mnfst.rev)
1851 1851 # Create a generator for the manifestnodes that calls our lookup
1852 1852 # and data collection functions back.
1853 1853 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1854 1854 filenode_collector(changedfiles))
1855 1855 cnt = 0
1856 1856 for chnk in group:
1857 1857 yield chnk
1858 1858 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1859 1859 cnt += 1
1860 1860 self.ui.progress(_('bundling manifests'), None, unit=_('chunks'))
1861 1861
1862 1862 # These are no longer needed, dereference and toss the memory for
1863 1863 # them.
1864 1864 msng_mnfst_lst = None
1865 1865 msng_mnfst_set.clear()
1866 1866
1867 1867 if extranodes:
1868 1868 for fname in extranodes:
1869 1869 if isinstance(fname, int):
1870 1870 continue
1871 1871 msng_filenode_set.setdefault(fname, {})
1872 1872 changedfiles[fname] = 1
1873 1873 # Go through all our files in order sorted by name.
1874 1874 cnt = 0
1875 1875 for fname in sorted(changedfiles):
1876 1876 filerevlog = self.file(fname)
1877 1877 if not len(filerevlog):
1878 1878 raise util.Abort(_("empty or missing revlog for %s") % fname)
1879 1879 # Toss out the filenodes that the recipient isn't really
1880 1880 # missing.
1881 1881 if fname in msng_filenode_set:
1882 1882 prune_filenodes(fname, filerevlog)
1883 1883 add_extra_nodes(fname, msng_filenode_set[fname])
1884 1884 msng_filenode_lst = msng_filenode_set[fname].keys()
1885 1885 else:
1886 1886 msng_filenode_lst = []
1887 1887 # If any filenodes are left, generate the group for them,
1888 1888 # otherwise don't bother.
1889 1889 if len(msng_filenode_lst) > 0:
1890 1890 yield changegroup.chunkheader(len(fname))
1891 1891 yield fname
1892 1892 # Sort the filenodes by their revision #
1893 1893 msng_filenode_lst.sort(key=filerevlog.rev)
1894 1894 # Create a group generator and only pass in a changenode
1895 1895 # lookup function as we need to collect no information
1896 1896 # from filenodes.
1897 1897 group = filerevlog.group(msng_filenode_lst,
1898 1898 lookup_filenode_link_func(fname))
1899 1899 for chnk in group:
1900 1900 self.ui.progress(
1901 1901 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1902 1902 cnt += 1
1903 1903 yield chnk
1904 1904 if fname in msng_filenode_set:
1905 1905 # Don't need this anymore, toss it to free memory.
1906 1906 del msng_filenode_set[fname]
1907 1907 # Signal that no more groups are left.
1908 1908 yield changegroup.closechunk()
1909 1909 self.ui.progress(_('bundling files'), None, unit=_('chunks'))
1910 1910
1911 1911 if msng_cl_lst:
1912 1912 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1913 1913
1914 1914 return util.chunkbuffer(gengroup())
1915 1915
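The extranodes mapping described in the docstring is keyed by filename, with the integer 1 standing in for the manifest log, and each value lists (node, linknode) pairs. A shape-only example with placeholder 20-byte nodes:

# Shape of the 'extranodes' argument per the docstring above; the node
# values are placeholders, not real hashes.
extranodes = {
    1:         [(b'\x11' * 20, b'\xaa' * 20)],  # manifest node, linknode
    'foo.txt': [(b'\x22' * 20, b'\xbb' * 20)],  # filenode, linknode
}
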
1916 1916 def changegroup(self, basenodes, source):
1917 1917 # to avoid a race we use changegroupsubset() (issue1320)
1918 1918 return self.changegroupsubset(basenodes, self.heads(), source)
1919 1919
1920 1920 def _changegroup(self, nodes, source):
1921 1921 """Compute the changegroup of all nodes that we have that a recipient
1922 1922 doesn't. Return a chunkbuffer object whose read() method will return
1923 1923 successive changegroup chunks.
1924 1924
1925 1925 This is much easier than the previous function as we can assume that
1926 1926 the recipient has any changenode we aren't sending them.
1927 1927
1928 1928 nodes is the set of nodes to send"""
1929 1929
1930 1930 self.hook('preoutgoing', throw=True, source=source)
1931 1931
1932 1932 cl = self.changelog
1933 1933 revset = set([cl.rev(n) for n in nodes])
1934 1934 self.changegroupinfo(nodes, source)
1935 1935
1936 1936 def identity(x):
1937 1937 return x
1938 1938
1939 1939 def gennodelst(log):
1940 1940 for r in log:
1941 1941 if log.linkrev(r) in revset:
1942 1942 yield log.node(r)
1943 1943
1944 1944 def lookuprevlink_func(revlog):
1945 1945 def lookuprevlink(n):
1946 1946 return cl.node(revlog.linkrev(revlog.rev(n)))
1947 1947 return lookuprevlink
1948 1948
1949 1949 def gengroup():
1950 1950 '''yield a sequence of changegroup chunks (strings)'''
1951 1951 # construct a list of all changed files
1952 1952 changedfiles = {}
1953 1953 mmfs = {}
1954 1954 collect = changegroup.collector(cl, mmfs, changedfiles)
1955 1955
1956 1956 cnt = 0
1957 1957 for chnk in cl.group(nodes, identity, collect):
1958 1958 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1959 1959 cnt += 1
1960 1960 yield chnk
1961 1961 self.ui.progress(_('bundling changes'), None, unit=_('chunks'))
1962 1962
1963 1963 mnfst = self.manifest
1964 1964 nodeiter = gennodelst(mnfst)
1965 1965 cnt = 0
1966 1966 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1967 1967 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1968 1968 cnt += 1
1969 1969 yield chnk
1970 1970 self.ui.progress(_('bundling manifests'), None, unit=_('chunks'))
1971 1971
1972 1972 cnt = 0
1973 1973 for fname in sorted(changedfiles):
1974 1974 filerevlog = self.file(fname)
1975 1975 if not len(filerevlog):
1976 1976 raise util.Abort(_("empty or missing revlog for %s") % fname)
1977 1977 nodeiter = gennodelst(filerevlog)
1978 1978 nodeiter = list(nodeiter)
1979 1979 if nodeiter:
1980 1980 yield changegroup.chunkheader(len(fname))
1981 1981 yield fname
1982 1982 lookup = lookuprevlink_func(filerevlog)
1983 1983 for chnk in filerevlog.group(nodeiter, lookup):
1984 1984 self.ui.progress(
1985 1985 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1986 1986 cnt += 1
1987 1987 yield chnk
1988 1988 self.ui.progress(_('bundling files'), None, unit=_('chunks'))
1989 1989
1990 1990 yield changegroup.closechunk()
1991 1991
1992 1992 if nodes:
1993 1993 self.hook('outgoing', node=hex(nodes[0]), source=source)
1994 1994
1995 1995 return util.chunkbuffer(gengroup())
1996 1996
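gennodelst() is the filter that ties a revlog's revisions to the changesets being shipped: a revision is sent only when its linkrev falls in the outgoing revset. The same filter in isolation, over a hypothetical linkrev table:

linkrevs = [0, 1, 3, 3, 5]      # hypothetical rev -> linkrev table
revset = set([3, 4, 5])         # changelog revs being bundled
wanted = [r for r, lr in enumerate(linkrevs) if lr in revset]
print(wanted)                   # [2, 3, 4]: revs whose linkrev is sent
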
1997 1997 def addchangegroup(self, source, srctype, url, emptyok=False):
1998 1998 """add changegroup to repo.
1999 1999
2000 2000 return values:
2001 2001 - nothing changed or no source: 0
2002 2002 - more heads than before: 1+added heads (2..n)
2003 2003         - fewer heads than before: -1-removed heads (-2..-n)
2004 2004 - number of heads stays the same: 1
2005 2005 """
2006 2006 def csmap(x):
2007 2007 self.ui.debug("add changeset %s\n" % short(x))
2008 2008 return len(cl)
2009 2009
2010 2010 def revmap(x):
2011 2011 return cl.rev(x)
2012 2012
2013 2013 if not source:
2014 2014 return 0
2015 2015
2016 2016 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2017 2017
2018 2018 changesets = files = revisions = 0
2019 2019
2020 2020 # write changelog data to temp files so concurrent readers will not see
2021 2021         # an inconsistent view
2022 2022 cl = self.changelog
2023 2023 cl.delayupdate()
2024 2024 oldheads = len(cl.heads())
2025 2025
2026 2026 tr = self.transaction()
2027 2027 try:
2028 2028 trp = weakref.proxy(tr)
2029 2029 # pull off the changeset group
2030 2030 self.ui.status(_("adding changesets\n"))
2031 2031 clstart = len(cl)
2032 2032 class prog(object):
2033 2033 step = _('changesets')
2034 2034 count = 1
2035 2035 ui = self.ui
2036 2036 def __call__(self):
2037 2037 self.ui.progress(self.step, self.count, unit=_('chunks'))
2038 2038 self.count += 1
2039 2039 pr = prog()
2040 2040 chunkiter = changegroup.chunkiter(source, progress=pr)
2041 2041 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2042 2042 raise util.Abort(_("received changelog group is empty"))
2043 2043 clend = len(cl)
2044 2044 changesets = clend - clstart
2045 2045 self.ui.progress(_('changesets'), None)
2046 2046
2047 2047 # pull off the manifest group
2048 2048 self.ui.status(_("adding manifests\n"))
2049 2049 pr.step = _('manifests')
2050 2050 pr.count = 1
2051 2051 chunkiter = changegroup.chunkiter(source, progress=pr)
2052 2052 # no need to check for empty manifest group here:
2053 2053 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2054 2054 # no new manifest will be created and the manifest group will
2055 2055 # be empty during the pull
2056 2056 self.manifest.addgroup(chunkiter, revmap, trp)
2057 2057 self.ui.progress(_('manifests'), None)
2058 2058
2059 2059 needfiles = {}
2060 2060 if self.ui.configbool('server', 'validate', default=False):
2061 2061 # validate incoming csets have their manifests
2062 2062 for cset in xrange(clstart, clend):
2063 2063 mfest = self.changelog.read(self.changelog.node(cset))[0]
2064 2064 mfest = self.manifest.readdelta(mfest)
2065 2065 # store file nodes we must see
2066 2066 for f, n in mfest.iteritems():
2067 2067 needfiles.setdefault(f, set()).add(n)
2068 2068
2069 2069 # process the files
2070 2070 self.ui.status(_("adding file changes\n"))
2071 2071 pr.step = 'files'
2072 2072 pr.count = 1
2073 2073 while 1:
2074 2074 f = changegroup.getchunk(source)
2075 2075 if not f:
2076 2076 break
2077 2077 self.ui.debug("adding %s revisions\n" % f)
2078 2078 fl = self.file(f)
2079 2079 o = len(fl)
2080 2080 chunkiter = changegroup.chunkiter(source, progress=pr)
2081 2081 if fl.addgroup(chunkiter, revmap, trp) is None:
2082 2082 raise util.Abort(_("received file revlog group is empty"))
2083 2083 revisions += len(fl) - o
2084 2084 files += 1
2085 2085 if f in needfiles:
2086 2086 needs = needfiles[f]
2087 2087 for new in xrange(o, len(fl)):
2088 2088 n = fl.node(new)
2089 2089 if n in needs:
2090 2090 needs.remove(n)
2091 2091 if not needs:
2092 2092 del needfiles[f]
2093 2093 self.ui.progress(_('files'), None)
2094 2094
2095 2095 for f, needs in needfiles.iteritems():
2096 2096 fl = self.file(f)
2097 2097 for n in needs:
2098 2098 try:
2099 2099 fl.rev(n)
2100 2100 except error.LookupError:
2101 2101 raise util.Abort(
2102 2102 _('missing file data for %s:%s - run hg verify') %
2103 2103 (f, hex(n)))
2104 2104
2105 2105 newheads = len(cl.heads())
2106 2106 heads = ""
2107 2107 if oldheads and newheads != oldheads:
2108 2108 heads = _(" (%+d heads)") % (newheads - oldheads)
2109 2109
2110 2110 self.ui.status(_("added %d changesets"
2111 2111 " with %d changes to %d files%s\n")
2112 2112 % (changesets, revisions, files, heads))
2113 2113
2114 2114 if changesets > 0:
2115 2115 p = lambda: cl.writepending() and self.root or ""
2116 2116 self.hook('pretxnchangegroup', throw=True,
2117 2117 node=hex(cl.node(clstart)), source=srctype,
2118 2118 url=url, pending=p)
2119 2119
2120 2120 # make changelog see real files again
2121 2121 cl.finalize(trp)
2122 2122
2123 2123 tr.close()
2124 2124 finally:
2125 2125 del tr
2126 2126
2127 2127 if changesets > 0:
2128 2128 # forcefully update the on-disk branch cache
2129 2129 self.ui.debug("updating the branch cache\n")
2130 2130 self.branchtags()
2131 2131 self.hook("changegroup", node=hex(cl.node(clstart)),
2132 2132 source=srctype, url=url)
2133 2133
2134 2134 for i in xrange(clstart, clend):
2135 2135 self.hook("incoming", node=hex(cl.node(i)),
2136 2136 source=srctype, url=url)
2137 2137
2138 2138 # never return 0 here:
2139 2139 if newheads < oldheads:
2140 2140 return newheads - oldheads - 1
2141 2141 else:
2142 2142 return newheads - oldheads + 1
2143 2143
2144 2144
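The return value encodes the head-count delta exactly as the docstring says and is never 0 on success; the sign distinguishes added from removed heads. The encoding in isolation:

def retcode(oldheads, newheads):
    # Mirrors the 'never return 0 here' tail of addchangegroup() above.
    if newheads < oldheads:
        return newheads - oldheads - 1   # heads removed: -2..-n
    return newheads - oldheads + 1       # heads added or unchanged: 1..n

print(retcode(2, 2), retcode(2, 4), retcode(3, 1))   # 1 3 -3
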
2145 2145 def stream_in(self, remote):
2146 2146 fp = remote.stream_out()
2147 2147 l = fp.readline()
2148 2148 try:
2149 2149 resp = int(l)
2150 2150 except ValueError:
2151 2151 raise error.ResponseError(
2152 2152 _('Unexpected response from remote server:'), l)
2153 2153 if resp == 1:
2154 2154 raise util.Abort(_('operation forbidden by server'))
2155 2155 elif resp == 2:
2156 2156 raise util.Abort(_('locking the remote repository failed'))
2157 2157 elif resp != 0:
2158 2158 raise util.Abort(_('the server sent an unknown error code'))
2159 2159 self.ui.status(_('streaming all changes\n'))
2160 2160 l = fp.readline()
2161 2161 try:
2162 2162 total_files, total_bytes = map(int, l.split(' ', 1))
2163 2163 except (ValueError, TypeError):
2164 2164 raise error.ResponseError(
2165 2165 _('Unexpected response from remote server:'), l)
2166 2166 self.ui.status(_('%d files to transfer, %s of data\n') %
2167 2167 (total_files, util.bytecount(total_bytes)))
2168 2168 start = time.time()
2169 2169 for i in xrange(total_files):
2170 2170 # XXX doesn't support '\n' or '\r' in filenames
2171 2171 l = fp.readline()
2172 2172 try:
2173 2173 name, size = l.split('\0', 1)
2174 2174 size = int(size)
2175 2175 except (ValueError, TypeError):
2176 2176 raise error.ResponseError(
2177 2177 _('Unexpected response from remote server:'), l)
2178 2178 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2179 2179 # for backwards compat, name was partially encoded
2180 2180 ofp = self.sopener(store.decodedir(name), 'w')
2181 2181 for chunk in util.filechunkiter(fp, limit=size):
2182 2182 ofp.write(chunk)
2183 2183 ofp.close()
2184 2184 elapsed = time.time() - start
2185 2185 if elapsed <= 0:
2186 2186 elapsed = 0.001
2187 2187 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2188 2188 (util.bytecount(total_bytes), elapsed,
2189 2189 util.bytecount(total_bytes / elapsed)))
2190 2190 self.invalidate()
2191 2191 return len(self.heads()) + 1
2192 2192
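stream_in() parses a simple framing: a status-code line, a "total_files total_bytes" line, then for each file a "name\0size" header followed by exactly size raw bytes. A sketch of the same parse over an in-memory stream (the payload is fabricated):

import io

payload = b'0\n1 5\nfoo\x005\nhello'    # fabricated minimal stream
fp = io.BytesIO(payload)
assert int(fp.readline().decode()) == 0            # status: 0 == OK
total_files, total_bytes = map(
    int, fp.readline().decode().split(' ', 1))
for _ in range(total_files):
    name, size = fp.readline().decode().split('\0', 1)
    data = fp.read(int(size))                      # raw file payload
    print('%s %r' % (name, data))                  # foo b'hello'
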
2193 2193 def clone(self, remote, heads=[], stream=False):
2194 2194 '''clone remote repository.
2195 2195
2196 2196 keyword arguments:
2197 2197 heads: list of revs to clone (forces use of pull)
2198 2198 stream: use streaming clone if possible'''
2199 2199
2200 2200 # now, all clients that can request uncompressed clones can
2201 2201 # read repo formats supported by all servers that can serve
2202 2202 # them.
2203 2203
2204 2204 # if revlog format changes, client will have to check version
2205 2205 # and format flags on "stream" capability, and use
2206 2206 # uncompressed only if compatible.
2207 2207
2208 2208 if stream and not heads and remote.capable('stream'):
2209 2209 return self.stream_in(remote)
2210 2210 return self.pull(remote, heads)
2211 2211
2212 2212 # used to avoid circular references so destructors work
2213 2213 def aftertrans(files):
2214 2214 renamefiles = [tuple(t) for t in files]
2215 2215 def a():
2216 2216 for src, dest in renamefiles:
2217 2217 util.rename(src, dest)
2218 2218 return a
2219 2219
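aftertrans() returns a plain closure rather than a repository method precisely so the transaction holds no reference back to the repo and destructors can run. The pattern in isolation:

def make_renamer(files):
    # Snapshot the pairs now; the returned closure captures only this
    # list, never the object that created it (no reference cycle).
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            print('rename %s -> %s' % (src, dest))
    return a

cb = make_renamer([('journal', 'undo')])
cb()                            # rename journal -> undo
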
2220 2220 def instance(ui, path, create):
2221 2221 return localrepository(ui, util.drop_scheme('file', path), create)
2222 2222
2223 2223 def islocal(path):
2224 2224 return True
@@ -1,26 +1,30 b''
1 1 #!/bin/sh
2 2
3 3 cat <<EOF >> $HGRCPATH
4 4 [extensions]
5 5 schemes=
6 6
7 7 [schemes]
8 8 l = http://localhost:$HGPORT/
9 9 parts = http://{1}:$HGPORT/
10 z = file:\$PWD/
10 11 EOF
11 12
12 13 hg init test
13 14 cd test
14 15 echo a > a
15 16 hg ci -Am initial
16 17
17 18 hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
18 19 cat hg.pid >> $DAEMON_PIDS
19 20
20 21 hg incoming l://
21 22
22 23 echo % check that {1} syntax works
23 24 hg incoming --debug parts://localhost | sed 's/[0-9]//g'
24 25
26 echo % check that paths are expanded
27 PWD=`pwd` hg incoming z://
28
25 29 echo % errors
26 30 cat errors.log
@@ -1,12 +1,16 b''
1 1 adding a
2 2 comparing with l://
3 3 searching for changes
4 4 no changes found
5 5 % check that {1} syntax works
6 6 using http://localhost:/
7 7 sending between command
8 8 comparing with parts://localhost
9 9 sending heads command
10 10 searching for changes
11 11 no changes found
12 % check that paths are expanded
13 comparing with z://
14 searching for changes
15 no changes found
12 16 % errors
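The new z scheme in the test has no {n} placeholder, so it expands as a plain prefix: z:// becomes file:$PWD/, while parts://localhost fills {1} with the first path segment. A hedged sketch of that substitution; this illustrates the idea, not the schemes extension's actual code (the port 8000 stands in for $HGPORT):

def expand(url, schemes):
    # Illustrative only: positional '{1}' templates take the first path
    # segment; placeholder-free schemes act as plain prefixes.
    scheme, rest = url.split('://', 1)
    tmpl = schemes[scheme]
    if '{1}' in tmpl:
        parts = rest.split('/')
        return tmpl.replace('{1}', parts[0]) + '/'.join(parts[1:])
    return tmpl + rest

schemes = {'parts': 'http://{1}:8000/', 'z': 'file:/tmp/'}
print(expand('parts://localhost', schemes))   # http://localhost:8000/
print(expand('z://', schemes))                # file:/tmp/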