clfilter: ensure that filecache on localrepo is unfiltered...
Pierre-Yves David
r18014:a39fe76c default
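The substance of this changeset is the new repofilecache class in localrepo.py: filecache-backed properties on localrepo now read and write their cached value on repo.unfiltered(), so every filtered view of a repository shares a single cache on the unfiltered object instead of each view growing and invalidating its own copy. Below is a minimal, self-contained sketch of that descriptor-redirect pattern; the names (cachedprop, baserepo, filteredrepo, bookmarks) are illustrative stand-ins for Mercurial's propertycache/filecache machinery, not its real API.

# sketch_repofilecache.py - minimal illustration of the descriptor-redirect
# pattern; toy classes, not Mercurial's real ones.

class cachedprop(object):
    """Compute once, then cache the result in the instance __dict__."""
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        try:
            return obj.__dict__[self.name]
        except KeyError:
            value = self.func(obj)
            obj.__dict__[self.name] = value
            return value

class unfilteredcachedprop(cachedprop):
    """Same, but always look up and store on the unfiltered repo."""
    def __get__(self, repo, type=None):
        return super(unfilteredcachedprop, self).__get__(repo.unfiltered())

class baserepo(object):
    def unfiltered(self):
        return self          # the base repo is its own unfiltered view

    @unfilteredcachedprop
    def bookmarks(self):
        print('computing bookmarks')   # runs only once, for all views
        return {'tip': '0' * 40}

class filteredrepo(baserepo):
    """A filtered view that delegates caching to its base repo."""
    def __init__(self, base):
        self._base = base
    def unfiltered(self):
        return self._base

base = baserepo()
view = filteredrepo(base)
print(view.bookmarks is base.bookmarks)   # True: a single shared cache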
--- a/mercurial/bundlerepo.py
+++ b/mercurial/bundlerepo.py
@@ -1,389 +1,389 @@
 # bundlerepo.py - repository class for viewing uncompressed bundles
 #
 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 """Repository class for viewing uncompressed bundles.
 
 This provides a read-only repository interface to bundles as if they
 were part of the actual repository.
 """
 
 from node import nullid
 from i18n import _
 import os, tempfile, shutil
 import changegroup, util, mdiff, discovery, cmdutil
 import localrepo, changelog, manifest, filelog, revlog, error
 
 class bundlerevlog(revlog.revlog):
     def __init__(self, opener, indexfile, bundle, linkmapper):
         # How it works:
         # to retrieve a revision, we need to know the offset of
         # the revision in the bundle (an unbundle object).
         #
         # We store this offset in the index (start), to differentiate a
         # rev in the bundle and from a rev in the revlog, we check
         # len(index[r]). If the tuple is bigger than 7, it is a bundle
         # (it is bigger since we store the node to which the delta is)
         #
         revlog.revlog.__init__(self, opener, indexfile)
         self.bundle = bundle
         self.basemap = {}
         n = len(self)
         chain = None
         self.bundlenodes = []
         while True:
             chunkdata = bundle.deltachunk(chain)
             if not chunkdata:
                 break
             node = chunkdata['node']
             p1 = chunkdata['p1']
             p2 = chunkdata['p2']
             cs = chunkdata['cs']
             deltabase = chunkdata['deltabase']
             delta = chunkdata['delta']
 
             size = len(delta)
             start = bundle.tell() - size
 
             link = linkmapper(cs)
             self.bundlenodes.append(node)
             if node in self.nodemap:
                 # this can happen if two branches make the same change
                 chain = node
                 continue
 
             for p in (p1, p2):
                 if p not in self.nodemap:
                     raise error.LookupError(p, self.indexfile,
                                             _("unknown parent"))
             # start, size, full unc. size, base (unused), link, p1, p2, node
             e = (revlog.offset_type(start, 0), size, -1, -1, link,
                  self.rev(p1), self.rev(p2), node)
             self.basemap[n] = deltabase
             self.index.insert(-1, e)
             self.nodemap[node] = n
             chain = node
             n += 1
 
     def inbundle(self, rev):
         """is rev from the bundle"""
         if rev < 0:
             return False
         return rev in self.basemap
     def bundlebase(self, rev):
         return self.basemap[rev]
     def _chunk(self, rev):
         # Warning: in case of bundle, the diff is against bundlebase,
         # not against rev - 1
         # XXX: could use some caching
         if not self.inbundle(rev):
             return revlog.revlog._chunk(self, rev)
         self.bundle.seek(self.start(rev))
         return self.bundle.read(self.length(rev))
 
     def revdiff(self, rev1, rev2):
         """return or calculate a delta between two revisions"""
         if self.inbundle(rev1) and self.inbundle(rev2):
             # hot path for bundle
             revb = self.rev(self.bundlebase(rev2))
             if revb == rev1:
                 return self._chunk(rev2)
         elif not self.inbundle(rev1) and not self.inbundle(rev2):
             return revlog.revlog.revdiff(self, rev1, rev2)
 
         return mdiff.textdiff(self.revision(self.node(rev1)),
                               self.revision(self.node(rev2)))
 
     def revision(self, nodeorrev):
         """return an uncompressed revision of a given node or revision
         number.
         """
         if isinstance(nodeorrev, int):
             rev = nodeorrev
             node = self.node(rev)
         else:
             node = nodeorrev
             rev = self.rev(node)
 
         if node == nullid:
             return ""
 
         text = None
         chain = []
         iter_node = node
         # reconstruct the revision if it is from a changegroup
         while self.inbundle(rev):
             if self._cache and self._cache[0] == iter_node:
                 text = self._cache[2]
                 break
             chain.append(rev)
             iter_node = self.bundlebase(rev)
             rev = self.rev(iter_node)
         if text is None:
             text = revlog.revlog.revision(self, iter_node)
 
         while chain:
             delta = self._chunk(chain.pop())
             text = mdiff.patches(text, [delta])
 
         p1, p2 = self.parents(node)
         if node != revlog.hash(text, p1, p2):
             raise error.RevlogError(_("integrity check failed on %s:%d")
                                     % (self.datafile, self.rev(node)))
 
         self._cache = (node, self.rev(node), text)
         return text
 
     def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
         raise NotImplementedError
     def addgroup(self, revs, linkmapper, transaction):
         raise NotImplementedError
     def strip(self, rev, minlink):
         raise NotImplementedError
     def checksize(self):
         raise NotImplementedError
 
 class bundlechangelog(bundlerevlog, changelog.changelog):
     def __init__(self, opener, bundle):
         changelog.changelog.__init__(self, opener)
         linkmapper = lambda x: x
         bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                               linkmapper)
 
 class bundlemanifest(bundlerevlog, manifest.manifest):
     def __init__(self, opener, bundle, linkmapper):
         manifest.manifest.__init__(self, opener)
         bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                               linkmapper)
 
 class bundlefilelog(bundlerevlog, filelog.filelog):
     def __init__(self, opener, path, bundle, linkmapper, repo):
         filelog.filelog.__init__(self, opener, path)
         bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                               linkmapper)
         self._repo = repo
 
     def _file(self, f):
         self._repo.file(f)
 
 class bundlepeer(localrepo.localpeer):
     def canpush(self):
         return False
 
 class bundlerepository(localrepo.localrepository):
     def __init__(self, ui, path, bundlename):
         self._tempparent = None
         try:
             localrepo.localrepository.__init__(self, ui, path)
         except error.RepoError:
             self._tempparent = tempfile.mkdtemp()
             localrepo.instance(ui, self._tempparent, 1)
             localrepo.localrepository.__init__(self, ui, self._tempparent)
         self.ui.setconfig('phases', 'publish', False)
 
         if path:
             self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
         else:
             self._url = 'bundle:' + bundlename
 
         self.tempfile = None
         f = util.posixfile(bundlename, "rb")
         self.bundle = changegroup.readbundle(f, bundlename)
         if self.bundle.compressed():
             fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
                                             suffix=".hg10un", dir=self.path)
             self.tempfile = temp
             fptemp = os.fdopen(fdtemp, 'wb')
 
             try:
                 fptemp.write("HG10UN")
                 while True:
                     chunk = self.bundle.read(2**18)
                     if not chunk:
                         break
                     fptemp.write(chunk)
             finally:
                 fptemp.close()
 
             f = util.posixfile(self.tempfile, "rb")
             self.bundle = changegroup.readbundle(f, bundlename)
 
         # dict with the mapping 'filename' -> position in the bundle
         self.bundlefilespos = {}
 
-    @util.propertycache
+    @localrepo.unfilteredpropertycache
     def changelog(self):
         # consume the header if it exists
         self.bundle.changelogheader()
         c = bundlechangelog(self.sopener, self.bundle)
         self.manstart = self.bundle.tell()
         return c
 
-    @util.propertycache
+    @localrepo.unfilteredpropertycache
     def manifest(self):
         self.bundle.seek(self.manstart)
         # consume the header if it exists
         self.bundle.manifestheader()
         m = bundlemanifest(self.sopener, self.bundle, self.changelog.rev)
         self.filestart = self.bundle.tell()
         return m
 
-    @util.propertycache
+    @localrepo.unfilteredpropertycache
     def manstart(self):
         self.changelog
         return self.manstart
 
-    @util.propertycache
+    @localrepo.unfilteredpropertycache
     def filestart(self):
         self.manifest
         return self.filestart
 
     def url(self):
         return self._url
 
     def file(self, f):
         if not self.bundlefilespos:
             self.bundle.seek(self.filestart)
             while True:
                 chunkdata = self.bundle.filelogheader()
                 if not chunkdata:
                     break
                 fname = chunkdata['filename']
                 self.bundlefilespos[fname] = self.bundle.tell()
                 while True:
                     c = self.bundle.deltachunk(None)
                     if not c:
                         break
 
         if f[0] == '/':
             f = f[1:]
         if f in self.bundlefilespos:
             self.bundle.seek(self.bundlefilespos[f])
             return bundlefilelog(self.sopener, f, self.bundle,
                                  self.changelog.rev, self)
         else:
             return filelog.filelog(self.sopener, f)
 
     def close(self):
         """Close assigned bundle file immediately."""
         self.bundle.close()
         if self.tempfile is not None:
             os.unlink(self.tempfile)
         if self._tempparent:
             shutil.rmtree(self._tempparent, True)
 
     def cancopy(self):
         return False
 
     def peer(self):
         return bundlepeer(self)
 
     def getcwd(self):
         return os.getcwd() # always outside the repo
 
     def _writebranchcache(self, branches, tip, tiprev):
         # don't overwrite the disk cache with bundle-augmented data
         pass
 
 def instance(ui, path, create):
     if create:
         raise util.Abort(_('cannot create new bundle repository'))
     parentpath = ui.config("bundle", "mainreporoot", "")
     if not parentpath:
         # try to find the correct path to the working directory repo
         parentpath = cmdutil.findrepo(os.getcwd())
         if parentpath is None:
             parentpath = ''
     if parentpath:
         # Try to make the full path relative so we get a nice, short URL.
         # In particular, we don't want temp dir names in test outputs.
         cwd = os.getcwd()
         if parentpath == cwd:
             parentpath = ''
         else:
             cwd = os.path.join(cwd,'')
             if parentpath.startswith(cwd):
                 parentpath = parentpath[len(cwd):]
     u = util.url(path)
     path = u.localpath()
     if u.scheme == 'bundle':
         s = path.split("+", 1)
         if len(s) == 1:
             repopath, bundlename = parentpath, s[0]
         else:
             repopath, bundlename = s
     else:
         repopath, bundlename = parentpath, path
     return bundlerepository(ui, repopath, bundlename)
 
 def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
                      force=False):
     '''obtains a bundle of changes incoming from other
 
     "onlyheads" restricts the returned changes to those reachable from the
     specified heads.
     "bundlename", if given, stores the bundle to this file path permanently;
     otherwise it's stored to a temp file and gets deleted again when you call
     the returned "cleanupfn".
     "force" indicates whether to proceed on unrelated repos.
 
     Returns a tuple (local, csets, cleanupfn):
 
     "local" is a local repo from which to obtain the actual incoming
     changesets; it is a bundlerepo for the obtained bundle when the
     original "other" is remote.
     "csets" lists the incoming changeset node ids.
     "cleanupfn" must be called without arguments when you're done processing
     the changes; it closes both the original "other" and the one returned
     here.
     '''
     tmp = discovery.findcommonincoming(repo, other, heads=onlyheads,
                                        force=force)
     common, incoming, rheads = tmp
     if not incoming:
         try:
             if bundlename:
                 os.unlink(bundlename)
         except OSError:
             pass
         return other, [], other.close
 
     bundle = None
     bundlerepo = None
     localrepo = other.local()
     if bundlename or not localrepo:
         # create a bundle (uncompressed if other repo is not local)
 
         if other.capable('getbundle'):
             cg = other.getbundle('incoming', common=common, heads=rheads)
         elif onlyheads is None and not other.capable('changegroupsubset'):
             # compat with older servers when pulling all remote heads
             cg = other.changegroup(incoming, "incoming")
             rheads = None
         else:
             cg = other.changegroupsubset(incoming, rheads, 'incoming')
         bundletype = localrepo and "HG10BZ" or "HG10UN"
         fname = bundle = changegroup.writebundle(cg, bundlename, bundletype)
         # keep written bundle?
         if bundlename:
             bundle = None
         if not localrepo:
             # use the created uncompressed bundlerepo
             localrepo = bundlerepo = bundlerepository(ui, repo.root, fname)
             # this repo contains local and other now, so filter out local again
             common = repo.heads()
 
     csets = localrepo.changelog.findmissing(common, rheads)
 
     def cleanup():
         if bundlerepo:
             bundlerepo.close()
         if bundle:
             os.unlink(bundle)
         other.close()
 
     return (localrepo, csets, cleanup)
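localrepo.py (next diff) carries the rest of the machinery: repofilecache itself, alongside the existing unfilteredpropertycache, filteredpropertycache, and the unfilteredmeth decorator that the bundlerepo.py properties above now rely on. The decorator is the simplest of these pieces; here is a hedged sketch of how such a method wrapper behaves. Only unfilteredmeth mirrors the function defined in the diff below; baserepo and filteredrepo are toy stand-ins, not Mercurial's real classes.

# sketch_unfilteredmeth.py - toy illustration of the method-level redirect

def unfilteredmeth(orig):
    """Always run the wrapped method against the unfiltered repo."""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

class baserepo(object):
    def unfiltered(self):
        return self

    @unfilteredmeth
    def describe(self):
        # 'self' is always the unfiltered repo here, even when the
        # call came in through a filtered view
        return 'unfiltered? %s' % (self.unfiltered() is self)

class filteredrepo(baserepo):
    def __init__(self, base):
        self._base = base
    def unfiltered(self):
        return self._base

base = baserepo()
print(filteredrepo(base).describe())   # prints: unfiltered? True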
@@ -1,2669 +1,2680 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import bin, hex, nullid, nullrev, short
7 from node import bin, hex, nullid, nullrev, short
8 from i18n import _
8 from i18n import _
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import lock, transaction, store, encoding, base85
11 import lock, transaction, store, encoding, base85
12 import scmutil, util, extensions, hook, error, revset
12 import scmutil, util, extensions, hook, error, revset
13 import match as matchmod
13 import match as matchmod
14 import merge as mergemod
14 import merge as mergemod
15 import tags as tagsmod
15 import tags as tagsmod
16 from lock import release
16 from lock import release
17 import weakref, errno, os, time, inspect
17 import weakref, errno, os, time, inspect
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19 filecache = scmutil.filecache
19 filecache = scmutil.filecache
20
20
21 class storecache(filecache):
21 class repofilecache(filecache):
22 """All filecache usage on repo are done for logic that should be unfiltered
23 """
24
25 def __get__(self, repo, type=None):
26 return super(repofilecache, self).__get__(repo.unfiltered(), type)
27 def __set__(self, repo, value):
28 return super(repofilecache, self).__set__(repo.unfiltered(), value)
29 def __delete__(self, repo):
30 return super(repofilecache, self).__delete__(repo.unfiltered())
31
32 class storecache(repofilecache):
22 """filecache for files in the store"""
33 """filecache for files in the store"""
23 def join(self, obj, fname):
34 def join(self, obj, fname):
24 return obj.sjoin(fname)
35 return obj.sjoin(fname)
25
36
26 class unfilteredpropertycache(propertycache):
37 class unfilteredpropertycache(propertycache):
27 """propertycache that apply to unfiltered repo only"""
38 """propertycache that apply to unfiltered repo only"""
28
39
29 def __get__(self, repo, type=None):
40 def __get__(self, repo, type=None):
30 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
41 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
31
42
32 class filteredpropertycache(propertycache):
43 class filteredpropertycache(propertycache):
33 """propertycache that must take filtering in account"""
44 """propertycache that must take filtering in account"""
34
45
35 def cachevalue(self, obj, value):
46 def cachevalue(self, obj, value):
36 object.__setattr__(obj, self.name, value)
47 object.__setattr__(obj, self.name, value)
37
48
38
49
39 def hasunfilteredcache(repo, name):
50 def hasunfilteredcache(repo, name):
40 """check if an repo and a unfilteredproperty cached value for <name>"""
51 """check if an repo and a unfilteredproperty cached value for <name>"""
41 return name in vars(repo.unfiltered())
52 return name in vars(repo.unfiltered())
42
53
43 def unfilteredmeth(orig):
54 def unfilteredmeth(orig):
44 """decorate method that always need to be run on unfiltered version"""
55 """decorate method that always need to be run on unfiltered version"""
45 def wrapper(repo, *args, **kwargs):
56 def wrapper(repo, *args, **kwargs):
46 return orig(repo.unfiltered(), *args, **kwargs)
57 return orig(repo.unfiltered(), *args, **kwargs)
47 return wrapper
58 return wrapper
48
59
49 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
60 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
50 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
61 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
51
62
52 class localpeer(peer.peerrepository):
63 class localpeer(peer.peerrepository):
53 '''peer for a local repo; reflects only the most recent API'''
64 '''peer for a local repo; reflects only the most recent API'''
54
65
55 def __init__(self, repo, caps=MODERNCAPS):
66 def __init__(self, repo, caps=MODERNCAPS):
56 peer.peerrepository.__init__(self)
67 peer.peerrepository.__init__(self)
57 self._repo = repo
68 self._repo = repo
58 self.ui = repo.ui
69 self.ui = repo.ui
59 self._caps = repo._restrictcapabilities(caps)
70 self._caps = repo._restrictcapabilities(caps)
60 self.requirements = repo.requirements
71 self.requirements = repo.requirements
61 self.supportedformats = repo.supportedformats
72 self.supportedformats = repo.supportedformats
62
73
63 def close(self):
74 def close(self):
64 self._repo.close()
75 self._repo.close()
65
76
66 def _capabilities(self):
77 def _capabilities(self):
67 return self._caps
78 return self._caps
68
79
69 def local(self):
80 def local(self):
70 return self._repo
81 return self._repo
71
82
72 def canpush(self):
83 def canpush(self):
73 return True
84 return True
74
85
75 def url(self):
86 def url(self):
76 return self._repo.url()
87 return self._repo.url()
77
88
78 def lookup(self, key):
89 def lookup(self, key):
79 return self._repo.lookup(key)
90 return self._repo.lookup(key)
80
91
81 def branchmap(self):
92 def branchmap(self):
82 return discovery.visiblebranchmap(self._repo)
93 return discovery.visiblebranchmap(self._repo)
83
94
84 def heads(self):
95 def heads(self):
85 return discovery.visibleheads(self._repo)
96 return discovery.visibleheads(self._repo)
86
97
87 def known(self, nodes):
98 def known(self, nodes):
88 return self._repo.known(nodes)
99 return self._repo.known(nodes)
89
100
90 def getbundle(self, source, heads=None, common=None):
101 def getbundle(self, source, heads=None, common=None):
91 return self._repo.getbundle(source, heads=heads, common=common)
102 return self._repo.getbundle(source, heads=heads, common=common)
92
103
93 # TODO We might want to move the next two calls into legacypeer and add
104 # TODO We might want to move the next two calls into legacypeer and add
94 # unbundle instead.
105 # unbundle instead.
95
106
96 def lock(self):
107 def lock(self):
97 return self._repo.lock()
108 return self._repo.lock()
98
109
99 def addchangegroup(self, cg, source, url):
110 def addchangegroup(self, cg, source, url):
100 return self._repo.addchangegroup(cg, source, url)
111 return self._repo.addchangegroup(cg, source, url)
101
112
102 def pushkey(self, namespace, key, old, new):
113 def pushkey(self, namespace, key, old, new):
103 return self._repo.pushkey(namespace, key, old, new)
114 return self._repo.pushkey(namespace, key, old, new)
104
115
105 def listkeys(self, namespace):
116 def listkeys(self, namespace):
106 return self._repo.listkeys(namespace)
117 return self._repo.listkeys(namespace)
107
118
108 def debugwireargs(self, one, two, three=None, four=None, five=None):
119 def debugwireargs(self, one, two, three=None, four=None, five=None):
109 '''used to test argument passing over the wire'''
120 '''used to test argument passing over the wire'''
110 return "%s %s %s %s %s" % (one, two, three, four, five)
121 return "%s %s %s %s %s" % (one, two, three, four, five)
111
122
112 class locallegacypeer(localpeer):
123 class locallegacypeer(localpeer):
113 '''peer extension which implements legacy methods too; used for tests with
124 '''peer extension which implements legacy methods too; used for tests with
114 restricted capabilities'''
125 restricted capabilities'''
115
126
116 def __init__(self, repo):
127 def __init__(self, repo):
117 localpeer.__init__(self, repo, caps=LEGACYCAPS)
128 localpeer.__init__(self, repo, caps=LEGACYCAPS)
118
129
119 def branches(self, nodes):
130 def branches(self, nodes):
120 return self._repo.branches(nodes)
131 return self._repo.branches(nodes)
121
132
122 def between(self, pairs):
133 def between(self, pairs):
123 return self._repo.between(pairs)
134 return self._repo.between(pairs)
124
135
125 def changegroup(self, basenodes, source):
136 def changegroup(self, basenodes, source):
126 return self._repo.changegroup(basenodes, source)
137 return self._repo.changegroup(basenodes, source)
127
138
128 def changegroupsubset(self, bases, heads, source):
139 def changegroupsubset(self, bases, heads, source):
129 return self._repo.changegroupsubset(bases, heads, source)
140 return self._repo.changegroupsubset(bases, heads, source)
130
141
131 class localrepository(object):
142 class localrepository(object):
132
143
133 supportedformats = set(('revlogv1', 'generaldelta'))
144 supportedformats = set(('revlogv1', 'generaldelta'))
134 supported = supportedformats | set(('store', 'fncache', 'shared',
145 supported = supportedformats | set(('store', 'fncache', 'shared',
135 'dotencode'))
146 'dotencode'))
136 openerreqs = set(('revlogv1', 'generaldelta'))
147 openerreqs = set(('revlogv1', 'generaldelta'))
137 requirements = ['revlogv1']
148 requirements = ['revlogv1']
138
149
139 def _baserequirements(self, create):
150 def _baserequirements(self, create):
140 return self.requirements[:]
151 return self.requirements[:]
141
152
142 def __init__(self, baseui, path=None, create=False):
153 def __init__(self, baseui, path=None, create=False):
143 self.wvfs = scmutil.vfs(path, expand=True)
154 self.wvfs = scmutil.vfs(path, expand=True)
144 self.wopener = self.wvfs
155 self.wopener = self.wvfs
145 self.root = self.wvfs.base
156 self.root = self.wvfs.base
146 self.path = self.wvfs.join(".hg")
157 self.path = self.wvfs.join(".hg")
147 self.origroot = path
158 self.origroot = path
148 self.auditor = scmutil.pathauditor(self.root, self._checknested)
159 self.auditor = scmutil.pathauditor(self.root, self._checknested)
149 self.vfs = scmutil.vfs(self.path)
160 self.vfs = scmutil.vfs(self.path)
150 self.opener = self.vfs
161 self.opener = self.vfs
151 self.baseui = baseui
162 self.baseui = baseui
152 self.ui = baseui.copy()
163 self.ui = baseui.copy()
153 # A list of callback to shape the phase if no data were found.
164 # A list of callback to shape the phase if no data were found.
154 # Callback are in the form: func(repo, roots) --> processed root.
165 # Callback are in the form: func(repo, roots) --> processed root.
155 # This list it to be filled by extension during repo setup
166 # This list it to be filled by extension during repo setup
156 self._phasedefaults = []
167 self._phasedefaults = []
157 try:
168 try:
158 self.ui.readconfig(self.join("hgrc"), self.root)
169 self.ui.readconfig(self.join("hgrc"), self.root)
159 extensions.loadall(self.ui)
170 extensions.loadall(self.ui)
160 except IOError:
171 except IOError:
161 pass
172 pass
162
173
163 if not self.vfs.isdir():
174 if not self.vfs.isdir():
164 if create:
175 if create:
165 if not self.wvfs.exists():
176 if not self.wvfs.exists():
166 self.wvfs.makedirs()
177 self.wvfs.makedirs()
167 self.vfs.makedir(notindexed=True)
178 self.vfs.makedir(notindexed=True)
168 requirements = self._baserequirements(create)
179 requirements = self._baserequirements(create)
169 if self.ui.configbool('format', 'usestore', True):
180 if self.ui.configbool('format', 'usestore', True):
170 self.vfs.mkdir("store")
181 self.vfs.mkdir("store")
171 requirements.append("store")
182 requirements.append("store")
172 if self.ui.configbool('format', 'usefncache', True):
183 if self.ui.configbool('format', 'usefncache', True):
173 requirements.append("fncache")
184 requirements.append("fncache")
174 if self.ui.configbool('format', 'dotencode', True):
185 if self.ui.configbool('format', 'dotencode', True):
175 requirements.append('dotencode')
186 requirements.append('dotencode')
176 # create an invalid changelog
187 # create an invalid changelog
177 self.vfs.append(
188 self.vfs.append(
178 "00changelog.i",
189 "00changelog.i",
179 '\0\0\0\2' # represents revlogv2
190 '\0\0\0\2' # represents revlogv2
180 ' dummy changelog to prevent using the old repo layout'
191 ' dummy changelog to prevent using the old repo layout'
181 )
192 )
182 if self.ui.configbool('format', 'generaldelta', False):
193 if self.ui.configbool('format', 'generaldelta', False):
183 requirements.append("generaldelta")
194 requirements.append("generaldelta")
184 requirements = set(requirements)
195 requirements = set(requirements)
185 else:
196 else:
186 raise error.RepoError(_("repository %s not found") % path)
197 raise error.RepoError(_("repository %s not found") % path)
187 elif create:
198 elif create:
188 raise error.RepoError(_("repository %s already exists") % path)
199 raise error.RepoError(_("repository %s already exists") % path)
189 else:
200 else:
190 try:
201 try:
191 requirements = scmutil.readrequires(self.vfs, self.supported)
202 requirements = scmutil.readrequires(self.vfs, self.supported)
192 except IOError, inst:
203 except IOError, inst:
193 if inst.errno != errno.ENOENT:
204 if inst.errno != errno.ENOENT:
194 raise
205 raise
195 requirements = set()
206 requirements = set()
196
207
197 self.sharedpath = self.path
208 self.sharedpath = self.path
198 try:
209 try:
199 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
210 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
200 if not os.path.exists(s):
211 if not os.path.exists(s):
201 raise error.RepoError(
212 raise error.RepoError(
202 _('.hg/sharedpath points to nonexistent directory %s') % s)
213 _('.hg/sharedpath points to nonexistent directory %s') % s)
203 self.sharedpath = s
214 self.sharedpath = s
204 except IOError, inst:
215 except IOError, inst:
205 if inst.errno != errno.ENOENT:
216 if inst.errno != errno.ENOENT:
206 raise
217 raise
207
218
208 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
219 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
209 self.spath = self.store.path
220 self.spath = self.store.path
210 self.svfs = self.store.vfs
221 self.svfs = self.store.vfs
211 self.sopener = self.svfs
222 self.sopener = self.svfs
212 self.sjoin = self.store.join
223 self.sjoin = self.store.join
213 self.vfs.createmode = self.store.createmode
224 self.vfs.createmode = self.store.createmode
214 self._applyrequirements(requirements)
225 self._applyrequirements(requirements)
215 if create:
226 if create:
216 self._writerequirements()
227 self._writerequirements()
217
228
218
229
219 self._branchcache = None
230 self._branchcache = None
220 self._branchcachetip = None
231 self._branchcachetip = None
221 self.filterpats = {}
232 self.filterpats = {}
222 self._datafilters = {}
233 self._datafilters = {}
223 self._transref = self._lockref = self._wlockref = None
234 self._transref = self._lockref = self._wlockref = None
224
235
225 # A cache for various files under .hg/ that tracks file changes,
236 # A cache for various files under .hg/ that tracks file changes,
226 # (used by the filecache decorator)
237 # (used by the filecache decorator)
227 #
238 #
228 # Maps a property name to its util.filecacheentry
239 # Maps a property name to its util.filecacheentry
229 self._filecache = {}
240 self._filecache = {}
230
241
231 def close(self):
242 def close(self):
232 pass
243 pass
233
244
234 def _restrictcapabilities(self, caps):
245 def _restrictcapabilities(self, caps):
235 return caps
246 return caps
236
247
237 def _applyrequirements(self, requirements):
248 def _applyrequirements(self, requirements):
238 self.requirements = requirements
249 self.requirements = requirements
239 self.sopener.options = dict((r, 1) for r in requirements
250 self.sopener.options = dict((r, 1) for r in requirements
240 if r in self.openerreqs)
251 if r in self.openerreqs)
241
252
242 def _writerequirements(self):
253 def _writerequirements(self):
243 reqfile = self.opener("requires", "w")
254 reqfile = self.opener("requires", "w")
244 for r in self.requirements:
255 for r in self.requirements:
245 reqfile.write("%s\n" % r)
256 reqfile.write("%s\n" % r)
246 reqfile.close()
257 reqfile.close()
247
258
248 def _checknested(self, path):
259 def _checknested(self, path):
249 """Determine if path is a legal nested repository."""
260 """Determine if path is a legal nested repository."""
250 if not path.startswith(self.root):
261 if not path.startswith(self.root):
251 return False
262 return False
252 subpath = path[len(self.root) + 1:]
263 subpath = path[len(self.root) + 1:]
253 normsubpath = util.pconvert(subpath)
264 normsubpath = util.pconvert(subpath)
254
265
255 # XXX: Checking against the current working copy is wrong in
266 # XXX: Checking against the current working copy is wrong in
256 # the sense that it can reject things like
267 # the sense that it can reject things like
257 #
268 #
258 # $ hg cat -r 10 sub/x.txt
269 # $ hg cat -r 10 sub/x.txt
259 #
270 #
260 # if sub/ is no longer a subrepository in the working copy
271 # if sub/ is no longer a subrepository in the working copy
261 # parent revision.
272 # parent revision.
262 #
273 #
263 # However, it can of course also allow things that would have
274 # However, it can of course also allow things that would have
264 # been rejected before, such as the above cat command if sub/
275 # been rejected before, such as the above cat command if sub/
265 # is a subrepository now, but was a normal directory before.
276 # is a subrepository now, but was a normal directory before.
266 # The old path auditor would have rejected by mistake since it
277 # The old path auditor would have rejected by mistake since it
267 # panics when it sees sub/.hg/.
278 # panics when it sees sub/.hg/.
268 #
279 #
269 # All in all, checking against the working copy seems sensible
280 # All in all, checking against the working copy seems sensible
270 # since we want to prevent access to nested repositories on
281 # since we want to prevent access to nested repositories on
271 # the filesystem *now*.
282 # the filesystem *now*.
272 ctx = self[None]
283 ctx = self[None]
273 parts = util.splitpath(subpath)
284 parts = util.splitpath(subpath)
274 while parts:
285 while parts:
275 prefix = '/'.join(parts)
286 prefix = '/'.join(parts)
276 if prefix in ctx.substate:
287 if prefix in ctx.substate:
277 if prefix == normsubpath:
288 if prefix == normsubpath:
278 return True
289 return True
279 else:
290 else:
280 sub = ctx.sub(prefix)
291 sub = ctx.sub(prefix)
281 return sub.checknested(subpath[len(prefix) + 1:])
292 return sub.checknested(subpath[len(prefix) + 1:])
282 else:
293 else:
283 parts.pop()
294 parts.pop()
284 return False
295 return False
285
296
286 def peer(self):
297 def peer(self):
287 return localpeer(self) # not cached to avoid reference cycle
298 return localpeer(self) # not cached to avoid reference cycle
288
299
289 def unfiltered(self):
300 def unfiltered(self):
290 """Return unfiltered version of the repository
301 """Return unfiltered version of the repository
291
302
292 Intended to be ovewritten by filtered repo."""
303 Intended to be ovewritten by filtered repo."""
293 return self
304 return self
294
305
295 @filecache('bookmarks')
306 @repofilecache('bookmarks')
296 def _bookmarks(self):
307 def _bookmarks(self):
297 return bookmarks.bmstore(self)
308 return bookmarks.bmstore(self)
298
309
299 @filecache('bookmarks.current')
310 @repofilecache('bookmarks.current')
300 def _bookmarkcurrent(self):
311 def _bookmarkcurrent(self):
301 return bookmarks.readcurrent(self)
312 return bookmarks.readcurrent(self)
302
313
303 def bookmarkheads(self, bookmark):
314 def bookmarkheads(self, bookmark):
304 name = bookmark.split('@', 1)[0]
315 name = bookmark.split('@', 1)[0]
305 heads = []
316 heads = []
306 for mark, n in self._bookmarks.iteritems():
317 for mark, n in self._bookmarks.iteritems():
307 if mark.split('@', 1)[0] == name:
318 if mark.split('@', 1)[0] == name:
308 heads.append(n)
319 heads.append(n)
309 return heads
320 return heads
310
321
311 @storecache('phaseroots')
322 @storecache('phaseroots')
312 def _phasecache(self):
323 def _phasecache(self):
313 return phases.phasecache(self, self._phasedefaults)
324 return phases.phasecache(self, self._phasedefaults)
314
325
315 @storecache('obsstore')
326 @storecache('obsstore')
316 def obsstore(self):
327 def obsstore(self):
317 store = obsolete.obsstore(self.sopener)
328 store = obsolete.obsstore(self.sopener)
318 if store and not obsolete._enabled:
329 if store and not obsolete._enabled:
319 # message is rare enough to not be translated
330 # message is rare enough to not be translated
320 msg = 'obsolete feature not enabled but %i markers found!\n'
331 msg = 'obsolete feature not enabled but %i markers found!\n'
321 self.ui.warn(msg % len(list(store)))
332 self.ui.warn(msg % len(list(store)))
322 return store
333 return store
323
334
324 @unfilteredpropertycache
335 @unfilteredpropertycache
325 def hiddenrevs(self):
336 def hiddenrevs(self):
326 """hiddenrevs: revs that should be hidden by command and tools
337 """hiddenrevs: revs that should be hidden by command and tools
327
338
328 This set is carried on the repo to ease initialization and lazy
339 This set is carried on the repo to ease initialization and lazy
329 loading; it'll probably move back to changelog for efficiency and
340 loading; it'll probably move back to changelog for efficiency and
330 consistency reasons.
341 consistency reasons.
331
342
332 Note that the hiddenrevs will needs invalidations when
343 Note that the hiddenrevs will needs invalidations when
333 - a new changesets is added (possible unstable above extinct)
344 - a new changesets is added (possible unstable above extinct)
334 - a new obsolete marker is added (possible new extinct changeset)
345 - a new obsolete marker is added (possible new extinct changeset)
335
346
336 hidden changesets cannot have non-hidden descendants
347 hidden changesets cannot have non-hidden descendants
337 """
348 """
338 hidden = set()
349 hidden = set()
339 if self.obsstore:
350 if self.obsstore:
340 ### hide extinct changeset that are not accessible by any mean
351 ### hide extinct changeset that are not accessible by any mean
341 hiddenquery = 'extinct() - ::(. + bookmark())'
352 hiddenquery = 'extinct() - ::(. + bookmark())'
342 hidden.update(self.revs(hiddenquery))
353 hidden.update(self.revs(hiddenquery))
343 return hidden
354 return hidden
344
355
345 @storecache('00changelog.i')
356 @storecache('00changelog.i')
346 def changelog(self):
357 def changelog(self):
347 c = changelog.changelog(self.sopener)
358 c = changelog.changelog(self.sopener)
348 if 'HG_PENDING' in os.environ:
359 if 'HG_PENDING' in os.environ:
349 p = os.environ['HG_PENDING']
360 p = os.environ['HG_PENDING']
350 if p.startswith(self.root):
361 if p.startswith(self.root):
351 c.readpending('00changelog.i.a')
362 c.readpending('00changelog.i.a')
352 return c
363 return c
353
364
354 @storecache('00manifest.i')
365 @storecache('00manifest.i')
355 def manifest(self):
366 def manifest(self):
356 return manifest.manifest(self.sopener)
367 return manifest.manifest(self.sopener)
357
368
358 @filecache('dirstate')
369 @repofilecache('dirstate')
359 def dirstate(self):
370 def dirstate(self):
360 warned = [0]
371 warned = [0]
361 def validate(node):
372 def validate(node):
362 try:
373 try:
363 self.changelog.rev(node)
374 self.changelog.rev(node)
364 return node
375 return node
365 except error.LookupError:
376 except error.LookupError:
366 if not warned[0]:
377 if not warned[0]:
367 warned[0] = True
378 warned[0] = True
368 self.ui.warn(_("warning: ignoring unknown"
379 self.ui.warn(_("warning: ignoring unknown"
369 " working parent %s!\n") % short(node))
380 " working parent %s!\n") % short(node))
370 return nullid
381 return nullid
371
382
372 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
383 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
373
384
374 def __getitem__(self, changeid):
385 def __getitem__(self, changeid):
375 if changeid is None:
386 if changeid is None:
376 return context.workingctx(self)
387 return context.workingctx(self)
377 return context.changectx(self, changeid)
388 return context.changectx(self, changeid)
378
389
379 def __contains__(self, changeid):
390 def __contains__(self, changeid):
380 try:
391 try:
381 return bool(self.lookup(changeid))
392 return bool(self.lookup(changeid))
382 except error.RepoLookupError:
393 except error.RepoLookupError:
383 return False
394 return False
384
395
385 def __nonzero__(self):
396 def __nonzero__(self):
386 return True
397 return True
387
398
388 def __len__(self):
399 def __len__(self):
389 return len(self.changelog)
400 return len(self.changelog)
390
401
391 def __iter__(self):
402 def __iter__(self):
392 return iter(self.changelog)
403 return iter(self.changelog)
393
404
394 def revs(self, expr, *args):
405 def revs(self, expr, *args):
395 '''Return a list of revisions matching the given revset'''
406 '''Return a list of revisions matching the given revset'''
396 expr = revset.formatspec(expr, *args)
407 expr = revset.formatspec(expr, *args)
397 m = revset.match(None, expr)
408 m = revset.match(None, expr)
398 return [r for r in m(self, list(self))]
409 return [r for r in m(self, list(self))]
399
410
400 def set(self, expr, *args):
411 def set(self, expr, *args):
401 '''
412 '''
402 Yield a context for each matching revision, after doing arg
413 Yield a context for each matching revision, after doing arg
403 replacement via revset.formatspec
414 replacement via revset.formatspec
404 '''
415 '''
405 for r in self.revs(expr, *args):
416 for r in self.revs(expr, *args):
406 yield self[r]
417 yield self[r]
407
418
408 def url(self):
419 def url(self):
409 return 'file:' + self.root
420 return 'file:' + self.root
410
421
411 def hook(self, name, throw=False, **args):
422 def hook(self, name, throw=False, **args):
412 return hook.hook(self.ui, self, name, throw, **args)
423 return hook.hook(self.ui, self, name, throw, **args)
413
424
414 @unfilteredmeth
425 @unfilteredmeth
415 def _tag(self, names, node, message, local, user, date, extra={}):
426 def _tag(self, names, node, message, local, user, date, extra={}):
416 if isinstance(names, str):
427 if isinstance(names, str):
417 names = (names,)
428 names = (names,)
418
429
419 branches = self.branchmap()
430 branches = self.branchmap()
420 for name in names:
431 for name in names:
421 self.hook('pretag', throw=True, node=hex(node), tag=name,
432 self.hook('pretag', throw=True, node=hex(node), tag=name,
422 local=local)
433 local=local)
423 if name in branches:
434 if name in branches:
424 self.ui.warn(_("warning: tag %s conflicts with existing"
435 self.ui.warn(_("warning: tag %s conflicts with existing"
425 " branch name\n") % name)
436 " branch name\n") % name)
426
437
427 def writetags(fp, names, munge, prevtags):
438 def writetags(fp, names, munge, prevtags):
428 fp.seek(0, 2)
439 fp.seek(0, 2)
429 if prevtags and prevtags[-1] != '\n':
440 if prevtags and prevtags[-1] != '\n':
430 fp.write('\n')
441 fp.write('\n')
431 for name in names:
442 for name in names:
432 m = munge and munge(name) or name
443 m = munge and munge(name) or name
433 if (self._tagscache.tagtypes and
444 if (self._tagscache.tagtypes and
434 name in self._tagscache.tagtypes):
445 name in self._tagscache.tagtypes):
435 old = self.tags().get(name, nullid)
446 old = self.tags().get(name, nullid)
436 fp.write('%s %s\n' % (hex(old), m))
447 fp.write('%s %s\n' % (hex(old), m))
437 fp.write('%s %s\n' % (hex(node), m))
448 fp.write('%s %s\n' % (hex(node), m))
438 fp.close()
449 fp.close()
439
450
440 prevtags = ''
451 prevtags = ''
441 if local:
452 if local:
442 try:
453 try:
443 fp = self.opener('localtags', 'r+')
454 fp = self.opener('localtags', 'r+')
444 except IOError:
455 except IOError:
445 fp = self.opener('localtags', 'a')
456 fp = self.opener('localtags', 'a')
446 else:
457 else:
447 prevtags = fp.read()
458 prevtags = fp.read()
448
459
449 # local tags are stored in the current charset
460 # local tags are stored in the current charset
450 writetags(fp, names, None, prevtags)
461 writetags(fp, names, None, prevtags)
451 for name in names:
462 for name in names:
452 self.hook('tag', node=hex(node), tag=name, local=local)
463 self.hook('tag', node=hex(node), tag=name, local=local)
453 return
464 return
454
465
455 try:
466 try:
456 fp = self.wfile('.hgtags', 'rb+')
467 fp = self.wfile('.hgtags', 'rb+')
457 except IOError, e:
468 except IOError, e:
458 if e.errno != errno.ENOENT:
469 if e.errno != errno.ENOENT:
459 raise
470 raise
460 fp = self.wfile('.hgtags', 'ab')
471 fp = self.wfile('.hgtags', 'ab')
461 else:
472 else:
462 prevtags = fp.read()
473 prevtags = fp.read()
463
474
464 # committed tags are stored in UTF-8
475 # committed tags are stored in UTF-8
465 writetags(fp, names, encoding.fromlocal, prevtags)
476 writetags(fp, names, encoding.fromlocal, prevtags)
466
477
467 fp.close()
478 fp.close()
468
479
469 self.invalidatecaches()
480 self.invalidatecaches()
470
481
471 if '.hgtags' not in self.dirstate:
482 if '.hgtags' not in self.dirstate:
472 self[None].add(['.hgtags'])
483 self[None].add(['.hgtags'])
473
484
474 m = matchmod.exact(self.root, '', ['.hgtags'])
485 m = matchmod.exact(self.root, '', ['.hgtags'])
475 tagnode = self.commit(message, user, date, extra=extra, match=m)
486 tagnode = self.commit(message, user, date, extra=extra, match=m)
476
487
477 for name in names:
488 for name in names:
478 self.hook('tag', node=hex(node), tag=name, local=local)
489 self.hook('tag', node=hex(node), tag=name, local=local)
479
490
480 return tagnode
491 return tagnode
481
492
482 def tag(self, names, node, message, local, user, date):
493 def tag(self, names, node, message, local, user, date):
483 '''tag a revision with one or more symbolic names.
494 '''tag a revision with one or more symbolic names.
484
495
485 names is a list of strings or, when adding a single tag, names may be a
496 names is a list of strings or, when adding a single tag, names may be a
486 string.
497 string.
487
498
488 if local is True, the tags are stored in a per-repository file.
499 if local is True, the tags are stored in a per-repository file.
489 otherwise, they are stored in the .hgtags file, and a new
500 otherwise, they are stored in the .hgtags file, and a new
490 changeset is committed with the change.
501 changeset is committed with the change.
491
502
492 keyword arguments:
503 keyword arguments:
493
504
494 local: whether to store tags in non-version-controlled file
505 local: whether to store tags in non-version-controlled file
495 (default False)
506 (default False)
496
507
497 message: commit message to use if committing
508 message: commit message to use if committing
498
509
499 user: name of user to use if committing
510 user: name of user to use if committing
500
511
501 date: date tuple to use if committing'''
512 date: date tuple to use if committing'''
502
513
503 if not local:
514 if not local:
504 for x in self.status()[:5]:
515 for x in self.status()[:5]:
505 if '.hgtags' in x:
516 if '.hgtags' in x:
506 raise util.Abort(_('working copy of .hgtags is changed '
517 raise util.Abort(_('working copy of .hgtags is changed '
507 '(please commit .hgtags manually)'))
518 '(please commit .hgtags manually)'))
508
519
509 self.tags() # instantiate the cache
520 self.tags() # instantiate the cache
510 self._tag(names, node, message, local, user, date)
521 self._tag(names, node, message, local, user, date)
511
522
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

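    # Shape of the mapping returned by tags() (hypothetical values, not from
    # the original source): a dict from tag name to binary node, always
    # including 'tip', e.g.
    #
    #   {'tip': '\x1f\x0e...', 'v1.0': '\xa3\x9f...'}
    #
    # Tags pointing at nodes missing from the (possibly filtered) changelog
    # are silently dropped by the LookupError/ValueError handler above.
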
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like 'global' or 'local'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    @unfilteredmeth # Until we get a smarter cache management
    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        if self.changelog.filteredrevs:
            # some changesets are excluded, so we can't use the cache
            branchmap = {}
            self._updatebranchcache(branchmap, (self[r] for r in self))
            return branchmap
        else:
            self.updatebranchcache()
            return self._branchcache

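    # Shape of the mapping returned by branchmap() (hypothetical values, not
    # part of the original source): branch name -> list of binary head nodes,
    # e.g.
    #
    #   {'default': [node1, node2], 'stable': [node3]}
    #
    # A branch can have several heads; _branchtip() below picks the tipmost
    # open one when a single representative node is needed.
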
    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch; open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    @unfilteredmeth # Until we get a smarter cache management
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if node not in self:
                    raise ValueError('invalidating branch cache because node '
                                     + '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

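    # Layout of the cache/branchheads file read above and written below (as
    # implied by _readbranchcache/_writebranchcache; the example values are
    # hypothetical):
    #
    #   <40-hex tip node> <tip rev>
    #   <40-hex head node> default
    #   <40-hex head node> stable
    #
    # The header line lets the reader detect a stale cache (tip differs) and
    # fall back to rebuilding from scratch.
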
    @unfilteredmeth # Until we get a smarter cache management
    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    @unfilteredmeth # Until we get a smarter cache management
    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of changesets that covers at least the
        missing heads, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records; the dirstate cannot do it, since it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
        if 'x' in flags:
            util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

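    # Hedged sketch of the encode/decode filter pipeline above (the filter
    # name and config snippet are illustrative, not from the original
    # source). hgrc entries such as
    #
    #   [encode]
    #   *.txt = tr1
    #
    # map a pattern to a filter command. adddatafilter('tr1:', fn) registers
    # a Python filter; when a configured command string starts with that
    # name, fn is called as fn(data, params, ui=..., repo=..., filename=...)
    # instead of piping the data through an external command via
    # util.filter().
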
    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

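    # Transaction bookkeeping in the methods above, summarized (a reading
    # aid, not original source text): before a transaction starts, the
    # current dirstate, branch, description, bookmarks and phaseroots are
    # snapshotted as journal.* files; on successful close, aftertrans()
    # renames each journal.* file to the matching undo.* file, which is what
    # rollback() and recover() below consume.
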
    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmeth # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcache = None # in UTF-8
        self.unfiltered()._branchcachetip = None
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly reread the dirstate (i.e. restore it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

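    # Locking convention used throughout this file (see recover() and
    # rollback() above for real call sites): take wlock before lock, and
    # release in a finally block, e.g.
    #
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       ... modify store and working copy ...
    #   finally:
    #       release(lock, wlock)
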
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

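    # What the copy metadata built above ends up looking like in the filelog
    # entry (a sketch; the hex value is hypothetical):
    #
    #   meta = {'copy': 'foo', 'copyrev': 'a3f9...'}
    #
    # together with fparent1 == nullid, which is the "look up the copy data"
    # signal described in the comment block inside _filecommit.
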
1257 @unfilteredmeth
1268 @unfilteredmeth
1258 def commit(self, text="", user=None, date=None, match=None, force=False,
1269 def commit(self, text="", user=None, date=None, match=None, force=False,
1259 editor=False, extra={}):
1270 editor=False, extra={}):
1260 """Add a new revision to current repository.
1271 """Add a new revision to current repository.
1261
1272
1262 Revision information is gathered from the working directory,
1273 Revision information is gathered from the working directory,
1263 match can be used to filter the committed files. If editor is
1274 match can be used to filter the committed files. If editor is
1264 supplied, it is called to get a commit message.
1275 supplied, it is called to get a commit message.
1265 """
1276 """
1266
1277
1267 def fail(f, msg):
1278 def fail(f, msg):
1268 raise util.Abort('%s: %s' % (f, msg))
1279 raise util.Abort('%s: %s' % (f, msg))
1269
1280
1270 if not match:
1281 if not match:
1271 match = matchmod.always(self.root, '')
1282 match = matchmod.always(self.root, '')
1272
1283
1273 if not force:
1284 if not force:
1274 vdirs = []
1285 vdirs = []
1275 match.dir = vdirs.append
1286 match.dir = vdirs.append
1276 match.bad = fail
1287 match.bad = fail
1277
1288
1278 wlock = self.wlock()
1289 wlock = self.wlock()
1279 try:
1290 try:
1280 wctx = self[None]
1291 wctx = self[None]
1281 merge = len(wctx.parents()) > 1
1292 merge = len(wctx.parents()) > 1
1282
1293
1283 if (not force and merge and match and
1294 if (not force and merge and match and
1284 (match.files() or match.anypats())):
1295 (match.files() or match.anypats())):
1285 raise util.Abort(_('cannot partially commit a merge '
1296 raise util.Abort(_('cannot partially commit a merge '
1286 '(do not specify files or patterns)'))
1297 '(do not specify files or patterns)'))
1287
1298
1288 changes = self.status(match=match, clean=force)
1299 changes = self.status(match=match, clean=force)
1289 if force:
1300 if force:
1290 changes[0].extend(changes[6]) # mq may commit unchanged files
1301 changes[0].extend(changes[6]) # mq may commit unchanged files
1291
1302
1292 # check subrepos
1303 # check subrepos
1293 subs = []
1304 subs = []
1294 commitsubs = set()
1305 commitsubs = set()
1295 newstate = wctx.substate.copy()
1306 newstate = wctx.substate.copy()
1296 # only manage subrepos and .hgsubstate if .hgsub is present
1307 # only manage subrepos and .hgsubstate if .hgsub is present
1297 if '.hgsub' in wctx:
1308 if '.hgsub' in wctx:
1298 # we'll decide whether to track this ourselves, thanks
1309 # we'll decide whether to track this ourselves, thanks
1299 if '.hgsubstate' in changes[0]:
1310 if '.hgsubstate' in changes[0]:
1300 changes[0].remove('.hgsubstate')
1311 changes[0].remove('.hgsubstate')
1301 if '.hgsubstate' in changes[2]:
1312 if '.hgsubstate' in changes[2]:
1302 changes[2].remove('.hgsubstate')
1313 changes[2].remove('.hgsubstate')
1303
1314
1304 # compare current state to last committed state
1315 # compare current state to last committed state
1305 # build new substate based on last committed state
1316 # build new substate based on last committed state
1306 oldstate = wctx.p1().substate
1317 oldstate = wctx.p1().substate
1307 for s in sorted(newstate.keys()):
1318 for s in sorted(newstate.keys()):
1308 if not match(s):
1319 if not match(s):
1309 # ignore working copy, use old state if present
1320 # ignore working copy, use old state if present
1310 if s in oldstate:
1321 if s in oldstate:
1311 newstate[s] = oldstate[s]
1322 newstate[s] = oldstate[s]
1312 continue
1323 continue
1313 if not force:
1324 if not force:
1314 raise util.Abort(
1325 raise util.Abort(
1315 _("commit with new subrepo %s excluded") % s)
1326 _("commit with new subrepo %s excluded") % s)
1316 if wctx.sub(s).dirty(True):
1327 if wctx.sub(s).dirty(True):
1317 if not self.ui.configbool('ui', 'commitsubrepos'):
1328 if not self.ui.configbool('ui', 'commitsubrepos'):
1318 raise util.Abort(
1329 raise util.Abort(
1319 _("uncommitted changes in subrepo %s") % s,
1330 _("uncommitted changes in subrepo %s") % s,
1320 hint=_("use --subrepos for recursive commit"))
1331 hint=_("use --subrepos for recursive commit"))
1321 subs.append(s)
1332 subs.append(s)
1322 commitsubs.add(s)
1333 commitsubs.add(s)
1323 else:
1334 else:
1324 bs = wctx.sub(s).basestate()
1335 bs = wctx.sub(s).basestate()
1325 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1336 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1326 if oldstate.get(s, (None, None, None))[1] != bs:
1337 if oldstate.get(s, (None, None, None))[1] != bs:
1327 subs.append(s)
1338 subs.append(s)
1328
1339
1329 # check for removed subrepos
1340 # check for removed subrepos
1330 for p in wctx.parents():
1341 for p in wctx.parents():
1331 r = [s for s in p.substate if s not in newstate]
1342 r = [s for s in p.substate if s not in newstate]
1332 subs += [s for s in r if match(s)]
1343 subs += [s for s in r if match(s)]
1333 if subs:
1344 if subs:
1334 if (not match('.hgsub') and
1345 if (not match('.hgsub') and
1335 '.hgsub' in (wctx.modified() + wctx.added())):
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret
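
    # Hedged usage sketch (illustrative, not part of this file): commit() is
    # normally driven through the command layer; a minimal direct call might
    # look like the following (the user name and message are assumptions).
    #
    #   node = repo.commit(text='fix the frobnicator', user='alice')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')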

    @unfilteredmeth
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
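
    # Hedged sketch (illustrative): commitctx() is the low-level entry point;
    # callers such as extensions often build a context.memctx and pass it in.
    # The helper names, parents and file contents below are assumptions.
    #
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(path, 'new contents\n')
    #   mctx = context.memctx(repo, (p1node, p2node), 'synthetic commit',
    #                         ['a.txt'], getfilectx, user='alice')
    #   newnode = repo.commitctx(mctx)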

    @unfilteredmeth
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheadcache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerate it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()
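
    # Hedged usage sketch (illustrative): strip-like code is expected to call
    # destroyed() right after truncating history; 'candidates' stands for
    # whatever new-head set the caller computed beforehand (an assumption).
    #
    #   repo.destroyed(newheadnodes=candidates)
    #   repo.destroyed()  # no hint: branch cache is regenerated lazily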

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)
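
    # Hedged usage sketch (illustrative): walk() simply delegates to the
    # context's walk; the pattern below is an assumption for demonstration.
    #
    #   m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #   for f in repo.walk(m, node='tip'):
    #       repo.ui.write(f + '\n')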

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
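
    # Hedged usage sketch (illustrative): status() returns a 7-tuple of
    # sorted file lists; the unpacking order below mirrors 'r' above.
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(unknown=True, clean=True)
    #   for f in modified:
    #       repo.ui.write('M %s\n' % f)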

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads
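
    # Hedged usage sketch (illustrative): list the open heads of a named
    # branch, newest first ('short' would come from the node module).
    #
    #   for h in repo.branchheads('default'):
    #       repo.ui.write('%s\n' % short(h))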

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
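
    # Hedged note (illustrative): for each (top, bottom) pair the loop above
    # follows first parents from 'top' and records the nodes at distances
    # 1, 2, 4, 8, ... from it, i.e. exponentially spaced samples along the
    # chain. For a linear chain 10 deep, the returned list would hold the
    # nodes 1, 2, 4 and 8 steps below 'top'.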

    def pull(self, remote, heads=None, force=False):
        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result
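
    # Hedged usage sketch (illustrative): pulling from a peer; the URL is an
    # assumption, and 'hg.peer' is the usual way to obtain a remote handle.
    #
    #   other = hg.peer(repo.ui, {}, 'http://example.com/repo')
    #   repo.pull(other)                    # pull everything
    #   repo.pull(other, heads=[somenode])  # pull up to specific heads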

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        unfi = self.unfiltered()
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(unfi, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(unfi, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete
                        # then, save the iteration
                        if unfi.obsstore:
                            # these messages are kept brief to respect the
                            # 80-char line limit
                            mso = _("push includes obsolete changeset: %s!")
                            msu = _("push includes unstable changeset: %s!")
                            msb = _("push includes bumped changeset: %s!")
                            # If we are going to push and there is at least
                            # one obsolete or unstable changeset in missing,
                            # then at least one of the missing heads will be
                            # obsolete or unstable. So checking heads only
                            # is ok.
                            for node in outgoing.missingheads:
                                ctx = unfi[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(msu % ctx)
                                elif ctx.bumped():
                                    raise util.Abort(msb % ctx)
                        discovery.checkheads(unfi, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly rooted; we may want to ensure it is, but
                    # XXX that is costly
                    outdated = unfi.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in unfi._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in unfi:
                    cr = unfi[nr]
                    cl = unfi[nl]
                    if bookmarks.validdest(unfi, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
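
    # Hedged usage sketch (illustrative): interpreting the return values
    # documented in the docstring above; the peer URL is an assumption.
    #
    #   other = hg.peer(repo.ui, {}, 'ssh://example.com//repo')
    #   ret = repo.push(other, newbranch=True)
    #   if ret is None:
    #       repo.ui.status('nothing to push\n')
    #   elif ret == 0:
    #       repo.ui.warn('push failed\n')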

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)
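
    # Hedged usage sketch (illustrative): the returned chunkbuffer can be
    # drained through read(); the output path and base node are assumptions.
    #
    #   cg = repo.changegroupsubset([basenode], repo.heads(), 'bundle')
    #   fp = open('out.cg', 'wb')
    #   while True:
    #       chunk = cg.read(4096)
    #       if not chunk:
    #           break
    #       fp.write(chunk)
    #   fp.close()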

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))
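
    # Hedged note (illustrative): in revset terms the bundle produced above
    # covers "::heads - ::common"; an equivalent explicit spelling would be:
    #
    #   out = discovery.outgoing(repo.changelog, [commonnode], [headnode])
    #   cg = repo.getlocalbundle('pull', out)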

    @unfilteredmeth
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
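
    # Hedged note (illustrative): the stream yielded by gengroup() above is
    # three changegroup sections in order -- changelog, manifests, then one
    # group per changed file (each prefixed by a fileheader) -- terminated by
    # bundler.close(); unbundle10 wraps it back into a reader object.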

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)
2216
2227
2217 @unfilteredmeth
2228 @unfilteredmeth
2218 def _changegroup(self, nodes, source):
2229 def _changegroup(self, nodes, source):
2219 """Compute the changegroup of all nodes that we have that a recipient
2230 """Compute the changegroup of all nodes that we have that a recipient
2220 doesn't. Return a chunkbuffer object whose read() method will return
2231 doesn't. Return a chunkbuffer object whose read() method will return
2221 successive changegroup chunks.
2232 successive changegroup chunks.
2222
2233
2223 This is much easier than the previous function as we can assume that
2234 This is much easier than the previous function as we can assume that
2224 the recipient has any changenode we aren't sending them.
2235 the recipient has any changenode we aren't sending them.
2225
2236
2226 nodes is the set of nodes to send"""
2237 nodes is the set of nodes to send"""
2227
2238
2228 cl = self.changelog
2239 cl = self.changelog
2229 mf = self.manifest
2240 mf = self.manifest
2230 mfs = {}
2241 mfs = {}
2231 changedfiles = set()
2242 changedfiles = set()
2232 fstate = ['']
2243 fstate = ['']
2233 count = [0, 0]
2244 count = [0, 0]
2234
2245
2235 self.hook('preoutgoing', throw=True, source=source)
2246 self.hook('preoutgoing', throw=True, source=source)
2236 self.changegroupinfo(nodes, source)
2247 self.changegroupinfo(nodes, source)
2237
2248
2238 revset = set([cl.rev(n) for n in nodes])
2249 revset = set([cl.rev(n) for n in nodes])
2239
2250
2240 def gennodelst(log):
2251 def gennodelst(log):
2241 ln, llr = log.node, log.linkrev
2252 ln, llr = log.node, log.linkrev
2242 return [ln(r) for r in log if llr(r) in revset]
2253 return [ln(r) for r in log if llr(r) in revset]
2243
2254
        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

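        # Sketch of the callback contract (a reading aid, not original
        # code): the bundler calls lookup(revlog, node) for every node it
        # emits and uses the result as that chunk's linknode, i.e. the
        # changelog node a receiver should associate with the revision:
        #
        #   lookup(cl, node)       # -> node itself (also records side data)
        #   lookup(mf, node)       # -> changelog node that introduced it
        #   lookup(filelog, node)  # -> changelog node that introduced it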
        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

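        # How the 'bundle.reorder' knob maps (the true/false spellings are
        # assumptions based on util.parsebool's usual inputs):
        #
        #   [bundle]
        #   reorder = auto   # None: let each revlog decide (the default)
        #   reorder = true   # always reorder nodes before bundling
        #   reorder = false  # never reorder
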
        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

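    # Reading aid for the stream produced above (the ordering comes
    # straight from gengroup; the "empty chunk" terminator is an assumption
    # from the bundle10 framing):
    #
    #   changelog chunks ............ terminated by an empty chunk
    #   manifest chunks ............. terminated by an empty chunk
    #   per changed file:
    #       fileheader(fname) + file chunks ... empty chunk
    #   bundler.close() ............. end-of-groups marker
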
    @unfilteredmeth
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

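            # Reading aid: needfiles maps filename -> set of file nodes the
            # incoming manifests promise, e.g. (values invented):
            #
            #   {'foo.c': set([n1, n2]), 'bar.py': set([n3])}
            #
            # every promised node must appear in the file groups below.
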
            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

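            # Example with invented numbers: a push adding two heads, one of
            # which closes its branch, gives dh = 2 - 1 = 1, so the status
            # line below reports "(+1 heads)"; closed heads are not counted.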
            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            obsolete.clearobscaches(self)

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret.
                #
                # We should not use 'added' here but the list of all changes
                # in the bundle.
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

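            # Rough summary of the phase movements above (a reading aid,
            # not additional behavior):
            #
            #   push, publishing repo     -> received csets become public
            #   push, non-publishing repo -> received csets end up draft
            #   pull / unbundle           -> added csets are marked draft
            #   strip                     -> boundary left untouched
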
            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                self.updatebranchcache()
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
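            # Wire format, as parsed by this method (a summary of the code,
            # not a protocol spec; the status and totals lines were consumed
            # above, the per-file records are consumed by the loop below):
            #
            #   <status>\n                  0 ok, 1 forbidden, 2 lock failed
            #   <filecount> <bytecount>\n
            #   then per file:
            #   <storepath>\0<size>\n followed by <size> raw bytes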
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                self.branchcache = rbranchmap
                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    self._writebranchcache(self.branchcache,
                                           self[rtiprev].node(), rtiprev)
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

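    # Decision table for the negotiation above (a reading aid, not
    # additional behavior):
    #
    #   streaming wanted or server-preferred, and no heads requested:
    #       remote advertises 'stream'      -> stream_in, revlogv1 assumed
    #       remote advertises 'streamreqs'
    #       and we support every format     -> stream_in with those formats
    #   anything else                       -> fall back to pull
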
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

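    # Hypothetical call, assuming the standard 'bookmarks' pushkey
    # namespace (bookmark name and node invented for illustration):
    #
    #   repo.pushkey('bookmarks', 'stable', '', hex(newnode))
    #
    # The result is truthy on success, as passed to the 'pushkey' hook.
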
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

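    # Hypothetical result shape for the standard 'bookmarks' namespace
    # (entries invented for illustration):
    #
    #   repo.listkeys('bookmarks')
    #   # -> {'stable': '0f2a...', 'default': '9c11...'}
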
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

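    # Example, with the return value inferred from the code above (a path
    # relative to the working directory):
    #
    #   repo.savecommitmessage('WIP: draft message')
    #   # -> '.hg/last-message.txt'
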
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

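# Typical use, sketched from how localrepository.transaction wires it up
# (the exact rename list varies; these paths are illustrative):
#
#   renames = [(self.sjoin('journal'), self.sjoin('undo'))]
#   tr = transaction.transaction(self.ui.warn, self.sopener,
#                                self.sjoin('journal'), aftertrans(renames))
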
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

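# For instance, derived from the replace() above:
#
#   undoname('/repo/.hg/journal.branch')  # -> '/repo/.hg/undo.branch'
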
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True