clfilter: ensure that filecache on localrepo is unfiltered...
Pierre-Yves David
r18014:a39fe76c default
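This changeset makes every filecache-backed property on localrepo read and write its cached value on the unfiltered repository, so that the unfiltered repo and any filtered views of it share a single cache entry. The new repofilecache class in the localrepo.py hunk below does this by redirecting __get__, __set__ and __delete__ to repo.unfiltered(). Here is a minimal, self-contained sketch of that redirect pattern; the names unfilteredcached, toyrepo and toyview are illustrative stand-ins, not Mercurial's actual classes:

class unfilteredcached(object):
    # Illustrative stand-in for repofilecache: the cached value always
    # lives on repo.unfiltered(), so every view sees the same entry.
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, repo, objtype=None):
        target = repo.unfiltered()
        if self.name not in target.__dict__:
            target.__dict__[self.name] = self.func(target)
        return target.__dict__[self.name]
    def __set__(self, repo, value):
        repo.unfiltered().__dict__[self.name] = value
    def __delete__(self, repo):
        repo.unfiltered().__dict__.pop(self.name, None)

class toyrepo(object):
    # Hypothetical unfiltered repository.
    def unfiltered(self):
        return self
    @unfilteredcached
    def _bookmarks(self):
        return {'stable': 'abc123'}

class toyview(toyrepo):
    # Hypothetical filtered view that delegates to the unfiltered repo.
    def __init__(self, unfi):
        self._unfi = unfi
    def unfiltered(self):
        return self._unfi

unfi = toyrepo()
view = toyview(unfi)
print(view._bookmarks is unfi._bookmarks)  # True: one shared cache entry

Setting or deleting the property through a filtered view lands on the unfiltered repo as well, which is what the __set__ and __delete__ overrides of repofilecache guarantee in the diff below.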
@@ -1,389 +1,389 @@ bundlerepo.py
1 1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 2 #
3 3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Repository class for viewing uncompressed bundles.
9 9
10 10 This provides a read-only repository interface to bundles as if they
11 11 were part of the actual repository.
12 12 """
13 13
14 14 from node import nullid
15 15 from i18n import _
16 16 import os, tempfile, shutil
17 17 import changegroup, util, mdiff, discovery, cmdutil
18 18 import localrepo, changelog, manifest, filelog, revlog, error
19 19
20 20 class bundlerevlog(revlog.revlog):
21 21 def __init__(self, opener, indexfile, bundle, linkmapper):
22 22 # How it works:
23 23 # to retrieve a revision, we need to know the offset of
24 24 # the revision in the bundle (an unbundle object).
25 25 #
26 26 # We store this offset in the index (start), to differentiate a
27 27 # rev in the bundle and from a rev in the revlog, we check
28 28 # len(index[r]). If the tuple is bigger than 7, it is a bundle
29 29 # (it is bigger since we store the node to which the delta is)
30 30 #
31 31 revlog.revlog.__init__(self, opener, indexfile)
32 32 self.bundle = bundle
33 33 self.basemap = {}
34 34 n = len(self)
35 35 chain = None
36 36 self.bundlenodes = []
37 37 while True:
38 38 chunkdata = bundle.deltachunk(chain)
39 39 if not chunkdata:
40 40 break
41 41 node = chunkdata['node']
42 42 p1 = chunkdata['p1']
43 43 p2 = chunkdata['p2']
44 44 cs = chunkdata['cs']
45 45 deltabase = chunkdata['deltabase']
46 46 delta = chunkdata['delta']
47 47
48 48 size = len(delta)
49 49 start = bundle.tell() - size
50 50
51 51 link = linkmapper(cs)
52 52 self.bundlenodes.append(node)
53 53 if node in self.nodemap:
54 54 # this can happen if two branches make the same change
55 55 chain = node
56 56 continue
57 57
58 58 for p in (p1, p2):
59 59 if p not in self.nodemap:
60 60 raise error.LookupError(p, self.indexfile,
61 61 _("unknown parent"))
62 62 # start, size, full unc. size, base (unused), link, p1, p2, node
63 63 e = (revlog.offset_type(start, 0), size, -1, -1, link,
64 64 self.rev(p1), self.rev(p2), node)
65 65 self.basemap[n] = deltabase
66 66 self.index.insert(-1, e)
67 67 self.nodemap[node] = n
68 68 chain = node
69 69 n += 1
70 70
71 71 def inbundle(self, rev):
72 72 """is rev from the bundle"""
73 73 if rev < 0:
74 74 return False
75 75 return rev in self.basemap
76 76 def bundlebase(self, rev):
77 77 return self.basemap[rev]
78 78 def _chunk(self, rev):
79 79 # Warning: in case of bundle, the diff is against bundlebase,
80 80 # not against rev - 1
81 81 # XXX: could use some caching
82 82 if not self.inbundle(rev):
83 83 return revlog.revlog._chunk(self, rev)
84 84 self.bundle.seek(self.start(rev))
85 85 return self.bundle.read(self.length(rev))
86 86
87 87 def revdiff(self, rev1, rev2):
88 88 """return or calculate a delta between two revisions"""
89 89 if self.inbundle(rev1) and self.inbundle(rev2):
90 90 # hot path for bundle
91 91 revb = self.rev(self.bundlebase(rev2))
92 92 if revb == rev1:
93 93 return self._chunk(rev2)
94 94 elif not self.inbundle(rev1) and not self.inbundle(rev2):
95 95 return revlog.revlog.revdiff(self, rev1, rev2)
96 96
97 97 return mdiff.textdiff(self.revision(self.node(rev1)),
98 98 self.revision(self.node(rev2)))
99 99
100 100 def revision(self, nodeorrev):
101 101 """return an uncompressed revision of a given node or revision
102 102 number.
103 103 """
104 104 if isinstance(nodeorrev, int):
105 105 rev = nodeorrev
106 106 node = self.node(rev)
107 107 else:
108 108 node = nodeorrev
109 109 rev = self.rev(node)
110 110
111 111 if node == nullid:
112 112 return ""
113 113
114 114 text = None
115 115 chain = []
116 116 iter_node = node
117 117 # reconstruct the revision if it is from a changegroup
118 118 while self.inbundle(rev):
119 119 if self._cache and self._cache[0] == iter_node:
120 120 text = self._cache[2]
121 121 break
122 122 chain.append(rev)
123 123 iter_node = self.bundlebase(rev)
124 124 rev = self.rev(iter_node)
125 125 if text is None:
126 126 text = revlog.revlog.revision(self, iter_node)
127 127
128 128 while chain:
129 129 delta = self._chunk(chain.pop())
130 130 text = mdiff.patches(text, [delta])
131 131
132 132 p1, p2 = self.parents(node)
133 133 if node != revlog.hash(text, p1, p2):
134 134 raise error.RevlogError(_("integrity check failed on %s:%d")
135 135 % (self.datafile, self.rev(node)))
136 136
137 137 self._cache = (node, self.rev(node), text)
138 138 return text
139 139
140 140 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
141 141 raise NotImplementedError
142 142 def addgroup(self, revs, linkmapper, transaction):
143 143 raise NotImplementedError
144 144 def strip(self, rev, minlink):
145 145 raise NotImplementedError
146 146 def checksize(self):
147 147 raise NotImplementedError
148 148
149 149 class bundlechangelog(bundlerevlog, changelog.changelog):
150 150 def __init__(self, opener, bundle):
151 151 changelog.changelog.__init__(self, opener)
152 152 linkmapper = lambda x: x
153 153 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
154 154 linkmapper)
155 155
156 156 class bundlemanifest(bundlerevlog, manifest.manifest):
157 157 def __init__(self, opener, bundle, linkmapper):
158 158 manifest.manifest.__init__(self, opener)
159 159 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
160 160 linkmapper)
161 161
162 162 class bundlefilelog(bundlerevlog, filelog.filelog):
163 163 def __init__(self, opener, path, bundle, linkmapper, repo):
164 164 filelog.filelog.__init__(self, opener, path)
165 165 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
166 166 linkmapper)
167 167 self._repo = repo
168 168
169 169 def _file(self, f):
170 170 self._repo.file(f)
171 171
172 172 class bundlepeer(localrepo.localpeer):
173 173 def canpush(self):
174 174 return False
175 175
176 176 class bundlerepository(localrepo.localrepository):
177 177 def __init__(self, ui, path, bundlename):
178 178 self._tempparent = None
179 179 try:
180 180 localrepo.localrepository.__init__(self, ui, path)
181 181 except error.RepoError:
182 182 self._tempparent = tempfile.mkdtemp()
183 183 localrepo.instance(ui, self._tempparent, 1)
184 184 localrepo.localrepository.__init__(self, ui, self._tempparent)
185 185 self.ui.setconfig('phases', 'publish', False)
186 186
187 187 if path:
188 188 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
189 189 else:
190 190 self._url = 'bundle:' + bundlename
191 191
192 192 self.tempfile = None
193 193 f = util.posixfile(bundlename, "rb")
194 194 self.bundle = changegroup.readbundle(f, bundlename)
195 195 if self.bundle.compressed():
196 196 fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
197 197 suffix=".hg10un", dir=self.path)
198 198 self.tempfile = temp
199 199 fptemp = os.fdopen(fdtemp, 'wb')
200 200
201 201 try:
202 202 fptemp.write("HG10UN")
203 203 while True:
204 204 chunk = self.bundle.read(2**18)
205 205 if not chunk:
206 206 break
207 207 fptemp.write(chunk)
208 208 finally:
209 209 fptemp.close()
210 210
211 211 f = util.posixfile(self.tempfile, "rb")
212 212 self.bundle = changegroup.readbundle(f, bundlename)
213 213
214 214 # dict with the mapping 'filename' -> position in the bundle
215 215 self.bundlefilespos = {}
216 216
217 @util.propertycache
217 @localrepo.unfilteredpropertycache
218 218 def changelog(self):
219 219 # consume the header if it exists
220 220 self.bundle.changelogheader()
221 221 c = bundlechangelog(self.sopener, self.bundle)
222 222 self.manstart = self.bundle.tell()
223 223 return c
224 224
225 @util.propertycache
225 @localrepo.unfilteredpropertycache
226 226 def manifest(self):
227 227 self.bundle.seek(self.manstart)
228 228 # consume the header if it exists
229 229 self.bundle.manifestheader()
230 230 m = bundlemanifest(self.sopener, self.bundle, self.changelog.rev)
231 231 self.filestart = self.bundle.tell()
232 232 return m
233 233
234 @util.propertycache
234 @localrepo.unfilteredpropertycache
235 235 def manstart(self):
236 236 self.changelog
237 237 return self.manstart
238 238
239 @util.propertycache
239 @localrepo.unfilteredpropertycache
240 240 def filestart(self):
241 241 self.manifest
242 242 return self.filestart
243 243
244 244 def url(self):
245 245 return self._url
246 246
247 247 def file(self, f):
248 248 if not self.bundlefilespos:
249 249 self.bundle.seek(self.filestart)
250 250 while True:
251 251 chunkdata = self.bundle.filelogheader()
252 252 if not chunkdata:
253 253 break
254 254 fname = chunkdata['filename']
255 255 self.bundlefilespos[fname] = self.bundle.tell()
256 256 while True:
257 257 c = self.bundle.deltachunk(None)
258 258 if not c:
259 259 break
260 260
261 261 if f[0] == '/':
262 262 f = f[1:]
263 263 if f in self.bundlefilespos:
264 264 self.bundle.seek(self.bundlefilespos[f])
265 265 return bundlefilelog(self.sopener, f, self.bundle,
266 266 self.changelog.rev, self)
267 267 else:
268 268 return filelog.filelog(self.sopener, f)
269 269
270 270 def close(self):
271 271 """Close assigned bundle file immediately."""
272 272 self.bundle.close()
273 273 if self.tempfile is not None:
274 274 os.unlink(self.tempfile)
275 275 if self._tempparent:
276 276 shutil.rmtree(self._tempparent, True)
277 277
278 278 def cancopy(self):
279 279 return False
280 280
281 281 def peer(self):
282 282 return bundlepeer(self)
283 283
284 284 def getcwd(self):
285 285 return os.getcwd() # always outside the repo
286 286
287 287 def _writebranchcache(self, branches, tip, tiprev):
288 288 # don't overwrite the disk cache with bundle-augmented data
289 289 pass
290 290
291 291 def instance(ui, path, create):
292 292 if create:
293 293 raise util.Abort(_('cannot create new bundle repository'))
294 294 parentpath = ui.config("bundle", "mainreporoot", "")
295 295 if not parentpath:
296 296 # try to find the correct path to the working directory repo
297 297 parentpath = cmdutil.findrepo(os.getcwd())
298 298 if parentpath is None:
299 299 parentpath = ''
300 300 if parentpath:
301 301 # Try to make the full path relative so we get a nice, short URL.
302 302 # In particular, we don't want temp dir names in test outputs.
303 303 cwd = os.getcwd()
304 304 if parentpath == cwd:
305 305 parentpath = ''
306 306 else:
307 307 cwd = os.path.join(cwd,'')
308 308 if parentpath.startswith(cwd):
309 309 parentpath = parentpath[len(cwd):]
310 310 u = util.url(path)
311 311 path = u.localpath()
312 312 if u.scheme == 'bundle':
313 313 s = path.split("+", 1)
314 314 if len(s) == 1:
315 315 repopath, bundlename = parentpath, s[0]
316 316 else:
317 317 repopath, bundlename = s
318 318 else:
319 319 repopath, bundlename = parentpath, path
320 320 return bundlerepository(ui, repopath, bundlename)
321 321
322 322 def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
323 323 force=False):
324 324 '''obtains a bundle of changes incoming from other
325 325
326 326 "onlyheads" restricts the returned changes to those reachable from the
327 327 specified heads.
328 328 "bundlename", if given, stores the bundle to this file path permanently;
329 329 otherwise it's stored to a temp file and gets deleted again when you call
330 330 the returned "cleanupfn".
331 331 "force" indicates whether to proceed on unrelated repos.
332 332
333 333 Returns a tuple (local, csets, cleanupfn):
334 334
335 335 "local" is a local repo from which to obtain the actual incoming
336 336 changesets; it is a bundlerepo for the obtained bundle when the
337 337 original "other" is remote.
338 338 "csets" lists the incoming changeset node ids.
339 339 "cleanupfn" must be called without arguments when you're done processing
340 340 the changes; it closes both the original "other" and the one returned
341 341 here.
342 342 '''
343 343 tmp = discovery.findcommonincoming(repo, other, heads=onlyheads,
344 344 force=force)
345 345 common, incoming, rheads = tmp
346 346 if not incoming:
347 347 try:
348 348 if bundlename:
349 349 os.unlink(bundlename)
350 350 except OSError:
351 351 pass
352 352 return other, [], other.close
353 353
354 354 bundle = None
355 355 bundlerepo = None
356 356 localrepo = other.local()
357 357 if bundlename or not localrepo:
358 358 # create a bundle (uncompressed if other repo is not local)
359 359
360 360 if other.capable('getbundle'):
361 361 cg = other.getbundle('incoming', common=common, heads=rheads)
362 362 elif onlyheads is None and not other.capable('changegroupsubset'):
363 363 # compat with older servers when pulling all remote heads
364 364 cg = other.changegroup(incoming, "incoming")
365 365 rheads = None
366 366 else:
367 367 cg = other.changegroupsubset(incoming, rheads, 'incoming')
368 368 bundletype = localrepo and "HG10BZ" or "HG10UN"
369 369 fname = bundle = changegroup.writebundle(cg, bundlename, bundletype)
370 370 # keep written bundle?
371 371 if bundlename:
372 372 bundle = None
373 373 if not localrepo:
374 374 # use the created uncompressed bundlerepo
375 375 localrepo = bundlerepo = bundlerepository(ui, repo.root, fname)
376 376 # this repo contains local and other now, so filter out local again
377 377 common = repo.heads()
378 378
379 379 csets = localrepo.changelog.findmissing(common, rheads)
380 380
381 381 def cleanup():
382 382 if bundlerepo:
383 383 bundlerepo.close()
384 384 if bundle:
385 385 os.unlink(bundle)
386 386 other.close()
387 387
388 388 return (localrepo, csets, cleanup)
389 389
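The bundlerepo.py hunk above switches bundlerepository's lazily computed changelog, manifest, manstart and filestart attributes from util.propertycache to localrepo.unfilteredpropertycache, the read-only counterpart of repofilecache: the value is computed for and stored on repo.unfiltered() rather than on whichever (possibly filtered) object the attribute was accessed through. A small sketch of the behavioural difference, again using hypothetical toy classes rather than the real Mercurial descriptors:

class propcache(object):
    # Behaves like util.propertycache: compute once, then cache on the
    # object the attribute was accessed through (non-data descriptor).
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, objtype=None):
        value = self.func(obj)
        obj.__dict__[self.name] = value
        return value

class unfilteredpropcache(propcache):
    # Behaves like localrepo.unfilteredpropertycache: resolve the
    # unfiltered repo first, so filtered views never cache their own copy.
    def __get__(self, obj, objtype=None):
        return super(unfilteredpropcache, self).__get__(obj.unfiltered())

class repo(object):
    def unfiltered(self):
        return self
    @propcache
    def perview(self):
        return object()
    @unfilteredpropcache
    def shared(self):
        return object()

class view(repo):
    def __init__(self, unfi):
        self._unfi = unfi
    def unfiltered(self):
        return self._unfi

unfi = repo()
v = view(unfi)
print(v.perview is unfi.perview)  # False: each object caches its own value
print(v.shared is unfi.shared)    # True: both hit the unfiltered repo's cache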
@@ -1,2669 +1,2680 @@ localrepo.py
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 class storecache(filecache):
21 class repofilecache(filecache):
22 """All filecache usage on repo are done for logic that should be unfiltered
23 """
24
25 def __get__(self, repo, type=None):
26 return super(repofilecache, self).__get__(repo.unfiltered(), type)
27 def __set__(self, repo, value):
28 return super(repofilecache, self).__set__(repo.unfiltered(), value)
29 def __delete__(self, repo):
30 return super(repofilecache, self).__delete__(repo.unfiltered())
31
32 class storecache(repofilecache):
22 33 """filecache for files in the store"""
23 34 def join(self, obj, fname):
24 35 return obj.sjoin(fname)
25 36
26 37 class unfilteredpropertycache(propertycache):
27 38 """propertycache that apply to unfiltered repo only"""
28 39
29 40 def __get__(self, repo, type=None):
30 41 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
31 42
32 43 class filteredpropertycache(propertycache):
33 44 """propertycache that must take filtering in account"""
34 45
35 46 def cachevalue(self, obj, value):
36 47 object.__setattr__(obj, self.name, value)
37 48
38 49
39 50 def hasunfilteredcache(repo, name):
40 51 """check if an repo and a unfilteredproperty cached value for <name>"""
41 52 return name in vars(repo.unfiltered())
42 53
43 54 def unfilteredmeth(orig):
44 55 """decorate method that always need to be run on unfiltered version"""
45 56 def wrapper(repo, *args, **kwargs):
46 57 return orig(repo.unfiltered(), *args, **kwargs)
47 58 return wrapper
48 59
49 60 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
50 61 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
51 62
52 63 class localpeer(peer.peerrepository):
53 64 '''peer for a local repo; reflects only the most recent API'''
54 65
55 66 def __init__(self, repo, caps=MODERNCAPS):
56 67 peer.peerrepository.__init__(self)
57 68 self._repo = repo
58 69 self.ui = repo.ui
59 70 self._caps = repo._restrictcapabilities(caps)
60 71 self.requirements = repo.requirements
61 72 self.supportedformats = repo.supportedformats
62 73
63 74 def close(self):
64 75 self._repo.close()
65 76
66 77 def _capabilities(self):
67 78 return self._caps
68 79
69 80 def local(self):
70 81 return self._repo
71 82
72 83 def canpush(self):
73 84 return True
74 85
75 86 def url(self):
76 87 return self._repo.url()
77 88
78 89 def lookup(self, key):
79 90 return self._repo.lookup(key)
80 91
81 92 def branchmap(self):
82 93 return discovery.visiblebranchmap(self._repo)
83 94
84 95 def heads(self):
85 96 return discovery.visibleheads(self._repo)
86 97
87 98 def known(self, nodes):
88 99 return self._repo.known(nodes)
89 100
90 101 def getbundle(self, source, heads=None, common=None):
91 102 return self._repo.getbundle(source, heads=heads, common=common)
92 103
93 104 # TODO We might want to move the next two calls into legacypeer and add
94 105 # unbundle instead.
95 106
96 107 def lock(self):
97 108 return self._repo.lock()
98 109
99 110 def addchangegroup(self, cg, source, url):
100 111 return self._repo.addchangegroup(cg, source, url)
101 112
102 113 def pushkey(self, namespace, key, old, new):
103 114 return self._repo.pushkey(namespace, key, old, new)
104 115
105 116 def listkeys(self, namespace):
106 117 return self._repo.listkeys(namespace)
107 118
108 119 def debugwireargs(self, one, two, three=None, four=None, five=None):
109 120 '''used to test argument passing over the wire'''
110 121 return "%s %s %s %s %s" % (one, two, three, four, five)
111 122
112 123 class locallegacypeer(localpeer):
113 124 '''peer extension which implements legacy methods too; used for tests with
114 125 restricted capabilities'''
115 126
116 127 def __init__(self, repo):
117 128 localpeer.__init__(self, repo, caps=LEGACYCAPS)
118 129
119 130 def branches(self, nodes):
120 131 return self._repo.branches(nodes)
121 132
122 133 def between(self, pairs):
123 134 return self._repo.between(pairs)
124 135
125 136 def changegroup(self, basenodes, source):
126 137 return self._repo.changegroup(basenodes, source)
127 138
128 139 def changegroupsubset(self, bases, heads, source):
129 140 return self._repo.changegroupsubset(bases, heads, source)
130 141
131 142 class localrepository(object):
132 143
133 144 supportedformats = set(('revlogv1', 'generaldelta'))
134 145 supported = supportedformats | set(('store', 'fncache', 'shared',
135 146 'dotencode'))
136 147 openerreqs = set(('revlogv1', 'generaldelta'))
137 148 requirements = ['revlogv1']
138 149
139 150 def _baserequirements(self, create):
140 151 return self.requirements[:]
141 152
142 153 def __init__(self, baseui, path=None, create=False):
143 154 self.wvfs = scmutil.vfs(path, expand=True)
144 155 self.wopener = self.wvfs
145 156 self.root = self.wvfs.base
146 157 self.path = self.wvfs.join(".hg")
147 158 self.origroot = path
148 159 self.auditor = scmutil.pathauditor(self.root, self._checknested)
149 160 self.vfs = scmutil.vfs(self.path)
150 161 self.opener = self.vfs
151 162 self.baseui = baseui
152 163 self.ui = baseui.copy()
153 164 # A list of callback to shape the phase if no data were found.
154 165 # Callback are in the form: func(repo, roots) --> processed root.
155 166 # This list it to be filled by extension during repo setup
156 167 self._phasedefaults = []
157 168 try:
158 169 self.ui.readconfig(self.join("hgrc"), self.root)
159 170 extensions.loadall(self.ui)
160 171 except IOError:
161 172 pass
162 173
163 174 if not self.vfs.isdir():
164 175 if create:
165 176 if not self.wvfs.exists():
166 177 self.wvfs.makedirs()
167 178 self.vfs.makedir(notindexed=True)
168 179 requirements = self._baserequirements(create)
169 180 if self.ui.configbool('format', 'usestore', True):
170 181 self.vfs.mkdir("store")
171 182 requirements.append("store")
172 183 if self.ui.configbool('format', 'usefncache', True):
173 184 requirements.append("fncache")
174 185 if self.ui.configbool('format', 'dotencode', True):
175 186 requirements.append('dotencode')
176 187 # create an invalid changelog
177 188 self.vfs.append(
178 189 "00changelog.i",
179 190 '\0\0\0\2' # represents revlogv2
180 191 ' dummy changelog to prevent using the old repo layout'
181 192 )
182 193 if self.ui.configbool('format', 'generaldelta', False):
183 194 requirements.append("generaldelta")
184 195 requirements = set(requirements)
185 196 else:
186 197 raise error.RepoError(_("repository %s not found") % path)
187 198 elif create:
188 199 raise error.RepoError(_("repository %s already exists") % path)
189 200 else:
190 201 try:
191 202 requirements = scmutil.readrequires(self.vfs, self.supported)
192 203 except IOError, inst:
193 204 if inst.errno != errno.ENOENT:
194 205 raise
195 206 requirements = set()
196 207
197 208 self.sharedpath = self.path
198 209 try:
199 210 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
200 211 if not os.path.exists(s):
201 212 raise error.RepoError(
202 213 _('.hg/sharedpath points to nonexistent directory %s') % s)
203 214 self.sharedpath = s
204 215 except IOError, inst:
205 216 if inst.errno != errno.ENOENT:
206 217 raise
207 218
208 219 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
209 220 self.spath = self.store.path
210 221 self.svfs = self.store.vfs
211 222 self.sopener = self.svfs
212 223 self.sjoin = self.store.join
213 224 self.vfs.createmode = self.store.createmode
214 225 self._applyrequirements(requirements)
215 226 if create:
216 227 self._writerequirements()
217 228
218 229
219 230 self._branchcache = None
220 231 self._branchcachetip = None
221 232 self.filterpats = {}
222 233 self._datafilters = {}
223 234 self._transref = self._lockref = self._wlockref = None
224 235
225 236 # A cache for various files under .hg/ that tracks file changes,
226 237 # (used by the filecache decorator)
227 238 #
228 239 # Maps a property name to its util.filecacheentry
229 240 self._filecache = {}
230 241
231 242 def close(self):
232 243 pass
233 244
234 245 def _restrictcapabilities(self, caps):
235 246 return caps
236 247
237 248 def _applyrequirements(self, requirements):
238 249 self.requirements = requirements
239 250 self.sopener.options = dict((r, 1) for r in requirements
240 251 if r in self.openerreqs)
241 252
242 253 def _writerequirements(self):
243 254 reqfile = self.opener("requires", "w")
244 255 for r in self.requirements:
245 256 reqfile.write("%s\n" % r)
246 257 reqfile.close()
247 258
248 259 def _checknested(self, path):
249 260 """Determine if path is a legal nested repository."""
250 261 if not path.startswith(self.root):
251 262 return False
252 263 subpath = path[len(self.root) + 1:]
253 264 normsubpath = util.pconvert(subpath)
254 265
255 266 # XXX: Checking against the current working copy is wrong in
256 267 # the sense that it can reject things like
257 268 #
258 269 # $ hg cat -r 10 sub/x.txt
259 270 #
260 271 # if sub/ is no longer a subrepository in the working copy
261 272 # parent revision.
262 273 #
263 274 # However, it can of course also allow things that would have
264 275 # been rejected before, such as the above cat command if sub/
265 276 # is a subrepository now, but was a normal directory before.
266 277 # The old path auditor would have rejected by mistake since it
267 278 # panics when it sees sub/.hg/.
268 279 #
269 280 # All in all, checking against the working copy seems sensible
270 281 # since we want to prevent access to nested repositories on
271 282 # the filesystem *now*.
272 283 ctx = self[None]
273 284 parts = util.splitpath(subpath)
274 285 while parts:
275 286 prefix = '/'.join(parts)
276 287 if prefix in ctx.substate:
277 288 if prefix == normsubpath:
278 289 return True
279 290 else:
280 291 sub = ctx.sub(prefix)
281 292 return sub.checknested(subpath[len(prefix) + 1:])
282 293 else:
283 294 parts.pop()
284 295 return False
285 296
286 297 def peer(self):
287 298 return localpeer(self) # not cached to avoid reference cycle
288 299
289 300 def unfiltered(self):
290 301 """Return unfiltered version of the repository
291 302
292 303 Intended to be ovewritten by filtered repo."""
293 304 return self
294 305
295 @filecache('bookmarks')
306 @repofilecache('bookmarks')
296 307 def _bookmarks(self):
297 308 return bookmarks.bmstore(self)
298 309
299 @filecache('bookmarks.current')
310 @repofilecache('bookmarks.current')
300 311 def _bookmarkcurrent(self):
301 312 return bookmarks.readcurrent(self)
302 313
303 314 def bookmarkheads(self, bookmark):
304 315 name = bookmark.split('@', 1)[0]
305 316 heads = []
306 317 for mark, n in self._bookmarks.iteritems():
307 318 if mark.split('@', 1)[0] == name:
308 319 heads.append(n)
309 320 return heads
310 321
311 322 @storecache('phaseroots')
312 323 def _phasecache(self):
313 324 return phases.phasecache(self, self._phasedefaults)
314 325
315 326 @storecache('obsstore')
316 327 def obsstore(self):
317 328 store = obsolete.obsstore(self.sopener)
318 329 if store and not obsolete._enabled:
319 330 # message is rare enough to not be translated
320 331 msg = 'obsolete feature not enabled but %i markers found!\n'
321 332 self.ui.warn(msg % len(list(store)))
322 333 return store
323 334
324 335 @unfilteredpropertycache
325 336 def hiddenrevs(self):
326 337 """hiddenrevs: revs that should be hidden by command and tools
327 338
328 339 This set is carried on the repo to ease initialization and lazy
329 340 loading; it'll probably move back to changelog for efficiency and
330 341 consistency reasons.
331 342
332 343 Note that the hiddenrevs will needs invalidations when
333 344 - a new changesets is added (possible unstable above extinct)
334 345 - a new obsolete marker is added (possible new extinct changeset)
335 346
336 347 hidden changesets cannot have non-hidden descendants
337 348 """
338 349 hidden = set()
339 350 if self.obsstore:
340 351 ### hide extinct changeset that are not accessible by any mean
341 352 hiddenquery = 'extinct() - ::(. + bookmark())'
342 353 hidden.update(self.revs(hiddenquery))
343 354 return hidden
344 355
345 356 @storecache('00changelog.i')
346 357 def changelog(self):
347 358 c = changelog.changelog(self.sopener)
348 359 if 'HG_PENDING' in os.environ:
349 360 p = os.environ['HG_PENDING']
350 361 if p.startswith(self.root):
351 362 c.readpending('00changelog.i.a')
352 363 return c
353 364
354 365 @storecache('00manifest.i')
355 366 def manifest(self):
356 367 return manifest.manifest(self.sopener)
357 368
358 @filecache('dirstate')
369 @repofilecache('dirstate')
359 370 def dirstate(self):
360 371 warned = [0]
361 372 def validate(node):
362 373 try:
363 374 self.changelog.rev(node)
364 375 return node
365 376 except error.LookupError:
366 377 if not warned[0]:
367 378 warned[0] = True
368 379 self.ui.warn(_("warning: ignoring unknown"
369 380 " working parent %s!\n") % short(node))
370 381 return nullid
371 382
372 383 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
373 384
374 385 def __getitem__(self, changeid):
375 386 if changeid is None:
376 387 return context.workingctx(self)
377 388 return context.changectx(self, changeid)
378 389
379 390 def __contains__(self, changeid):
380 391 try:
381 392 return bool(self.lookup(changeid))
382 393 except error.RepoLookupError:
383 394 return False
384 395
385 396 def __nonzero__(self):
386 397 return True
387 398
388 399 def __len__(self):
389 400 return len(self.changelog)
390 401
391 402 def __iter__(self):
392 403 return iter(self.changelog)
393 404
394 405 def revs(self, expr, *args):
395 406 '''Return a list of revisions matching the given revset'''
396 407 expr = revset.formatspec(expr, *args)
397 408 m = revset.match(None, expr)
398 409 return [r for r in m(self, list(self))]
399 410
400 411 def set(self, expr, *args):
401 412 '''
402 413 Yield a context for each matching revision, after doing arg
403 414 replacement via revset.formatspec
404 415 '''
405 416 for r in self.revs(expr, *args):
406 417 yield self[r]
407 418
408 419 def url(self):
409 420 return 'file:' + self.root
410 421
411 422 def hook(self, name, throw=False, **args):
412 423 return hook.hook(self.ui, self, name, throw, **args)
413 424
414 425 @unfilteredmeth
415 426 def _tag(self, names, node, message, local, user, date, extra={}):
416 427 if isinstance(names, str):
417 428 names = (names,)
418 429
419 430 branches = self.branchmap()
420 431 for name in names:
421 432 self.hook('pretag', throw=True, node=hex(node), tag=name,
422 433 local=local)
423 434 if name in branches:
424 435 self.ui.warn(_("warning: tag %s conflicts with existing"
425 436 " branch name\n") % name)
426 437
427 438 def writetags(fp, names, munge, prevtags):
428 439 fp.seek(0, 2)
429 440 if prevtags and prevtags[-1] != '\n':
430 441 fp.write('\n')
431 442 for name in names:
432 443 m = munge and munge(name) or name
433 444 if (self._tagscache.tagtypes and
434 445 name in self._tagscache.tagtypes):
435 446 old = self.tags().get(name, nullid)
436 447 fp.write('%s %s\n' % (hex(old), m))
437 448 fp.write('%s %s\n' % (hex(node), m))
438 449 fp.close()
439 450
440 451 prevtags = ''
441 452 if local:
442 453 try:
443 454 fp = self.opener('localtags', 'r+')
444 455 except IOError:
445 456 fp = self.opener('localtags', 'a')
446 457 else:
447 458 prevtags = fp.read()
448 459
449 460 # local tags are stored in the current charset
450 461 writetags(fp, names, None, prevtags)
451 462 for name in names:
452 463 self.hook('tag', node=hex(node), tag=name, local=local)
453 464 return
454 465
455 466 try:
456 467 fp = self.wfile('.hgtags', 'rb+')
457 468 except IOError, e:
458 469 if e.errno != errno.ENOENT:
459 470 raise
460 471 fp = self.wfile('.hgtags', 'ab')
461 472 else:
462 473 prevtags = fp.read()
463 474
464 475 # committed tags are stored in UTF-8
465 476 writetags(fp, names, encoding.fromlocal, prevtags)
466 477
467 478 fp.close()
468 479
469 480 self.invalidatecaches()
470 481
471 482 if '.hgtags' not in self.dirstate:
472 483 self[None].add(['.hgtags'])
473 484
474 485 m = matchmod.exact(self.root, '', ['.hgtags'])
475 486 tagnode = self.commit(message, user, date, extra=extra, match=m)
476 487
477 488 for name in names:
478 489 self.hook('tag', node=hex(node), tag=name, local=local)
479 490
480 491 return tagnode
481 492
482 493 def tag(self, names, node, message, local, user, date):
483 494 '''tag a revision with one or more symbolic names.
484 495
485 496 names is a list of strings or, when adding a single tag, names may be a
486 497 string.
487 498
488 499 if local is True, the tags are stored in a per-repository file.
489 500 otherwise, they are stored in the .hgtags file, and a new
490 501 changeset is committed with the change.
491 502
492 503 keyword arguments:
493 504
494 505 local: whether to store tags in non-version-controlled file
495 506 (default False)
496 507
497 508 message: commit message to use if committing
498 509
499 510 user: name of user to use if committing
500 511
501 512 date: date tuple to use if committing'''
502 513
503 514 if not local:
504 515 for x in self.status()[:5]:
505 516 if '.hgtags' in x:
506 517 raise util.Abort(_('working copy of .hgtags is changed '
507 518 '(please commit .hgtags manually)'))
508 519
509 520 self.tags() # instantiate the cache
510 521 self._tag(names, node, message, local, user, date)
511 522
512 523 @filteredpropertycache
513 524 def _tagscache(self):
514 525 '''Returns a tagscache object that contains various tags related
515 526 caches.'''
516 527
517 528 # This simplifies its cache management by having one decorated
518 529 # function (this one) and the rest simply fetch things from it.
519 530 class tagscache(object):
520 531 def __init__(self):
521 532 # These two define the set of tags for this repository. tags
522 533 # maps tag name to node; tagtypes maps tag name to 'global' or
523 534 # 'local'. (Global tags are defined by .hgtags across all
524 535 # heads, and local tags are defined in .hg/localtags.)
525 536 # They constitute the in-memory cache of tags.
526 537 self.tags = self.tagtypes = None
527 538
528 539 self.nodetagscache = self.tagslist = None
529 540
530 541 cache = tagscache()
531 542 cache.tags, cache.tagtypes = self._findtags()
532 543
533 544 return cache
534 545
535 546 def tags(self):
536 547 '''return a mapping of tag to node'''
537 548 t = {}
538 549 if self.changelog.filteredrevs:
539 550 tags, tt = self._findtags()
540 551 else:
541 552 tags = self._tagscache.tags
542 553 for k, v in tags.iteritems():
543 554 try:
544 555 # ignore tags to unknown nodes
545 556 self.changelog.rev(v)
546 557 t[k] = v
547 558 except (error.LookupError, ValueError):
548 559 pass
549 560 return t
550 561
551 562 def _findtags(self):
552 563 '''Do the hard work of finding tags. Return a pair of dicts
553 564 (tags, tagtypes) where tags maps tag name to node, and tagtypes
554 565 maps tag name to a string like \'global\' or \'local\'.
555 566 Subclasses or extensions are free to add their own tags, but
556 567 should be aware that the returned dicts will be retained for the
557 568 duration of the localrepo object.'''
558 569
559 570 # XXX what tagtype should subclasses/extensions use? Currently
560 571 # mq and bookmarks add tags, but do not set the tagtype at all.
561 572 # Should each extension invent its own tag type? Should there
562 573 # be one tagtype for all such "virtual" tags? Or is the status
563 574 # quo fine?
564 575
565 576 alltags = {} # map tag name to (node, hist)
566 577 tagtypes = {}
567 578
568 579 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
569 580 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
570 581
571 582 # Build the return dicts. Have to re-encode tag names because
572 583 # the tags module always uses UTF-8 (in order not to lose info
573 584 # writing to the cache), but the rest of Mercurial wants them in
574 585 # local encoding.
575 586 tags = {}
576 587 for (name, (node, hist)) in alltags.iteritems():
577 588 if node != nullid:
578 589 tags[encoding.tolocal(name)] = node
579 590 tags['tip'] = self.changelog.tip()
580 591 tagtypes = dict([(encoding.tolocal(name), value)
581 592 for (name, value) in tagtypes.iteritems()])
582 593 return (tags, tagtypes)
583 594
584 595 def tagtype(self, tagname):
585 596 '''
586 597 return the type of the given tag. result can be:
587 598
588 599 'local' : a local tag
589 600 'global' : a global tag
590 601 None : tag does not exist
591 602 '''
592 603
593 604 return self._tagscache.tagtypes.get(tagname)
594 605
595 606 def tagslist(self):
596 607 '''return a list of tags ordered by revision'''
597 608 if not self._tagscache.tagslist:
598 609 l = []
599 610 for t, n in self.tags().iteritems():
600 611 r = self.changelog.rev(n)
601 612 l.append((r, t, n))
602 613 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
603 614
604 615 return self._tagscache.tagslist
605 616
606 617 def nodetags(self, node):
607 618 '''return the tags associated with a node'''
608 619 if not self._tagscache.nodetagscache:
609 620 nodetagscache = {}
610 621 for t, n in self._tagscache.tags.iteritems():
611 622 nodetagscache.setdefault(n, []).append(t)
612 623 for tags in nodetagscache.itervalues():
613 624 tags.sort()
614 625 self._tagscache.nodetagscache = nodetagscache
615 626 return self._tagscache.nodetagscache.get(node, [])
616 627
617 628 def nodebookmarks(self, node):
618 629 marks = []
619 630 for bookmark, n in self._bookmarks.iteritems():
620 631 if n == node:
621 632 marks.append(bookmark)
622 633 return sorted(marks)
623 634
624 635 def _branchtags(self, partial, lrev):
625 636 # TODO: rename this function?
626 637 tiprev = len(self) - 1
627 638 if lrev != tiprev:
628 639 ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
629 640 self._updatebranchcache(partial, ctxgen)
630 641 self._writebranchcache(partial, self.changelog.tip(), tiprev)
631 642
632 643 return partial
633 644
634 645 @unfilteredmeth # Until we get a smarter cache management
635 646 def updatebranchcache(self):
636 647 tip = self.changelog.tip()
637 648 if self._branchcache is not None and self._branchcachetip == tip:
638 649 return
639 650
640 651 oldtip = self._branchcachetip
641 652 self._branchcachetip = tip
642 653 if oldtip is None or oldtip not in self.changelog.nodemap:
643 654 partial, last, lrev = self._readbranchcache()
644 655 else:
645 656 lrev = self.changelog.rev(oldtip)
646 657 partial = self._branchcache
647 658
648 659 self._branchtags(partial, lrev)
649 660 # this private cache holds all heads (not just the branch tips)
650 661 self._branchcache = partial
651 662
652 663 def branchmap(self):
653 664 '''returns a dictionary {branch: [branchheads]}'''
654 665 if self.changelog.filteredrevs:
655 666 # some changeset are excluded we can't use the cache
656 667 branchmap = {}
657 668 self._updatebranchcache(branchmap, (self[r] for r in self))
658 669 return branchmap
659 670 else:
660 671 self.updatebranchcache()
661 672 return self._branchcache
662 673
663 674
664 675 def _branchtip(self, heads):
665 676 '''return the tipmost branch head in heads'''
666 677 tip = heads[-1]
667 678 for h in reversed(heads):
668 679 if not self[h].closesbranch():
669 680 tip = h
670 681 break
671 682 return tip
672 683
673 684 def branchtip(self, branch):
674 685 '''return the tip node for a given branch'''
675 686 if branch not in self.branchmap():
676 687 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
677 688 return self._branchtip(self.branchmap()[branch])
678 689
679 690 def branchtags(self):
680 691 '''return a dict where branch names map to the tipmost head of
681 692 the branch, open heads come before closed'''
682 693 bt = {}
683 694 for bn, heads in self.branchmap().iteritems():
684 695 bt[bn] = self._branchtip(heads)
685 696 return bt
686 697
687 698 @unfilteredmeth # Until we get a smarter cache management
688 699 def _readbranchcache(self):
689 700 partial = {}
690 701 try:
691 702 f = self.opener("cache/branchheads")
692 703 lines = f.read().split('\n')
693 704 f.close()
694 705 except (IOError, OSError):
695 706 return {}, nullid, nullrev
696 707
697 708 try:
698 709 last, lrev = lines.pop(0).split(" ", 1)
699 710 last, lrev = bin(last), int(lrev)
700 711 if lrev >= len(self) or self[lrev].node() != last:
701 712 # invalidate the cache
702 713 raise ValueError('invalidating branch cache (tip differs)')
703 714 for l in lines:
704 715 if not l:
705 716 continue
706 717 node, label = l.split(" ", 1)
707 718 label = encoding.tolocal(label.strip())
708 719 if not node in self:
709 720 raise ValueError('invalidating branch cache because node '+
710 721 '%s does not exist' % node)
711 722 partial.setdefault(label, []).append(bin(node))
712 723 except KeyboardInterrupt:
713 724 raise
714 725 except Exception, inst:
715 726 if self.ui.debugflag:
716 727 self.ui.warn(str(inst), '\n')
717 728 partial, last, lrev = {}, nullid, nullrev
718 729 return partial, last, lrev
719 730
720 731 @unfilteredmeth # Until we get a smarter cache management
721 732 def _writebranchcache(self, branches, tip, tiprev):
722 733 try:
723 734 f = self.opener("cache/branchheads", "w", atomictemp=True)
724 735 f.write("%s %s\n" % (hex(tip), tiprev))
725 736 for label, nodes in branches.iteritems():
726 737 for node in nodes:
727 738 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
728 739 f.close()
729 740 except (IOError, OSError):
730 741 pass
731 742
732 743 @unfilteredmeth # Until we get a smarter cache management
733 744 def _updatebranchcache(self, partial, ctxgen):
734 745 """Given a branchhead cache, partial, that may have extra nodes or be
735 746 missing heads, and a generator of nodes that are at least a superset of
736 747 heads missing, this function updates partial to be correct.
737 748 """
738 749 # collect new branch entries
739 750 newbranches = {}
740 751 for c in ctxgen:
741 752 newbranches.setdefault(c.branch(), []).append(c.node())
742 753 # if older branchheads are reachable from new ones, they aren't
743 754 # really branchheads. Note checking parents is insufficient:
744 755 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
745 756 for branch, newnodes in newbranches.iteritems():
746 757 bheads = partial.setdefault(branch, [])
747 758 # Remove candidate heads that no longer are in the repo (e.g., as
748 759 # the result of a strip that just happened). Avoid using 'node in
749 760 # self' here because that dives down into branchcache code somewhat
750 761 # recursively.
751 762 bheadrevs = [self.changelog.rev(node) for node in bheads
752 763 if self.changelog.hasnode(node)]
753 764 newheadrevs = [self.changelog.rev(node) for node in newnodes
754 765 if self.changelog.hasnode(node)]
755 766 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
756 767 # Remove duplicates - nodes that are in newheadrevs and are already
757 768 # in bheadrevs. This can happen if you strip a node whose parent
758 769 # was already a head (because they're on different branches).
759 770 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
760 771
761 772 # Starting from tip means fewer passes over reachable. If we know
762 773 # the new candidates are not ancestors of existing heads, we don't
763 774 # have to examine ancestors of existing heads
764 775 if ctxisnew:
765 776 iterrevs = sorted(newheadrevs)
766 777 else:
767 778 iterrevs = list(bheadrevs)
768 779
769 780 # This loop prunes out two kinds of heads - heads that are
770 781 # superseded by a head in newheadrevs, and newheadrevs that are not
771 782 # heads because an existing head is their descendant.
772 783 while iterrevs:
773 784 latest = iterrevs.pop()
774 785 if latest not in bheadrevs:
775 786 continue
776 787 ancestors = set(self.changelog.ancestors([latest],
777 788 bheadrevs[0]))
778 789 if ancestors:
779 790 bheadrevs = [b for b in bheadrevs if b not in ancestors]
780 791 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
781 792
782 793 # There may be branches that cease to exist when the last commit in the
783 794 # branch was stripped. This code filters them out. Note that the
784 795 # branch that ceased to exist may not be in newbranches because
785 796 # newbranches is the set of candidate heads, which when you strip the
786 797 # last commit in a branch will be the parent branch.
787 798 for branch in partial.keys():
788 799 nodes = [head for head in partial[branch]
789 800 if self.changelog.hasnode(head)]
790 801 if not nodes:
791 802 del partial[branch]
792 803
793 804 def lookup(self, key):
794 805 return self[key].node()
795 806
796 807 def lookupbranch(self, key, remote=None):
797 808 repo = remote or self
798 809 if key in repo.branchmap():
799 810 return key
800 811
801 812 repo = (remote and remote.local()) and remote or self
802 813 return repo[key].branch()
803 814
804 815 def known(self, nodes):
805 816 nm = self.changelog.nodemap
806 817 pc = self._phasecache
807 818 result = []
808 819 for n in nodes:
809 820 r = nm.get(n)
810 821 resp = not (r is None or pc.phase(self, r) >= phases.secret)
811 822 result.append(resp)
812 823 return result
813 824
814 825 def local(self):
815 826 return self
816 827
817 828 def cancopy(self):
818 829 return self.local() # so statichttprepo's override of local() works
819 830
820 831 def join(self, f):
821 832 return os.path.join(self.path, f)
822 833
823 834 def wjoin(self, f):
824 835 return os.path.join(self.root, f)
825 836
826 837 def file(self, f):
827 838 if f[0] == '/':
828 839 f = f[1:]
829 840 return filelog.filelog(self.sopener, f)
830 841
831 842 def changectx(self, changeid):
832 843 return self[changeid]
833 844
834 845 def parents(self, changeid=None):
835 846 '''get list of changectxs for parents of changeid'''
836 847 return self[changeid].parents()
837 848
838 849 def setparents(self, p1, p2=nullid):
839 850 copies = self.dirstate.setparents(p1, p2)
840 851 if copies:
841 852 # Adjust copy records, the dirstate cannot do it, it
842 853 # requires access to parents manifests. Preserve them
843 854 # only for entries added to first parent.
844 855 pctx = self[p1]
845 856 for f in copies:
846 857 if f not in pctx and copies[f] in pctx:
847 858 self.dirstate.copy(copies[f], f)
848 859
849 860 def filectx(self, path, changeid=None, fileid=None):
850 861 """changeid can be a changeset revision, node, or tag.
851 862 fileid can be a file revision or node."""
852 863 return context.filectx(self, path, changeid, fileid)
853 864
854 865 def getcwd(self):
855 866 return self.dirstate.getcwd()
856 867
857 868 def pathto(self, f, cwd=None):
858 869 return self.dirstate.pathto(f, cwd)
859 870
860 871 def wfile(self, f, mode='r'):
861 872 return self.wopener(f, mode)
862 873
863 874 def _link(self, f):
864 875 return os.path.islink(self.wjoin(f))
865 876
866 877 def _loadfilter(self, filter):
867 878 if filter not in self.filterpats:
868 879 l = []
869 880 for pat, cmd in self.ui.configitems(filter):
870 881 if cmd == '!':
871 882 continue
872 883 mf = matchmod.match(self.root, '', [pat])
873 884 fn = None
874 885 params = cmd
875 886 for name, filterfn in self._datafilters.iteritems():
876 887 if cmd.startswith(name):
877 888 fn = filterfn
878 889 params = cmd[len(name):].lstrip()
879 890 break
880 891 if not fn:
881 892 fn = lambda s, c, **kwargs: util.filter(s, c)
882 893 # Wrap old filters not supporting keyword arguments
883 894 if not inspect.getargspec(fn)[2]:
884 895 oldfn = fn
885 896 fn = lambda s, c, **kwargs: oldfn(s, c)
886 897 l.append((mf, fn, params))
887 898 self.filterpats[filter] = l
888 899 return self.filterpats[filter]
889 900
890 901 def _filter(self, filterpats, filename, data):
891 902 for mf, fn, cmd in filterpats:
892 903 if mf(filename):
893 904 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
894 905 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
895 906 break
896 907
897 908 return data
898 909
899 910 @unfilteredpropertycache
900 911 def _encodefilterpats(self):
901 912 return self._loadfilter('encode')
902 913
903 914 @unfilteredpropertycache
904 915 def _decodefilterpats(self):
905 916 return self._loadfilter('decode')
906 917
907 918 def adddatafilter(self, name, filter):
908 919 self._datafilters[name] = filter
909 920
910 921 def wread(self, filename):
911 922 if self._link(filename):
912 923 data = os.readlink(self.wjoin(filename))
913 924 else:
914 925 data = self.wopener.read(filename)
915 926 return self._filter(self._encodefilterpats, filename, data)
916 927
917 928 def wwrite(self, filename, data, flags):
918 929 data = self._filter(self._decodefilterpats, filename, data)
919 930 if 'l' in flags:
920 931 self.wopener.symlink(data, filename)
921 932 else:
922 933 self.wopener.write(filename, data)
923 934 if 'x' in flags:
924 935 util.setflags(self.wjoin(filename), False, True)
925 936
926 937 def wwritedata(self, filename, data):
927 938 return self._filter(self._decodefilterpats, filename, data)
928 939
929 940 def transaction(self, desc):
930 941 tr = self._transref and self._transref() or None
931 942 if tr and tr.running():
932 943 return tr.nest()
933 944
934 945 # abort here if the journal already exists
935 946 if os.path.exists(self.sjoin("journal")):
936 947 raise error.RepoError(
937 948 _("abandoned transaction found - run hg recover"))
938 949
939 950 self._writejournal(desc)
940 951 renames = [(x, undoname(x)) for x in self._journalfiles()]
941 952
942 953 tr = transaction.transaction(self.ui.warn, self.sopener,
943 954 self.sjoin("journal"),
944 955 aftertrans(renames),
945 956 self.store.createmode)
946 957 self._transref = weakref.ref(tr)
947 958 return tr
948 959
949 960 def _journalfiles(self):
950 961 return (self.sjoin('journal'), self.join('journal.dirstate'),
951 962 self.join('journal.branch'), self.join('journal.desc'),
952 963 self.join('journal.bookmarks'),
953 964 self.sjoin('journal.phaseroots'))
954 965
955 966 def undofiles(self):
956 967 return [undoname(x) for x in self._journalfiles()]
957 968
958 969 def _writejournal(self, desc):
959 970 self.opener.write("journal.dirstate",
960 971 self.opener.tryread("dirstate"))
961 972 self.opener.write("journal.branch",
962 973 encoding.fromlocal(self.dirstate.branch()))
963 974 self.opener.write("journal.desc",
964 975 "%d\n%s\n" % (len(self), desc))
965 976 self.opener.write("journal.bookmarks",
966 977 self.opener.tryread("bookmarks"))
967 978 self.sopener.write("journal.phaseroots",
968 979 self.sopener.tryread("phaseroots"))
969 980
970 981 def recover(self):
971 982 lock = self.lock()
972 983 try:
973 984 if os.path.exists(self.sjoin("journal")):
974 985 self.ui.status(_("rolling back interrupted transaction\n"))
975 986 transaction.rollback(self.sopener, self.sjoin("journal"),
976 987 self.ui.warn)
977 988 self.invalidate()
978 989 return True
979 990 else:
980 991 self.ui.warn(_("no interrupted transaction available\n"))
981 992 return False
982 993 finally:
983 994 lock.release()
984 995
985 996 def rollback(self, dryrun=False, force=False):
986 997 wlock = lock = None
987 998 try:
988 999 wlock = self.wlock()
989 1000 lock = self.lock()
990 1001 if os.path.exists(self.sjoin("undo")):
991 1002 return self._rollback(dryrun, force)
992 1003 else:
993 1004 self.ui.warn(_("no rollback information available\n"))
994 1005 return 1
995 1006 finally:
996 1007 release(lock, wlock)
997 1008
998 1009 @unfilteredmeth # Until we get smarter cache management
999 1010 def _rollback(self, dryrun, force):
1000 1011 ui = self.ui
1001 1012 try:
1002 1013 args = self.opener.read('undo.desc').splitlines()
1003 1014 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1004 1015 if len(args) >= 3:
1005 1016 detail = args[2]
1006 1017 oldtip = oldlen - 1
1007 1018
1008 1019 if detail and ui.verbose:
1009 1020 msg = (_('repository tip rolled back to revision %s'
1010 1021 ' (undo %s: %s)\n')
1011 1022 % (oldtip, desc, detail))
1012 1023 else:
1013 1024 msg = (_('repository tip rolled back to revision %s'
1014 1025 ' (undo %s)\n')
1015 1026 % (oldtip, desc))
1016 1027 except IOError:
1017 1028 msg = _('rolling back unknown transaction\n')
1018 1029 desc = None
1019 1030
1020 1031 if not force and self['.'] != self['tip'] and desc == 'commit':
1021 1032 raise util.Abort(
1022 1033 _('rollback of last commit while not checked out '
1023 1034 'may lose data'), hint=_('use -f to force'))
1024 1035
1025 1036 ui.status(msg)
1026 1037 if dryrun:
1027 1038 return 0
1028 1039
1029 1040 parents = self.dirstate.parents()
1030 1041 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
1031 1042 if os.path.exists(self.join('undo.bookmarks')):
1032 1043 util.rename(self.join('undo.bookmarks'),
1033 1044 self.join('bookmarks'))
1034 1045 if os.path.exists(self.sjoin('undo.phaseroots')):
1035 1046 util.rename(self.sjoin('undo.phaseroots'),
1036 1047 self.sjoin('phaseroots'))
1037 1048 self.invalidate()
1038 1049
1039 1050 # Discard all cache entries to force reloading everything.
1040 1051 self._filecache.clear()
1041 1052
1042 1053 parentgone = (parents[0] not in self.changelog.nodemap or
1043 1054 parents[1] not in self.changelog.nodemap)
1044 1055 if parentgone:
1045 1056 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1046 1057 try:
1047 1058 branch = self.opener.read('undo.branch')
1048 1059 self.dirstate.setbranch(encoding.tolocal(branch))
1049 1060 except IOError:
1050 1061 ui.warn(_('named branch could not be reset: '
1051 1062 'current branch is still \'%s\'\n')
1052 1063 % self.dirstate.branch())
1053 1064
1054 1065 self.dirstate.invalidate()
1055 1066 parents = tuple([p.rev() for p in self.parents()])
1056 1067 if len(parents) > 1:
1057 1068 ui.status(_('working directory now based on '
1058 1069 'revisions %d and %d\n') % parents)
1059 1070 else:
1060 1071 ui.status(_('working directory now based on '
1061 1072 'revision %d\n') % parents)
1062 1073 # TODO: if we know which new heads may result from this rollback, pass
1063 1074 # them to destroy(), which will prevent the branchhead cache from being
1064 1075 # invalidated.
1065 1076 self.destroyed()
1066 1077 return 0
1067 1078
1068 1079 def invalidatecaches(self):
1069 1080
1070 1081 if '_tagscache' in vars(self):
1071 1082 # can't use delattr on proxy
1072 1083 del self.__dict__['_tagscache']
1073 1084
1074 1085 self.unfiltered()._branchcache = None # in UTF-8
1075 1086 self.unfiltered()._branchcachetip = None
1076 1087 obsolete.clearobscaches(self)
1077 1088
1078 1089 def invalidatedirstate(self):
1079 1090 '''Invalidates the dirstate, causing the next call to dirstate
1080 1091 to check if it was modified since the last time it was read,
1081 1092 rereading it if it has.
1082 1093
1083 1094 This is different to dirstate.invalidate() that it doesn't always
1084 1095 rereads the dirstate. Use dirstate.invalidate() if you want to
1085 1096 explicitly read the dirstate again (i.e. restoring it to a previous
1086 1097 known good state).'''
1087 1098 if hasunfilteredcache(self, 'dirstate'):
1088 1099 for k in self.dirstate._filecache:
1089 1100 try:
1090 1101 delattr(self.dirstate, k)
1091 1102 except AttributeError:
1092 1103 pass
1093 1104 delattr(self.unfiltered(), 'dirstate')
1094 1105
1095 1106 def invalidate(self):
1096 1107 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
1097 1108 for k in self._filecache:
1098 1109 # dirstate is invalidated separately in invalidatedirstate()
1099 1110 if k == 'dirstate':
1100 1111 continue
1101 1112
1102 1113 try:
1103 1114 delattr(unfiltered, k)
1104 1115 except AttributeError:
1105 1116 pass
1106 1117 self.invalidatecaches()
1107 1118
1108 1119 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1109 1120 try:
1110 1121 l = lock.lock(lockname, 0, releasefn, desc=desc)
1111 1122 except error.LockHeld, inst:
1112 1123 if not wait:
1113 1124 raise
1114 1125 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1115 1126 (desc, inst.locker))
1116 1127 # default to 600 seconds timeout
1117 1128 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1118 1129 releasefn, desc=desc)
1119 1130 if acquirefn:
1120 1131 acquirefn()
1121 1132 return l
1122 1133
1123 1134 def _afterlock(self, callback):
1124 1135 """add a callback to the current repository lock.
1125 1136
1126 1137 The callback will be executed on lock release."""
1127 1138 l = self._lockref and self._lockref()
1128 1139 if l:
1129 1140 l.postrelease.append(callback)
1130 1141 else:
1131 1142 callback()
1132 1143
1133 1144 def lock(self, wait=True):
1134 1145 '''Lock the repository store (.hg/store) and return a weak reference
1135 1146 to the lock. Use this before modifying the store (e.g. committing or
1136 1147 stripping). If you are opening a transaction, get a lock as well.)'''
1137 1148 l = self._lockref and self._lockref()
1138 1149 if l is not None and l.held:
1139 1150 l.lock()
1140 1151 return l
1141 1152
1142 1153 def unlock():
1143 1154 self.store.write()
1144 1155 if hasunfilteredcache(self, '_phasecache'):
1145 1156 self._phasecache.write()
1146 1157 for k, ce in self._filecache.items():
1147 1158 if k == 'dirstate':
1148 1159 continue
1149 1160 ce.refresh()
1150 1161
1151 1162 l = self._lock(self.sjoin("lock"), wait, unlock,
1152 1163 self.invalidate, _('repository %s') % self.origroot)
1153 1164 self._lockref = weakref.ref(l)
1154 1165 return l
1155 1166
1156 1167 def wlock(self, wait=True):
1157 1168 '''Lock the non-store parts of the repository (everything under
1158 1169 .hg except .hg/store) and return a weak reference to the lock.
1159 1170 Use this before modifying files in .hg.'''
1160 1171 l = self._wlockref and self._wlockref()
1161 1172 if l is not None and l.held:
1162 1173 l.lock()
1163 1174 return l
1164 1175
1165 1176 def unlock():
1166 1177 self.dirstate.write()
1167 1178 ce = self._filecache.get('dirstate')
1168 1179 if ce:
1169 1180 ce.refresh()
1170 1181
1171 1182 l = self._lock(self.join("wlock"), wait, unlock,
1172 1183 self.invalidatedirstate, _('working directory of %s') %
1173 1184 self.origroot)
1174 1185 self._wlockref = weakref.ref(l)
1175 1186 return l
1176 1187
1177 1188 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1178 1189 """
1179 1190 commit an individual file as part of a larger transaction
1180 1191 """
1181 1192
1182 1193 fname = fctx.path()
1183 1194 text = fctx.data()
1184 1195 flog = self.file(fname)
1185 1196 fparent1 = manifest1.get(fname, nullid)
1186 1197 fparent2 = fparent2o = manifest2.get(fname, nullid)
1187 1198
1188 1199 meta = {}
1189 1200 copy = fctx.renamed()
1190 1201 if copy and copy[0] != fname:
1191 1202 # Mark the new revision of this file as a copy of another
1192 1203 # file. This copy data will effectively act as a parent
1193 1204 # of this new revision. If this is a merge, the first
1194 1205 # parent will be the nullid (meaning "look up the copy data")
1195 1206 # and the second one will be the other parent. For example:
1196 1207 #
1197 1208 # 0 --- 1 --- 3 rev1 changes file foo
1198 1209 # \ / rev2 renames foo to bar and changes it
1199 1210 # \- 2 -/ rev3 should have bar with all changes and
1200 1211 # should record that bar descends from
1201 1212 # bar in rev2 and foo in rev1
1202 1213 #
1203 1214 # this allows this merge to succeed:
1204 1215 #
1205 1216 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1206 1217 # \ / merging rev3 and rev4 should use bar@rev2
1207 1218 # \- 2 --- 4 as the merge base
1208 1219 #
1209 1220
1210 1221 cfname = copy[0]
1211 1222 crev = manifest1.get(cfname)
1212 1223 newfparent = fparent2
1213 1224
1214 1225 if manifest2: # branch merge
1215 1226 if fparent2 == nullid or crev is None: # copied on remote side
1216 1227 if cfname in manifest2:
1217 1228 crev = manifest2[cfname]
1218 1229 newfparent = fparent1
1219 1230
1220 1231 # find source in nearest ancestor if we've lost track
1221 1232 if not crev:
1222 1233 self.ui.debug(" %s: searching for copy revision for %s\n" %
1223 1234 (fname, cfname))
1224 1235 for ancestor in self[None].ancestors():
1225 1236 if cfname in ancestor:
1226 1237 crev = ancestor[cfname].filenode()
1227 1238 break
1228 1239
1229 1240 if crev:
1230 1241 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1231 1242 meta["copy"] = cfname
1232 1243 meta["copyrev"] = hex(crev)
1233 1244 fparent1, fparent2 = nullid, newfparent
1234 1245 else:
1235 1246 self.ui.warn(_("warning: can't find ancestor for '%s' "
1236 1247 "copied from '%s'!\n") % (fname, cfname))
1237 1248
1238 1249 elif fparent2 != nullid:
1239 1250 # is one parent an ancestor of the other?
1240 1251 fparentancestor = flog.ancestor(fparent1, fparent2)
1241 1252 if fparentancestor == fparent1:
1242 1253 fparent1, fparent2 = fparent2, nullid
1243 1254 elif fparentancestor == fparent2:
1244 1255 fparent2 = nullid
1245 1256
1246 1257 # is the file changed?
1247 1258 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1248 1259 changelist.append(fname)
1249 1260 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1250 1261
1251 1262 # are just the flags changed during merge?
1252 1263 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1253 1264 changelist.append(fname)
1254 1265
1255 1266 return fparent1
1256 1267
1257 1268 @unfilteredmeth
1258 1269 def commit(self, text="", user=None, date=None, match=None, force=False,
1259 1270 editor=False, extra={}):
1260 1271 """Add a new revision to current repository.
1261 1272
1262 1273 Revision information is gathered from the working directory,
1263 1274 match can be used to filter the committed files. If editor is
1264 1275 supplied, it is called to get a commit message.
1265 1276 """
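# Illustrative sketch (not part of the original file): a minimal caller of
# commit(); the message and user are invented for illustration.
#
#     node = repo.commit(text="fix foo", user="alice <alice@example.com>")
#     if node is None:
#         repo.ui.status("nothing changed\n")   # see the early return below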
1266 1277
1267 1278 def fail(f, msg):
1268 1279 raise util.Abort('%s: %s' % (f, msg))
1269 1280
1270 1281 if not match:
1271 1282 match = matchmod.always(self.root, '')
1272 1283
1273 1284 if not force:
1274 1285 vdirs = []
1275 1286 match.dir = vdirs.append
1276 1287 match.bad = fail
1277 1288
1278 1289 wlock = self.wlock()
1279 1290 try:
1280 1291 wctx = self[None]
1281 1292 merge = len(wctx.parents()) > 1
1282 1293
1283 1294 if (not force and merge and match and
1284 1295 (match.files() or match.anypats())):
1285 1296 raise util.Abort(_('cannot partially commit a merge '
1286 1297 '(do not specify files or patterns)'))
1287 1298
1288 1299 changes = self.status(match=match, clean=force)
1289 1300 if force:
1290 1301 changes[0].extend(changes[6]) # mq may commit unchanged files
1291 1302
1292 1303 # check subrepos
1293 1304 subs = []
1294 1305 commitsubs = set()
1295 1306 newstate = wctx.substate.copy()
1296 1307 # only manage subrepos and .hgsubstate if .hgsub is present
1297 1308 if '.hgsub' in wctx:
1298 1309 # we'll decide whether to track this ourselves, thanks
1299 1310 if '.hgsubstate' in changes[0]:
1300 1311 changes[0].remove('.hgsubstate')
1301 1312 if '.hgsubstate' in changes[2]:
1302 1313 changes[2].remove('.hgsubstate')
1303 1314
1304 1315 # compare current state to last committed state
1305 1316 # build new substate based on last committed state
1306 1317 oldstate = wctx.p1().substate
1307 1318 for s in sorted(newstate.keys()):
1308 1319 if not match(s):
1309 1320 # ignore working copy, use old state if present
1310 1321 if s in oldstate:
1311 1322 newstate[s] = oldstate[s]
1312 1323 continue
1313 1324 if not force:
1314 1325 raise util.Abort(
1315 1326 _("commit with new subrepo %s excluded") % s)
1316 1327 if wctx.sub(s).dirty(True):
1317 1328 if not self.ui.configbool('ui', 'commitsubrepos'):
1318 1329 raise util.Abort(
1319 1330 _("uncommitted changes in subrepo %s") % s,
1320 1331 hint=_("use --subrepos for recursive commit"))
1321 1332 subs.append(s)
1322 1333 commitsubs.add(s)
1323 1334 else:
1324 1335 bs = wctx.sub(s).basestate()
1325 1336 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1326 1337 if oldstate.get(s, (None, None, None))[1] != bs:
1327 1338 subs.append(s)
1328 1339
1329 1340 # check for removed subrepos
1330 1341 for p in wctx.parents():
1331 1342 r = [s for s in p.substate if s not in newstate]
1332 1343 subs += [s for s in r if match(s)]
1333 1344 if subs:
1334 1345 if (not match('.hgsub') and
1335 1346 '.hgsub' in (wctx.modified() + wctx.added())):
1336 1347 raise util.Abort(
1337 1348 _("can't commit subrepos without .hgsub"))
1338 1349 changes[0].insert(0, '.hgsubstate')
1339 1350
1340 1351 elif '.hgsub' in changes[2]:
1341 1352 # clean up .hgsubstate when .hgsub is removed
1342 1353 if ('.hgsubstate' in wctx and
1343 1354 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1344 1355 changes[2].insert(0, '.hgsubstate')
1345 1356
1346 1357 # make sure all explicit patterns are matched
1347 1358 if not force and match.files():
1348 1359 matched = set(changes[0] + changes[1] + changes[2])
1349 1360
1350 1361 for f in match.files():
1351 1362 f = self.dirstate.normalize(f)
1352 1363 if f == '.' or f in matched or f in wctx.substate:
1353 1364 continue
1354 1365 if f in changes[3]: # missing
1355 1366 fail(f, _('file not found!'))
1356 1367 if f in vdirs: # visited directory
1357 1368 d = f + '/'
1358 1369 for mf in matched:
1359 1370 if mf.startswith(d):
1360 1371 break
1361 1372 else:
1362 1373 fail(f, _("no match under directory!"))
1363 1374 elif f not in self.dirstate:
1364 1375 fail(f, _("file not tracked!"))
1365 1376
1366 1377 if (not force and not extra.get("close") and not merge
1367 1378 and not (changes[0] or changes[1] or changes[2])
1368 1379 and wctx.branch() == wctx.p1().branch()):
1369 1380 return None
1370 1381
1371 1382 if merge and changes[3]:
1372 1383 raise util.Abort(_("cannot commit merge with missing files"))
1373 1384
1374 1385 ms = mergemod.mergestate(self)
1375 1386 for f in changes[0]:
1376 1387 if f in ms and ms[f] == 'u':
1377 1388 raise util.Abort(_("unresolved merge conflicts "
1378 1389 "(see hg help resolve)"))
1379 1390
1380 1391 cctx = context.workingctx(self, text, user, date, extra, changes)
1381 1392 if editor:
1382 1393 cctx._text = editor(self, cctx, subs)
1383 1394 edited = (text != cctx._text)
1384 1395
1385 1396 # commit subs and write new state
1386 1397 if subs:
1387 1398 for s in sorted(commitsubs):
1388 1399 sub = wctx.sub(s)
1389 1400 self.ui.status(_('committing subrepository %s\n') %
1390 1401 subrepo.subrelpath(sub))
1391 1402 sr = sub.commit(cctx._text, user, date)
1392 1403 newstate[s] = (newstate[s][0], sr)
1393 1404 subrepo.writestate(self, newstate)
1394 1405
1395 1406 # Save commit message in case this transaction gets rolled back
1396 1407 # (e.g. by a pretxncommit hook). Leave the content alone on
1397 1408 # the assumption that the user will use the same editor again.
1398 1409 msgfn = self.savecommitmessage(cctx._text)
1399 1410
1400 1411 p1, p2 = self.dirstate.parents()
1401 1412 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1402 1413 try:
1403 1414 self.hook("precommit", throw=True, parent1=hookp1,
1404 1415 parent2=hookp2)
1405 1416 ret = self.commitctx(cctx, True)
1406 1417 except: # re-raises
1407 1418 if edited:
1408 1419 self.ui.write(
1409 1420 _('note: commit message saved in %s\n') % msgfn)
1410 1421 raise
1411 1422
1412 1423 # update bookmarks, dirstate and mergestate
1413 1424 bookmarks.update(self, [p1, p2], ret)
1414 1425 for f in changes[0] + changes[1]:
1415 1426 self.dirstate.normal(f)
1416 1427 for f in changes[2]:
1417 1428 self.dirstate.drop(f)
1418 1429 self.dirstate.setparents(ret)
1419 1430 ms.reset()
1420 1431 finally:
1421 1432 wlock.release()
1422 1433
1423 1434 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1424 1435 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1425 1436 self._afterlock(commithook)
1426 1437 return ret
1427 1438
1428 1439 @unfilteredmeth
1429 1440 def commitctx(self, ctx, error=False):
1430 1441 """Add a new revision to current repository.
1431 1442 Revision information is passed via the context argument.
1432 1443 """
1433 1444
1434 1445 tr = lock = None
1435 1446 removed = list(ctx.removed())
1436 1447 p1, p2 = ctx.p1(), ctx.p2()
1437 1448 user = ctx.user()
1438 1449
1439 1450 lock = self.lock()
1440 1451 try:
1441 1452 tr = self.transaction("commit")
1442 1453 trp = weakref.proxy(tr)
1443 1454
1444 1455 if ctx.files():
1445 1456 m1 = p1.manifest().copy()
1446 1457 m2 = p2.manifest()
1447 1458
1448 1459 # check in files
1449 1460 new = {}
1450 1461 changed = []
1451 1462 linkrev = len(self)
1452 1463 for f in sorted(ctx.modified() + ctx.added()):
1453 1464 self.ui.note(f + "\n")
1454 1465 try:
1455 1466 fctx = ctx[f]
1456 1467 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1457 1468 changed)
1458 1469 m1.set(f, fctx.flags())
1459 1470 except OSError, inst:
1460 1471 self.ui.warn(_("trouble committing %s!\n") % f)
1461 1472 raise
1462 1473 except IOError, inst:
1463 1474 errcode = getattr(inst, 'errno', errno.ENOENT)
1464 1475 if error or errcode and errcode != errno.ENOENT:
1465 1476 self.ui.warn(_("trouble committing %s!\n") % f)
1466 1477 raise
1467 1478 else:
1468 1479 removed.append(f)
1469 1480
1470 1481 # update manifest
1471 1482 m1.update(new)
1472 1483 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1473 1484 drop = [f for f in removed if f in m1]
1474 1485 for f in drop:
1475 1486 del m1[f]
1476 1487 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1477 1488 p2.manifestnode(), (new, drop))
1478 1489 files = changed + removed
1479 1490 else:
1480 1491 mn = p1.manifestnode()
1481 1492 files = []
1482 1493
1483 1494 # update changelog
1484 1495 self.changelog.delayupdate()
1485 1496 n = self.changelog.add(mn, files, ctx.description(),
1486 1497 trp, p1.node(), p2.node(),
1487 1498 user, ctx.date(), ctx.extra().copy())
1488 1499 p = lambda: self.changelog.writepending() and self.root or ""
1489 1500 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1490 1501 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1491 1502 parent2=xp2, pending=p)
1492 1503 self.changelog.finalize(trp)
1493 1504 # set the new commit to its proper phase
1494 1505 targetphase = phases.newcommitphase(self.ui)
1495 1506 if targetphase:
1496 1507 # retracting the boundary does not alter parent changesets.
1497 1508 # if a parent has a higher phase, the resulting phase will
1498 1509 # be compliant anyway
1499 1510 #
1500 1511 # if the minimal phase was 0 we don't need to retract anything
1501 1512 phases.retractboundary(self, targetphase, [n])
1502 1513 tr.close()
1503 1514 self.updatebranchcache()
1504 1515 return n
1505 1516 finally:
1506 1517 if tr:
1507 1518 tr.release()
1508 1519 lock.release()
1509 1520
1510 1521 @unfilteredmeth
1511 1522 def destroyed(self, newheadnodes=None):
1512 1523 '''Inform the repository that nodes have been destroyed.
1513 1524 Intended for use by strip and rollback, so there's a common
1514 1525 place for anything that has to be done after destroying history.
1515 1526
1516 1527 If you know the branchheads cache was up to date before nodes were removed
1517 1528 and you also know the set of candidate new heads that may have resulted
1518 1529 from the destruction, you can set newheadnodes. This will enable the
1519 1530 code to update the branchheads cache, rather than having future code
1520 1531 decide it's invalid and regenerate it from scratch.
1521 1532 '''
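# Illustrative sketch (not part of the original file): how strip-like code
# might call this after removing nodes; 'candidates' is an invented set of
# possible new head nodes known to the caller.
#
#     repo.destroyed(newheadnodes=candidates)
#
# Passing newheadnodes lets the branchheads cache be updated in place
# instead of being discarded and rebuilt on the next read.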
1522 1533 # If we have info (newheadnodes) on how to update the branch cache, do
1523 1534 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1524 1535 # will be caught the next time it is read.
1525 1536 if newheadnodes:
1526 1537 tiprev = len(self) - 1
1527 1538 ctxgen = (self[node] for node in newheadnodes
1528 1539 if self.changelog.hasnode(node))
1529 1540 self._updatebranchcache(self._branchcache, ctxgen)
1530 1541 self._writebranchcache(self._branchcache, self.changelog.tip(),
1531 1542 tiprev)
1532 1543
1533 1544 # Ensure the persistent tag cache is updated. Doing it now
1534 1545 # means that the tag cache only has to worry about destroyed
1535 1546 # heads immediately after a strip/rollback. That in turn
1536 1547 # guarantees that "cachetip == currenttip" (comparing both rev
1537 1548 # and node) always means no nodes have been added or destroyed.
1538 1549
1539 1550 # XXX this is suboptimal when qrefresh'ing: we strip the current
1540 1551 # head, refresh the tag cache, then immediately add a new head.
1541 1552 # But I think doing it this way is necessary for the "instant
1542 1553 # tag cache retrieval" case to work.
1543 1554 self.invalidatecaches()
1544 1555
1545 1556 # Discard all cache entries to force reloading everything.
1546 1557 self._filecache.clear()
1547 1558
1548 1559 def walk(self, match, node=None):
1549 1560 '''
1550 1561 walk recursively through the directory tree or a given
1551 1562 changeset, finding all files matched by the match
1552 1563 function
1553 1564 '''
1554 1565 return self[node].walk(match)
1555 1566
1556 1567 def status(self, node1='.', node2=None, match=None,
1557 1568 ignored=False, clean=False, unknown=False,
1558 1569 listsubrepos=False):
1559 1570 """return status of files between two nodes or node and working
1560 1571 directory.
1561 1572
1562 1573 If node1 is None, use the first dirstate parent instead.
1563 1574 If node2 is None, compare node1 with working directory.
1564 1575 """
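# Illustrative sketch (not part of the original file): unpacking the seven
# lists returned below; the variable names mirror the tuple 'r' built at
# the end of this method.
#
#     modified, added, removed, deleted, unknown, ignored, clean = \
#         repo.status(ignored=True, clean=True, unknown=True)
#     repo.ui.write("%d modified, %d added\n" % (len(modified), len(added)))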
1565 1576
1566 1577 def mfmatches(ctx):
1567 1578 mf = ctx.manifest().copy()
1568 1579 if match.always():
1569 1580 return mf
1570 1581 for fn in mf.keys():
1571 1582 if not match(fn):
1572 1583 del mf[fn]
1573 1584 return mf
1574 1585
1575 1586 if isinstance(node1, context.changectx):
1576 1587 ctx1 = node1
1577 1588 else:
1578 1589 ctx1 = self[node1]
1579 1590 if isinstance(node2, context.changectx):
1580 1591 ctx2 = node2
1581 1592 else:
1582 1593 ctx2 = self[node2]
1583 1594
1584 1595 working = ctx2.rev() is None
1585 1596 parentworking = working and ctx1 == self['.']
1586 1597 match = match or matchmod.always(self.root, self.getcwd())
1587 1598 listignored, listclean, listunknown = ignored, clean, unknown
1588 1599
1589 1600 # load earliest manifest first for caching reasons
1590 1601 if not working and ctx2.rev() < ctx1.rev():
1591 1602 ctx2.manifest()
1592 1603
1593 1604 if not parentworking:
1594 1605 def bad(f, msg):
1595 1606 # 'f' may be a directory pattern from 'match.files()',
1596 1607 # so 'f not in ctx1' is not enough
1597 1608 if f not in ctx1 and f not in ctx1.dirs():
1598 1609 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1599 1610 match.bad = bad
1600 1611
1601 1612 if working: # we need to scan the working dir
1602 1613 subrepos = []
1603 1614 if '.hgsub' in self.dirstate:
1604 1615 subrepos = ctx2.substate.keys()
1605 1616 s = self.dirstate.status(match, subrepos, listignored,
1606 1617 listclean, listunknown)
1607 1618 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1608 1619
1609 1620 # check for any possibly clean files
1610 1621 if parentworking and cmp:
1611 1622 fixup = []
1612 1623 # do a full compare of any files that might have changed
1613 1624 for f in sorted(cmp):
1614 1625 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1615 1626 or ctx1[f].cmp(ctx2[f])):
1616 1627 modified.append(f)
1617 1628 else:
1618 1629 fixup.append(f)
1619 1630
1620 1631 # update dirstate for files that are actually clean
1621 1632 if fixup:
1622 1633 if listclean:
1623 1634 clean += fixup
1624 1635
1625 1636 try:
1626 1637 # updating the dirstate is optional
1627 1638 # so we don't wait on the lock
1628 1639 wlock = self.wlock(False)
1629 1640 try:
1630 1641 for f in fixup:
1631 1642 self.dirstate.normal(f)
1632 1643 finally:
1633 1644 wlock.release()
1634 1645 except error.LockError:
1635 1646 pass
1636 1647
1637 1648 if not parentworking:
1638 1649 mf1 = mfmatches(ctx1)
1639 1650 if working:
1640 1651 # we are comparing working dir against non-parent
1641 1652 # generate a pseudo-manifest for the working dir
1642 1653 mf2 = mfmatches(self['.'])
1643 1654 for f in cmp + modified + added:
1644 1655 mf2[f] = None
1645 1656 mf2.set(f, ctx2.flags(f))
1646 1657 for f in removed:
1647 1658 if f in mf2:
1648 1659 del mf2[f]
1649 1660 else:
1650 1661 # we are comparing two revisions
1651 1662 deleted, unknown, ignored = [], [], []
1652 1663 mf2 = mfmatches(ctx2)
1653 1664
1654 1665 modified, added, clean = [], [], []
1655 1666 withflags = mf1.withflags() | mf2.withflags()
1656 1667 for fn in mf2:
1657 1668 if fn in mf1:
1658 1669 if (fn not in deleted and
1659 1670 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1660 1671 (mf1[fn] != mf2[fn] and
1661 1672 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1662 1673 modified.append(fn)
1663 1674 elif listclean:
1664 1675 clean.append(fn)
1665 1676 del mf1[fn]
1666 1677 elif fn not in deleted:
1667 1678 added.append(fn)
1668 1679 removed = mf1.keys()
1669 1680
1670 1681 if working and modified and not self.dirstate._checklink:
1671 1682 # Symlink placeholders may get non-symlink-like contents
1672 1683 # via user error or dereferencing by NFS or Samba servers,
1673 1684 # so we filter out any placeholders that don't look like a
1674 1685 # symlink
1675 1686 sane = []
1676 1687 for f in modified:
1677 1688 if ctx2.flags(f) == 'l':
1678 1689 d = ctx2[f].data()
1679 1690 if len(d) >= 1024 or '\n' in d or util.binary(d):
1680 1691 self.ui.debug('ignoring suspect symlink placeholder'
1681 1692 ' "%s"\n' % f)
1682 1693 continue
1683 1694 sane.append(f)
1684 1695 modified = sane
1685 1696
1686 1697 r = modified, added, removed, deleted, unknown, ignored, clean
1687 1698
1688 1699 if listsubrepos:
1689 1700 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1690 1701 if working:
1691 1702 rev2 = None
1692 1703 else:
1693 1704 rev2 = ctx2.substate[subpath][1]
1694 1705 try:
1695 1706 submatch = matchmod.narrowmatcher(subpath, match)
1696 1707 s = sub.status(rev2, match=submatch, ignored=listignored,
1697 1708 clean=listclean, unknown=listunknown,
1698 1709 listsubrepos=True)
1699 1710 for rfiles, sfiles in zip(r, s):
1700 1711 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1701 1712 except error.LookupError:
1702 1713 self.ui.status(_("skipping missing subrepository: %s\n")
1703 1714 % subpath)
1704 1715
1705 1716 for l in r:
1706 1717 l.sort()
1707 1718 return r
1708 1719
1709 1720 def heads(self, start=None):
1710 1721 heads = self.changelog.heads(start)
1711 1722 # sort the output in rev descending order
1712 1723 return sorted(heads, key=self.changelog.rev, reverse=True)
1713 1724
1714 1725 def branchheads(self, branch=None, start=None, closed=False):
1715 1726 '''return a (possibly filtered) list of heads for the given branch
1716 1727
1717 1728 Heads are returned in topological order, from newest to oldest.
1718 1729 If branch is None, use the dirstate branch.
1719 1730 If start is not None, return only heads reachable from start.
1720 1731 If closed is True, return heads that are marked as closed as well.
1721 1732 '''
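# Illustrative sketch (not part of the original file): listing the open
# heads of the 'default' branch, newest first.
#
#     for h in repo.branchheads('default'):
#         repo.ui.write("%s\n" % short(h))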
1722 1733 if branch is None:
1723 1734 branch = self[None].branch()
1724 1735 branches = self.branchmap()
1725 1736 if branch not in branches:
1726 1737 return []
1727 1738 # the cache returns heads ordered lowest to highest
1728 1739 bheads = list(reversed(branches[branch]))
1729 1740 if start is not None:
1730 1741 # filter out the heads that cannot be reached from startrev
1731 1742 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1732 1743 bheads = [h for h in bheads if h in fbheads]
1733 1744 if not closed:
1734 1745 bheads = [h for h in bheads if not self[h].closesbranch()]
1735 1746 return bheads
1736 1747
1737 1748 def branches(self, nodes):
1738 1749 if not nodes:
1739 1750 nodes = [self.changelog.tip()]
1740 1751 b = []
1741 1752 for n in nodes:
1742 1753 t = n
1743 1754 while True:
1744 1755 p = self.changelog.parents(n)
1745 1756 if p[1] != nullid or p[0] == nullid:
1746 1757 b.append((t, n, p[0], p[1]))
1747 1758 break
1748 1759 n = p[0]
1749 1760 return b
1750 1761
1751 1762 def between(self, pairs):
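# The loop below walks first parents from 'top' toward 'bottom' and
# records the nodes found at exponentially growing distances (1, 2, 4,
# 8, ...), giving a sparse sample of the chain between the two nodes.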
1752 1763 r = []
1753 1764
1754 1765 for top, bottom in pairs:
1755 1766 n, l, i = top, [], 0
1756 1767 f = 1
1757 1768
1758 1769 while n != bottom and n != nullid:
1759 1770 p = self.changelog.parents(n)[0]
1760 1771 if i == f:
1761 1772 l.append(n)
1762 1773 f = f * 2
1763 1774 n = p
1764 1775 i += 1
1765 1776
1766 1777 r.append(l)
1767 1778
1768 1779 return r
1769 1780
1770 1781 def pull(self, remote, heads=None, force=False):
1771 1782 # don't open a transaction for nothing or you break future useful
1772 1783 # rollback calls
1773 1784 tr = None
1774 1785 trname = 'pull\n' + util.hidepassword(remote.url())
1775 1786 lock = self.lock()
1776 1787 try:
1777 1788 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1778 1789 force=force)
1779 1790 common, fetch, rheads = tmp
1780 1791 if not fetch:
1781 1792 self.ui.status(_("no changes found\n"))
1782 1793 added = []
1783 1794 result = 0
1784 1795 else:
1785 1796 tr = self.transaction(trname)
1786 1797 if heads is None and list(common) == [nullid]:
1787 1798 self.ui.status(_("requesting all changes\n"))
1788 1799 elif heads is None and remote.capable('changegroupsubset'):
1789 1800 # issue1320, avoid a race if remote changed after discovery
1790 1801 heads = rheads
1791 1802
1792 1803 if remote.capable('getbundle'):
1793 1804 cg = remote.getbundle('pull', common=common,
1794 1805 heads=heads or rheads)
1795 1806 elif heads is None:
1796 1807 cg = remote.changegroup(fetch, 'pull')
1797 1808 elif not remote.capable('changegroupsubset'):
1798 1809 raise util.Abort(_("partial pull cannot be done because "
1799 1810 "other repository doesn't support "
1800 1811 "changegroupsubset."))
1801 1812 else:
1802 1813 cg = remote.changegroupsubset(fetch, heads, 'pull')
1803 1814 clstart = len(self.changelog)
1804 1815 result = self.addchangegroup(cg, 'pull', remote.url())
1805 1816 clend = len(self.changelog)
1806 1817 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1807 1818
1808 1819 # compute target subset
1809 1820 if heads is None:
1810 1821 # We pulled everything possible
1811 1822 # sync on everything common
1812 1823 subset = common + added
1813 1824 else:
1814 1825 # We pulled a specific subset
1815 1826 # sync on this subset
1816 1827 subset = heads
1817 1828
1818 1829 # Get remote phases data from remote
1819 1830 remotephases = remote.listkeys('phases')
1820 1831 publishing = bool(remotephases.get('publishing', False))
1821 1832 if remotephases and not publishing:
1822 1833 # remote is new and non-publishing
1823 1834 pheads, _dr = phases.analyzeremotephases(self, subset,
1824 1835 remotephases)
1825 1836 phases.advanceboundary(self, phases.public, pheads)
1826 1837 phases.advanceboundary(self, phases.draft, subset)
1827 1838 else:
1828 1839 # Remote is old or publishing; all common changesets
1829 1840 # should be seen as public
1830 1841 phases.advanceboundary(self, phases.public, subset)
1831 1842
1832 1843 if obsolete._enabled:
1833 1844 self.ui.debug('fetching remote obsolete markers\n')
1834 1845 remoteobs = remote.listkeys('obsolete')
1835 1846 if 'dump0' in remoteobs:
1836 1847 if tr is None:
1837 1848 tr = self.transaction(trname)
1838 1849 for key in sorted(remoteobs, reverse=True):
1839 1850 if key.startswith('dump'):
1840 1851 data = base85.b85decode(remoteobs[key])
1841 1852 self.obsstore.mergemarkers(tr, data)
1842 1853 if tr is not None:
1843 1854 tr.close()
1844 1855 finally:
1845 1856 if tr is not None:
1846 1857 tr.release()
1847 1858 lock.release()
1848 1859
1849 1860 return result
1850 1861
1851 1862 def checkpush(self, force, revs):
1852 1863 """Extensions can override this function if additional checks have
1853 1864 to be performed before pushing, or call it if they override the push
1854 1865 command.
1855 1866 """
1856 1867 pass
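# Illustrative sketch (not part of the original file): how an extension's
# reposetup might wrap checkpush() to add a policy check; the class name
# and the rule enforced are invented.
#
#     class policyrepo(repo.__class__):
#         def checkpush(self, force, revs):
#             super(policyrepo, self).checkpush(force, revs)
#             if not force and revs is None:
#                 raise util.Abort("pushes must name explicit revisions")
#     repo.__class__ = policyrepo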
1857 1868
1858 1869 def push(self, remote, force=False, revs=None, newbranch=False):
1859 1870 '''Push outgoing changesets (limited by revs) from the current
1860 1871 repository to remote. Return an integer:
1861 1872 - None means nothing to push
1862 1873 - 0 means HTTP error
1863 1874 - 1 means we pushed and remote head count is unchanged *or*
1864 1875 we have outgoing changesets but refused to push
1865 1876 - other values as described by addchangegroup()
1866 1877 '''
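# Illustrative sketch (not part of the original file): interpreting the
# return value documented above; 'remote' is assumed to be a peer.
#
#     ret = repo.push(remote)
#     if ret is None:
#         repo.ui.status("nothing to push\n")
#     elif ret == 0:
#         repo.ui.warn("push failed (HTTP error)\n")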
1867 1878 # there are two ways to push to remote repo:
1868 1879 #
1869 1880 # addchangegroup assumes local user can lock remote
1870 1881 # repo (local filesystem, old ssh servers).
1871 1882 #
1872 1883 # unbundle assumes local user cannot lock remote repo (new ssh
1873 1884 # servers, http servers).
1874 1885
1875 1886 if not remote.canpush():
1876 1887 raise util.Abort(_("destination does not support push"))
1877 1888 # get local lock as we might write phase data
1878 1889 unfi = self.unfiltered()
1879 1890 locallock = self.lock()
1880 1891 try:
1881 1892 self.checkpush(force, revs)
1882 1893 lock = None
1883 1894 unbundle = remote.capable('unbundle')
1884 1895 if not unbundle:
1885 1896 lock = remote.lock()
1886 1897 try:
1887 1898 # discovery
1888 1899 fci = discovery.findcommonincoming
1889 1900 commoninc = fci(unfi, remote, force=force)
1890 1901 common, inc, remoteheads = commoninc
1891 1902 fco = discovery.findcommonoutgoing
1892 1903 outgoing = fco(unfi, remote, onlyheads=revs,
1893 1904 commoninc=commoninc, force=force)
1894 1905
1895 1906
1896 1907 if not outgoing.missing:
1897 1908 # nothing to push
1898 1909 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1899 1910 ret = None
1900 1911 else:
1901 1912 # something to push
1902 1913 if not force:
1903 1914 # if self.obsstore is empty --> no obsolete markers,
1904 1915 # so we can skip the iteration
1905 1916 if unfi.obsstore:
1906 1917 # these messages are defined here to stay within the 80 char limit
1907 1918 mso = _("push includes obsolete changeset: %s!")
1908 1919 msu = _("push includes unstable changeset: %s!")
1909 1920 msb = _("push includes bumped changeset: %s!")
1910 1921 # If there is at least one obsolete or unstable
1911 1922 # changeset in missing, at least one of the
1912 1923 # missing heads will be obsolete or unstable.
1913 1924 # So checking heads only is ok
1914 1925 for node in outgoing.missingheads:
1915 1926 ctx = unfi[node]
1916 1927 if ctx.obsolete():
1917 1928 raise util.Abort(mso % ctx)
1918 1929 elif ctx.unstable():
1919 1930 raise util.Abort(msu % ctx)
1920 1931 elif ctx.bumped():
1921 1932 raise util.Abort(msb % ctx)
1922 1933 discovery.checkheads(unfi, remote, outgoing,
1923 1934 remoteheads, newbranch,
1924 1935 bool(inc))
1925 1936
1926 1937 # create a changegroup from local
1927 1938 if revs is None and not outgoing.excluded:
1928 1939 # push everything,
1929 1940 # use the fast path, no race possible on push
1930 1941 cg = self._changegroup(outgoing.missing, 'push')
1931 1942 else:
1932 1943 cg = self.getlocalbundle('push', outgoing)
1933 1944
1934 1945 # apply changegroup to remote
1935 1946 if unbundle:
1936 1947 # local repo finds heads on server, finds out what
1937 1948 # revs it must push. once revs transferred, if server
1938 1949 # finds it has different heads (someone else won
1939 1950 # commit/push race), server aborts.
1940 1951 if force:
1941 1952 remoteheads = ['force']
1942 1953 # ssh: return remote's addchangegroup()
1943 1954 # http: return remote's addchangegroup() or 0 for error
1944 1955 ret = remote.unbundle(cg, remoteheads, 'push')
1945 1956 else:
1946 1957 # we return an integer indicating remote head count
1947 1958 # change
1948 1959 ret = remote.addchangegroup(cg, 'push', self.url())
1949 1960
1950 1961 if ret:
1951 1962 # push succeeded, synchronize the target of the push
1952 1963 cheads = outgoing.missingheads
1953 1964 elif revs is None:
1954 1965 # Our entire push failed; synchronize on all common heads
1955 1966 cheads = outgoing.commonheads
1956 1967 else:
1957 1968 # I want cheads = heads(::missingheads and ::commonheads)
1958 1969 # (missingheads is revs with secret changeset filtered out)
1959 1970 #
1960 1971 # This can be expressed as:
1961 1972 # cheads = ( (missingheads and ::commonheads)
1962 1973 #            + (commonheads and ::missingheads)
1963 1974 #          )
1964 1975 #
1965 1976 # while trying to push we already computed the following:
1966 1977 # common = (::commonheads)
1967 1978 # missing = ((commonheads::missingheads) - commonheads)
1968 1979 #
1969 1980 # We can pick:
1970 1981 # * missingheads part of common (::commonheads)
1971 1982 common = set(outgoing.common)
1972 1983 cheads = [node for node in revs if node in common]
1973 1984 # and
1974 1985 # * commonheads parents on missing
1975 1986 revset = unfi.set('%ln and parents(roots(%ln))',
1976 1987 outgoing.commonheads,
1977 1988 outgoing.missing)
1978 1989 cheads.extend(c.node() for c in revset)
1979 1990 # even when we don't push, exchanging phase data is useful
1980 1991 remotephases = remote.listkeys('phases')
1981 1992 if not remotephases: # old server or public only repo
1982 1993 phases.advanceboundary(self, phases.public, cheads)
1983 1994 # don't push any phase data as there is nothing to push
1984 1995 else:
1985 1996 ana = phases.analyzeremotephases(self, cheads, remotephases)
1986 1997 pheads, droots = ana
1987 1998 ### Apply remote phase on local
1988 1999 if remotephases.get('publishing', False):
1989 2000 phases.advanceboundary(self, phases.public, cheads)
1990 2001 else: # publish = False
1991 2002 phases.advanceboundary(self, phases.public, pheads)
1992 2003 phases.advanceboundary(self, phases.draft, cheads)
1993 2004 ### Apply local phase on remote
1994 2005
1995 2006 # Get the list of all revs that are draft on remote but public here.
1996 2007 # XXX Beware that the revset breaks if droots is not strictly
1997 2008 # XXX made of roots; we may want to ensure it is, but that is costly
1998 2009 outdated = unfi.set('heads((%ln::%ln) and public())',
1999 2010 droots, cheads)
2000 2011 for newremotehead in outdated:
2001 2012 r = remote.pushkey('phases',
2002 2013 newremotehead.hex(),
2003 2014 str(phases.draft),
2004 2015 str(phases.public))
2005 2016 if not r:
2006 2017 self.ui.warn(_('updating %s to public failed!\n')
2007 2018 % newremotehead)
2008 2019 self.ui.debug('try to push obsolete markers to remote\n')
2009 2020 if (obsolete._enabled and self.obsstore and
2010 2021 'obsolete' in remote.listkeys('namespaces')):
2011 2022 rslts = []
2012 2023 remotedata = self.listkeys('obsolete')
2013 2024 for key in sorted(remotedata, reverse=True):
2014 2025 # reverse sort to ensure we end with dump0
2015 2026 data = remotedata[key]
2016 2027 rslts.append(remote.pushkey('obsolete', key, '', data))
2017 2028 if [r for r in rslts if not r]:
2018 2029 msg = _('failed to push some obsolete markers!\n')
2019 2030 self.ui.warn(msg)
2020 2031 finally:
2021 2032 if lock is not None:
2022 2033 lock.release()
2023 2034 finally:
2024 2035 locallock.release()
2025 2036
2026 2037 self.ui.debug("checking for updated bookmarks\n")
2027 2038 rb = remote.listkeys('bookmarks')
2028 2039 for k in rb.keys():
2029 2040 if k in unfi._bookmarks:
2030 2041 nr, nl = rb[k], hex(self._bookmarks[k])
2031 2042 if nr in unfi:
2032 2043 cr = unfi[nr]
2033 2044 cl = unfi[nl]
2034 2045 if bookmarks.validdest(unfi, cr, cl):
2035 2046 r = remote.pushkey('bookmarks', k, nr, nl)
2036 2047 if r:
2037 2048 self.ui.status(_("updating bookmark %s\n") % k)
2038 2049 else:
2039 2050 self.ui.warn(_('updating bookmark %s'
2040 2051 ' failed!\n') % k)
2041 2052
2042 2053 return ret
2043 2054
2044 2055 def changegroupinfo(self, nodes, source):
2045 2056 if self.ui.verbose or source == 'bundle':
2046 2057 self.ui.status(_("%d changesets found\n") % len(nodes))
2047 2058 if self.ui.debugflag:
2048 2059 self.ui.debug("list of changesets:\n")
2049 2060 for node in nodes:
2050 2061 self.ui.debug("%s\n" % hex(node))
2051 2062
2052 2063 def changegroupsubset(self, bases, heads, source):
2053 2064 """Compute a changegroup consisting of all the nodes that are
2054 2065 descendants of any of the bases and ancestors of any of the heads.
2055 2066 Return a chunkbuffer object whose read() method will return
2056 2067 successive changegroup chunks.
2057 2068
2058 2069 It is fairly complex as determining which filenodes and which
2059 2070 manifest nodes need to be included for the changeset to be complete
2060 2071 is non-trivial.
2061 2072
2062 2073 Another wrinkle is doing the reverse, figuring out which changeset in
2063 2074 the changegroup a particular filenode or manifestnode belongs to.
2064 2075 """
2065 2076 cl = self.changelog
2066 2077 if not bases:
2067 2078 bases = [nullid]
2068 2079 csets, bases, heads = cl.nodesbetween(bases, heads)
2069 2080 # We assume that all ancestors of bases are known
2070 2081 common = set(cl.ancestors([cl.rev(n) for n in bases]))
2071 2082 return self._changegroupsubset(common, csets, heads, source)
2072 2083
2073 2084 def getlocalbundle(self, source, outgoing):
2074 2085 """Like getbundle, but taking a discovery.outgoing as an argument.
2075 2086
2076 2087 This is only implemented for local repos and reuses potentially
2077 2088 precomputed sets in outgoing."""
2078 2089 if not outgoing.missing:
2079 2090 return None
2080 2091 return self._changegroupsubset(outgoing.common,
2081 2092 outgoing.missing,
2082 2093 outgoing.missingheads,
2083 2094 source)
2084 2095
2085 2096 def getbundle(self, source, heads=None, common=None):
2086 2097 """Like changegroupsubset, but returns the set difference between the
2087 2098 ancestors of heads and the ancestors of common.
2088 2099
2089 2100 If heads is None, use the local heads. If common is None, use [nullid].
2090 2101
2091 2102 The nodes in common might not all be known locally due to the way the
2092 2103 current discovery protocol works.
2093 2104 """
2094 2105 cl = self.changelog
2095 2106 if common:
2096 2107 nm = cl.nodemap
2097 2108 common = [n for n in common if n in nm]
2098 2109 else:
2099 2110 common = [nullid]
2100 2111 if not heads:
2101 2112 heads = cl.heads()
2102 2113 return self.getlocalbundle(source,
2103 2114 discovery.outgoing(cl, common, heads))
2104 2115
2105 2116 @unfilteredmeth
2106 2117 def _changegroupsubset(self, commonrevs, csets, heads, source):
2107 2118
2108 2119 cl = self.changelog
2109 2120 mf = self.manifest
2110 2121 mfs = {} # needed manifests
2111 2122 fnodes = {} # needed file nodes
2112 2123 changedfiles = set()
2113 2124 fstate = ['', {}]
2114 2125 count = [0, 0]
2115 2126
2116 2127 # can we go through the fast path ?
2117 2128 heads.sort()
2118 2129 if heads == sorted(self.heads()):
2119 2130 return self._changegroup(csets, source)
2120 2131
2121 2132 # slow path
2122 2133 self.hook('preoutgoing', throw=True, source=source)
2123 2134 self.changegroupinfo(csets, source)
2124 2135
2125 2136 # filter any nodes that claim to be part of the known set
2126 2137 def prune(revlog, missing):
2127 2138 rr, rl = revlog.rev, revlog.linkrev
2128 2139 return [n for n in missing
2129 2140 if rl(rr(n)) not in commonrevs]
2130 2141
2131 2142 progress = self.ui.progress
2132 2143 _bundling = _('bundling')
2133 2144 _changesets = _('changesets')
2134 2145 _manifests = _('manifests')
2135 2146 _files = _('files')
2136 2147
2137 2148 def lookup(revlog, x):
2138 2149 if revlog == cl:
2139 2150 c = cl.read(x)
2140 2151 changedfiles.update(c[3])
2141 2152 mfs.setdefault(c[0], x)
2142 2153 count[0] += 1
2143 2154 progress(_bundling, count[0],
2144 2155 unit=_changesets, total=count[1])
2145 2156 return x
2146 2157 elif revlog == mf:
2147 2158 clnode = mfs[x]
2148 2159 mdata = mf.readfast(x)
2149 2160 for f, n in mdata.iteritems():
2150 2161 if f in changedfiles:
2151 2162 fnodes[f].setdefault(n, clnode)
2152 2163 count[0] += 1
2153 2164 progress(_bundling, count[0],
2154 2165 unit=_manifests, total=count[1])
2155 2166 return clnode
2156 2167 else:
2157 2168 progress(_bundling, count[0], item=fstate[0],
2158 2169 unit=_files, total=count[1])
2159 2170 return fstate[1][x]
2160 2171
2161 2172 bundler = changegroup.bundle10(lookup)
2162 2173 reorder = self.ui.config('bundle', 'reorder', 'auto')
2163 2174 if reorder == 'auto':
2164 2175 reorder = None
2165 2176 else:
2166 2177 reorder = util.parsebool(reorder)
2167 2178
2168 2179 def gengroup():
2169 2180 # Create a changenode group generator that will call our functions
2170 2181 # back to lookup the owning changenode and collect information.
2171 2182 count[:] = [0, len(csets)]
2172 2183 for chunk in cl.group(csets, bundler, reorder=reorder):
2173 2184 yield chunk
2174 2185 progress(_bundling, None)
2175 2186
2176 2187 # Create a generator for the manifestnodes that calls our lookup
2177 2188 # and data collection functions back.
2178 2189 for f in changedfiles:
2179 2190 fnodes[f] = {}
2180 2191 count[:] = [0, len(mfs)]
2181 2192 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2182 2193 yield chunk
2183 2194 progress(_bundling, None)
2184 2195
2185 2196 mfs.clear()
2186 2197
2187 2198 # Go through all our files in order sorted by name.
2188 2199 count[:] = [0, len(changedfiles)]
2189 2200 for fname in sorted(changedfiles):
2190 2201 filerevlog = self.file(fname)
2191 2202 if not len(filerevlog):
2192 2203 raise util.Abort(_("empty or missing revlog for %s")
2193 2204 % fname)
2194 2205 fstate[0] = fname
2195 2206 fstate[1] = fnodes.pop(fname, {})
2196 2207
2197 2208 nodelist = prune(filerevlog, fstate[1])
2198 2209 if nodelist:
2199 2210 count[0] += 1
2200 2211 yield bundler.fileheader(fname)
2201 2212 for chunk in filerevlog.group(nodelist, bundler, reorder):
2202 2213 yield chunk
2203 2214
2204 2215 # Signal that no more groups are left.
2205 2216 yield bundler.close()
2206 2217 progress(_bundling, None)
2207 2218
2208 2219 if csets:
2209 2220 self.hook('outgoing', node=hex(csets[0]), source=source)
2210 2221
2211 2222 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2212 2223
2213 2224 def changegroup(self, basenodes, source):
2214 2225 # to avoid a race we use changegroupsubset() (issue1320)
2215 2226 return self.changegroupsubset(basenodes, self.heads(), source)
2216 2227
2217 2228 @unfilteredmeth
2218 2229 def _changegroup(self, nodes, source):
2219 2230 """Compute the changegroup of all nodes that we have that a recipient
2220 2231 doesn't. Return a chunkbuffer object whose read() method will return
2221 2232 successive changegroup chunks.
2222 2233
2223 2234 This is much easier than the previous function as we can assume that
2224 2235 the recipient has any changenode we aren't sending them.
2225 2236
2226 2237 nodes is the set of nodes to send"""
2227 2238
2228 2239 cl = self.changelog
2229 2240 mf = self.manifest
2230 2241 mfs = {}
2231 2242 changedfiles = set()
2232 2243 fstate = ['']
2233 2244 count = [0, 0]
2234 2245
2235 2246 self.hook('preoutgoing', throw=True, source=source)
2236 2247 self.changegroupinfo(nodes, source)
2237 2248
2238 2249 revset = set([cl.rev(n) for n in nodes])
2239 2250
2240 2251 def gennodelst(log):
2241 2252 ln, llr = log.node, log.linkrev
2242 2253 return [ln(r) for r in log if llr(r) in revset]
2243 2254
2244 2255 progress = self.ui.progress
2245 2256 _bundling = _('bundling')
2246 2257 _changesets = _('changesets')
2247 2258 _manifests = _('manifests')
2248 2259 _files = _('files')
2249 2260
2250 2261 def lookup(revlog, x):
2251 2262 if revlog == cl:
2252 2263 c = cl.read(x)
2253 2264 changedfiles.update(c[3])
2254 2265 mfs.setdefault(c[0], x)
2255 2266 count[0] += 1
2256 2267 progress(_bundling, count[0],
2257 2268 unit=_changesets, total=count[1])
2258 2269 return x
2259 2270 elif revlog == mf:
2260 2271 count[0] += 1
2261 2272 progress(_bundling, count[0],
2262 2273 unit=_manifests, total=count[1])
2263 2274 return cl.node(revlog.linkrev(revlog.rev(x)))
2264 2275 else:
2265 2276 progress(_bundling, count[0], item=fstate[0],
2266 2277 total=count[1], unit=_files)
2267 2278 return cl.node(revlog.linkrev(revlog.rev(x)))
2268 2279
2269 2280 bundler = changegroup.bundle10(lookup)
2270 2281 reorder = self.ui.config('bundle', 'reorder', 'auto')
2271 2282 if reorder == 'auto':
2272 2283 reorder = None
2273 2284 else:
2274 2285 reorder = util.parsebool(reorder)
2275 2286
2276 2287 def gengroup():
2277 2288 '''yield a sequence of changegroup chunks (strings)'''
2278 2289 # construct a list of all changed files
2279 2290
2280 2291 count[:] = [0, len(nodes)]
2281 2292 for chunk in cl.group(nodes, bundler, reorder=reorder):
2282 2293 yield chunk
2283 2294 progress(_bundling, None)
2284 2295
2285 2296 count[:] = [0, len(mfs)]
2286 2297 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2287 2298 yield chunk
2288 2299 progress(_bundling, None)
2289 2300
2290 2301 count[:] = [0, len(changedfiles)]
2291 2302 for fname in sorted(changedfiles):
2292 2303 filerevlog = self.file(fname)
2293 2304 if not len(filerevlog):
2294 2305 raise util.Abort(_("empty or missing revlog for %s")
2295 2306 % fname)
2296 2307 fstate[0] = fname
2297 2308 nodelist = gennodelst(filerevlog)
2298 2309 if nodelist:
2299 2310 count[0] += 1
2300 2311 yield bundler.fileheader(fname)
2301 2312 for chunk in filerevlog.group(nodelist, bundler, reorder):
2302 2313 yield chunk
2303 2314 yield bundler.close()
2304 2315 progress(_bundling, None)
2305 2316
2306 2317 if nodes:
2307 2318 self.hook('outgoing', node=hex(nodes[0]), source=source)
2308 2319
2309 2320 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2310 2321
2311 2322 @unfilteredmeth
2312 2323 def addchangegroup(self, source, srctype, url, emptyok=False):
2313 2324 """Add the changegroup returned by source.read() to this repo.
2314 2325 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2315 2326 the URL of the repo where this changegroup is coming from.
2316 2327
2317 2328 Return an integer summarizing the change to this repo:
2318 2329 - nothing changed or no source: 0
2319 2330 - more heads than before: 1+added heads (2..n)
2320 2331 - fewer heads than before: -1-removed heads (-2..-n)
2321 2332 - number of heads stays the same: 1
2322 2333 """
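# Illustrative sketch (not part of the original file): decoding the head
# count delta described above; 'cg' and 'remote' are assumed to exist.
#
#     ret = repo.addchangegroup(cg, 'pull', remote.url())
#     if ret > 1:
#         repo.ui.status("%d new heads\n" % (ret - 1))
#     elif ret < 0:
#         repo.ui.status("%d heads removed\n" % (-ret - 1))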
2323 2334 def csmap(x):
2324 2335 self.ui.debug("add changeset %s\n" % short(x))
2325 2336 return len(cl)
2326 2337
2327 2338 def revmap(x):
2328 2339 return cl.rev(x)
2329 2340
2330 2341 if not source:
2331 2342 return 0
2332 2343
2333 2344 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2334 2345
2335 2346 changesets = files = revisions = 0
2336 2347 efiles = set()
2337 2348
2338 2349 # write changelog data to temp files so concurrent readers will not see
2339 2350 # an inconsistent view
2340 2351 cl = self.changelog
2341 2352 cl.delayupdate()
2342 2353 oldheads = cl.heads()
2343 2354
2344 2355 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2345 2356 try:
2346 2357 trp = weakref.proxy(tr)
2347 2358 # pull off the changeset group
2348 2359 self.ui.status(_("adding changesets\n"))
2349 2360 clstart = len(cl)
2350 2361 class prog(object):
2351 2362 step = _('changesets')
2352 2363 count = 1
2353 2364 ui = self.ui
2354 2365 total = None
2355 2366 def __call__(self):
2356 2367 self.ui.progress(self.step, self.count, unit=_('chunks'),
2357 2368 total=self.total)
2358 2369 self.count += 1
2359 2370 pr = prog()
2360 2371 source.callback = pr
2361 2372
2362 2373 source.changelogheader()
2363 2374 srccontent = cl.addgroup(source, csmap, trp)
2364 2375 if not (srccontent or emptyok):
2365 2376 raise util.Abort(_("received changelog group is empty"))
2366 2377 clend = len(cl)
2367 2378 changesets = clend - clstart
2368 2379 for c in xrange(clstart, clend):
2369 2380 efiles.update(self[c].files())
2370 2381 efiles = len(efiles)
2371 2382 self.ui.progress(_('changesets'), None)
2372 2383
2373 2384 # pull off the manifest group
2374 2385 self.ui.status(_("adding manifests\n"))
2375 2386 pr.step = _('manifests')
2376 2387 pr.count = 1
2377 2388 pr.total = changesets # manifests <= changesets
2378 2389 # no need to check for empty manifest group here:
2379 2390 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2380 2391 # no new manifest will be created and the manifest group will
2381 2392 # be empty during the pull
2382 2393 source.manifestheader()
2383 2394 self.manifest.addgroup(source, revmap, trp)
2384 2395 self.ui.progress(_('manifests'), None)
2385 2396
2386 2397 needfiles = {}
2387 2398 if self.ui.configbool('server', 'validate', default=False):
2388 2399 # validate incoming csets have their manifests
2389 2400 for cset in xrange(clstart, clend):
2390 2401 mfest = self.changelog.read(self.changelog.node(cset))[0]
2391 2402 mfest = self.manifest.readdelta(mfest)
2392 2403 # store file nodes we must see
2393 2404 for f, n in mfest.iteritems():
2394 2405 needfiles.setdefault(f, set()).add(n)
2395 2406
2396 2407 # process the files
2397 2408 self.ui.status(_("adding file changes\n"))
2398 2409 pr.step = _('files')
2399 2410 pr.count = 1
2400 2411 pr.total = efiles
2401 2412 source.callback = None
2402 2413
2403 2414 while True:
2404 2415 chunkdata = source.filelogheader()
2405 2416 if not chunkdata:
2406 2417 break
2407 2418 f = chunkdata["filename"]
2408 2419 self.ui.debug("adding %s revisions\n" % f)
2409 2420 pr()
2410 2421 fl = self.file(f)
2411 2422 o = len(fl)
2412 2423 if not fl.addgroup(source, revmap, trp):
2413 2424 raise util.Abort(_("received file revlog group is empty"))
2414 2425 revisions += len(fl) - o
2415 2426 files += 1
2416 2427 if f in needfiles:
2417 2428 needs = needfiles[f]
2418 2429 for new in xrange(o, len(fl)):
2419 2430 n = fl.node(new)
2420 2431 if n in needs:
2421 2432 needs.remove(n)
2422 2433 if not needs:
2423 2434 del needfiles[f]
2424 2435 self.ui.progress(_('files'), None)
2425 2436
2426 2437 for f, needs in needfiles.iteritems():
2427 2438 fl = self.file(f)
2428 2439 for n in needs:
2429 2440 try:
2430 2441 fl.rev(n)
2431 2442 except error.LookupError:
2432 2443 raise util.Abort(
2433 2444 _('missing file data for %s:%s - run hg verify') %
2434 2445 (f, hex(n)))
2435 2446
2436 2447 dh = 0
2437 2448 if oldheads:
2438 2449 heads = cl.heads()
2439 2450 dh = len(heads) - len(oldheads)
2440 2451 for h in heads:
2441 2452 if h not in oldheads and self[h].closesbranch():
2442 2453 dh -= 1
2443 2454 htext = ""
2444 2455 if dh:
2445 2456 htext = _(" (%+d heads)") % dh
2446 2457
2447 2458 self.ui.status(_("added %d changesets"
2448 2459 " with %d changes to %d files%s\n")
2449 2460 % (changesets, revisions, files, htext))
2450 2461 obsolete.clearobscaches(self)
2451 2462
2452 2463 if changesets > 0:
2453 2464 p = lambda: cl.writepending() and self.root or ""
2454 2465 self.hook('pretxnchangegroup', throw=True,
2455 2466 node=hex(cl.node(clstart)), source=srctype,
2456 2467 url=url, pending=p)
2457 2468
2458 2469 added = [cl.node(r) for r in xrange(clstart, clend)]
2459 2470 publishing = self.ui.configbool('phases', 'publish', True)
2460 2471 if srctype == 'push':
2461 2472 # Old servers can not push the boundary themselves.
2462 2473 # New servers won't push the boundary if the changeset already
2463 2474 # existed locally as secret
2464 2475 #
2465 2476 # We should not use added here but the list of all changes in
2466 2477 # the bundle
2467 2478 if publishing:
2468 2479 phases.advanceboundary(self, phases.public, srccontent)
2469 2480 else:
2470 2481 phases.advanceboundary(self, phases.draft, srccontent)
2471 2482 phases.retractboundary(self, phases.draft, added)
2472 2483 elif srctype != 'strip':
2473 2484 # publishing only alters behavior during push
2474 2485 #
2475 2486 # strip should not touch boundary at all
2476 2487 phases.retractboundary(self, phases.draft, added)
2477 2488
2478 2489 # make changelog see real files again
2479 2490 cl.finalize(trp)
2480 2491
2481 2492 tr.close()
2482 2493
2483 2494 if changesets > 0:
2484 2495 self.updatebranchcache()
2485 2496 def runhooks():
2486 2497 # forcefully update the on-disk branch cache
2487 2498 self.ui.debug("updating the branch cache\n")
2488 2499 self.hook("changegroup", node=hex(cl.node(clstart)),
2489 2500 source=srctype, url=url)
2490 2501
2491 2502 for n in added:
2492 2503 self.hook("incoming", node=hex(n), source=srctype,
2493 2504 url=url)
2494 2505 self._afterlock(runhooks)
2495 2506
2496 2507 finally:
2497 2508 tr.release()
2498 2509 # never return 0 here:
2499 2510 if dh < 0:
2500 2511 return dh - 1
2501 2512 else:
2502 2513 return dh + 1
2503 2514
2504 2515 def stream_in(self, remote, requirements):
2505 2516 lock = self.lock()
2506 2517 try:
2507 2518 # Save remote branchmap. We will use it later
2508 2519 # to speed up branchcache creation
2509 2520 rbranchmap = None
2510 2521 if remote.capable("branchmap"):
2511 2522 rbranchmap = remote.branchmap()
2512 2523
2513 2524 fp = remote.stream_out()
2514 2525 l = fp.readline()
2515 2526 try:
2516 2527 resp = int(l)
2517 2528 except ValueError:
2518 2529 raise error.ResponseError(
2519 2530 _('unexpected response from remote server:'), l)
2520 2531 if resp == 1:
2521 2532 raise util.Abort(_('operation forbidden by server'))
2522 2533 elif resp == 2:
2523 2534 raise util.Abort(_('locking the remote repository failed'))
2524 2535 elif resp != 0:
2525 2536 raise util.Abort(_('the server sent an unknown error code'))
2526 2537 self.ui.status(_('streaming all changes\n'))
2527 2538 l = fp.readline()
2528 2539 try:
2529 2540 total_files, total_bytes = map(int, l.split(' ', 1))
2530 2541 except (ValueError, TypeError):
2531 2542 raise error.ResponseError(
2532 2543 _('unexpected response from remote server:'), l)
2533 2544 self.ui.status(_('%d files to transfer, %s of data\n') %
2534 2545 (total_files, util.bytecount(total_bytes)))
2535 2546 handled_bytes = 0
2536 2547 self.ui.progress(_('clone'), 0, total=total_bytes)
2537 2548 start = time.time()
2538 2549 for i in xrange(total_files):
2539 2550 # XXX doesn't support '\n' or '\r' in filenames
2540 2551 l = fp.readline()
2541 2552 try:
2542 2553 name, size = l.split('\0', 1)
2543 2554 size = int(size)
2544 2555 except (ValueError, TypeError):
2545 2556 raise error.ResponseError(
2546 2557 _('unexpected response from remote server:'), l)
2547 2558 if self.ui.debugflag:
2548 2559 self.ui.debug('adding %s (%s)\n' %
2549 2560 (name, util.bytecount(size)))
2550 2561 # for backwards compat, name was partially encoded
2551 2562 ofp = self.sopener(store.decodedir(name), 'w')
2552 2563 for chunk in util.filechunkiter(fp, limit=size):
2553 2564 handled_bytes += len(chunk)
2554 2565 self.ui.progress(_('clone'), handled_bytes,
2555 2566 total=total_bytes)
2556 2567 ofp.write(chunk)
2557 2568 ofp.close()
2558 2569 elapsed = time.time() - start
2559 2570 if elapsed <= 0:
2560 2571 elapsed = 0.001
2561 2572 self.ui.progress(_('clone'), None)
2562 2573 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2563 2574 (util.bytecount(total_bytes), elapsed,
2564 2575 util.bytecount(total_bytes / elapsed)))
2565 2576
2566 2577 # new requirements = old non-format requirements +
2567 2578 # new format-related
2568 2579 # requirements from the streamed-in repository
2569 2580 requirements.update(set(self.requirements) - self.supportedformats)
2570 2581 self._applyrequirements(requirements)
2571 2582 self._writerequirements()
2572 2583
2573 2584 if rbranchmap:
2574 2585 rbheads = []
2575 2586 for bheads in rbranchmap.itervalues():
2576 2587 rbheads.extend(bheads)
2577 2588
2578 2589 self.branchcache = rbranchmap
2579 2590 if rbheads:
2580 2591 rtiprev = max((int(self.changelog.rev(node))
2581 2592 for node in rbheads))
2582 2593 self._writebranchcache(self.branchcache,
2583 2594 self[rtiprev].node(), rtiprev)
2584 2595 self.invalidate()
2585 2596 return len(self.heads()) + 1
2586 2597 finally:
2587 2598 lock.release()
2588 2599
2589 2600 def clone(self, remote, heads=[], stream=False):
2590 2601 '''clone remote repository.
2591 2602
2592 2603 keyword arguments:
2593 2604 heads: list of revs to clone (forces use of pull)
2594 2605 stream: use streaming clone if possible'''
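# Illustrative sketch (not part of the original file): cloning everything
# from a peer, letting the method pick streaming when the server allows it.
#
#     repo.clone(remote, stream=True)   # falls back to pull() otherwise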
2595 2606
2596 2607 # now, all clients that can request uncompressed clones can
2597 2608 # read repo formats supported by all servers that can serve
2598 2609 # them.
2599 2610
2600 2611 # if revlog format changes, client will have to check version
2601 2612 # and format flags on "stream" capability, and use
2602 2613 # uncompressed only if compatible.
2603 2614
2604 2615 if not stream:
2605 2616 # if the server explicitly prefers to stream (for fast LANs)
2606 2617 stream = remote.capable('stream-preferred')
2607 2618
2608 2619 if stream and not heads:
2609 2620 # 'stream' means remote revlog format is revlogv1 only
2610 2621 if remote.capable('stream'):
2611 2622 return self.stream_in(remote, set(('revlogv1',)))
2612 2623 # otherwise, 'streamreqs' contains the remote revlog format
2613 2624 streamreqs = remote.capable('streamreqs')
2614 2625 if streamreqs:
2615 2626 streamreqs = set(streamreqs.split(','))
2616 2627 # if we support it, stream in and adjust our requirements
2617 2628 if not streamreqs - self.supportedformats:
2618 2629 return self.stream_in(remote, streamreqs)
2619 2630 return self.pull(remote, heads)
2620 2631
2621 2632 def pushkey(self, namespace, key, old, new):
2622 2633 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2623 2634 old=old, new=new)
2624 2635 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2625 2636 ret = pushkey.push(self, namespace, key, old, new)
2626 2637 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2627 2638 ret=ret)
2628 2639 return ret
2629 2640
2630 2641 def listkeys(self, namespace):
2631 2642 self.hook('prelistkeys', throw=True, namespace=namespace)
2632 2643 self.ui.debug('listing keys for "%s"\n' % namespace)
2633 2644 values = pushkey.list(self, namespace)
2634 2645 self.hook('listkeys', namespace=namespace, values=values)
2635 2646 return values
2636 2647
2637 2648 def debugwireargs(self, one, two, three=None, four=None, five=None):
2638 2649 '''used to test argument passing over the wire'''
2639 2650 return "%s %s %s %s %s" % (one, two, three, four, five)
2640 2651
2641 2652 def savecommitmessage(self, text):
2642 2653 fp = self.opener('last-message.txt', 'wb')
2643 2654 try:
2644 2655 fp.write(text)
2645 2656 finally:
2646 2657 fp.close()
2647 2658 return self.pathto(fp.name[len(self.root)+1:])
2648 2659
2649 2660 # used to avoid circular references so destructors work
2650 2661 def aftertrans(files):
2651 2662 renamefiles = [tuple(t) for t in files]
2652 2663 def a():
2653 2664 for src, dest in renamefiles:
2654 2665 try:
2655 2666 util.rename(src, dest)
2656 2667 except OSError: # journal file does not yet exist
2657 2668 pass
2658 2669 return a
2659 2670
2660 2671 def undoname(fn):
2661 2672 base, name = os.path.split(fn)
2662 2673 assert name.startswith('journal')
2663 2674 return os.path.join(base, name.replace('journal', 'undo', 1))
2664 2675
2665 2676 def instance(ui, path, create):
2666 2677 return localrepository(ui, util.urllocalpath(path), create)
2667 2678
2668 2679 def islocal(path):
2669 2680 return True