localrepo: make supported features manageable in each repository individually...
FUJIWARA Katsunori
r19778:55ef7903 default
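The change renames the class-level `supported` set on `localrepository` to `_basesupported` and adds a `featuresetupfuncs` hook, so each repository instance computes its own `supported` set at construction time instead of every extension mutating one shared class attribute. As a minimal sketch of how an extension might build on the new hook (the `featuresetup` function and the `'myfeature'` requirement string are illustrative names, not part of this changeset):

    # hypothetical extension using the featuresetupfuncs hook added below
    from mercurial import localrepo

    def featuresetup(ui, supported):
        # called once per repository while it is being instantiated;
        # mutating 'supported' here affects only that repository
        supported |= set(['myfeature'])

    def uisetup(ui):
        localrepo.localrepository.featuresetupfuncs.add(featuresetup)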
hgext/largefiles/uisetup.py
@@ -1,178 +1,178 @@
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''setup for largefiles extension: uisetup'''
10 10
11 11 from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
12 12 httppeer, localrepo, merge, scmutil, sshpeer, wireproto, revset
13 13 from mercurial.i18n import _
14 14 from mercurial.hgweb import hgweb_mod, webcommands
15 15 from mercurial.subrepo import hgsubrepo
16 16
17 17 import overrides
18 18 import proto
19 19
20 20 def uisetup(ui):
21 21 # Disable auto-status for some commands which assume that all
22 22 # files in the result are under Mercurial's control
23 23
24 24 entry = extensions.wrapcommand(commands.table, 'add',
25 25 overrides.overrideadd)
26 26 addopt = [('', 'large', None, _('add as largefile')),
27 27 ('', 'normal', None, _('add as normal file')),
28 28 ('', 'lfsize', '', _('add all files above this size '
29 29 '(in megabytes) as largefiles '
30 30 '(default: 10)'))]
31 31 entry[1].extend(addopt)
32 32
33 33 # The scmutil function is called both by the (trivial) addremove command,
34 34 # and in the process of handling commit -A (issue3542)
35 35 entry = extensions.wrapfunction(scmutil, 'addremove',
36 36 overrides.scmutiladdremove)
37 37 entry = extensions.wrapcommand(commands.table, 'remove',
38 38 overrides.overrideremove)
39 39 entry = extensions.wrapcommand(commands.table, 'forget',
40 40 overrides.overrideforget)
41 41
42 42 # Subrepos call status function
43 43 entry = extensions.wrapcommand(commands.table, 'status',
44 44 overrides.overridestatus)
45 45 entry = extensions.wrapfunction(hgsubrepo, 'status',
46 46 overrides.overridestatusfn)
47 47
48 48 entry = extensions.wrapcommand(commands.table, 'log',
49 49 overrides.overridelog)
50 50 entry = extensions.wrapcommand(commands.table, 'rollback',
51 51 overrides.overriderollback)
52 52 entry = extensions.wrapcommand(commands.table, 'verify',
53 53 overrides.overrideverify)
54 54
55 55 verifyopt = [('', 'large', None,
56 56 _('verify that all largefiles in current revision exist')),
57 57 ('', 'lfa', None,
58 58 _('verify largefiles in all revisions, not just current')),
59 59 ('', 'lfc', None,
60 60 _('verify local largefile contents, not just existence'))]
61 61 entry[1].extend(verifyopt)
62 62
63 63 entry = extensions.wrapcommand(commands.table, 'debugstate',
64 64 overrides.overridedebugstate)
65 65 debugstateopt = [('', 'large', None, _('display largefiles dirstate'))]
66 66 entry[1].extend(debugstateopt)
67 67
68 68 entry = extensions.wrapcommand(commands.table, 'outgoing',
69 69 overrides.overrideoutgoing)
70 70 outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
71 71 entry[1].extend(outgoingopt)
72 72 entry = extensions.wrapcommand(commands.table, 'summary',
73 73 overrides.overridesummary)
74 74 summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
75 75 entry[1].extend(summaryopt)
76 76
77 77 entry = extensions.wrapcommand(commands.table, 'update',
78 78 overrides.overrideupdate)
79 79 entry = extensions.wrapcommand(commands.table, 'pull',
80 80 overrides.overridepull)
81 81 pullopt = [('', 'all-largefiles', None,
82 82 _('download all pulled versions of largefiles (DEPRECATED)')),
83 83 ('', 'lfrev', [],
84 84 _('download largefiles for these revisions'), _('REV'))]
85 85 entry[1].extend(pullopt)
86 86 revset.symbols['pulled'] = overrides.pulledrevsetsymbol
87 87
88 88 entry = extensions.wrapcommand(commands.table, 'clone',
89 89 overrides.overrideclone)
90 90 cloneopt = [('', 'all-largefiles', None,
91 91 _('download all versions of all largefiles'))]
92 92 entry[1].extend(cloneopt)
93 93 entry = extensions.wrapfunction(hg, 'clone', overrides.hgclone)
94 94
95 95 entry = extensions.wrapcommand(commands.table, 'cat',
96 96 overrides.overridecat)
97 97 entry = extensions.wrapfunction(merge, '_checkunknownfile',
98 98 overrides.overridecheckunknownfile)
99 99 entry = extensions.wrapfunction(merge, 'manifestmerge',
100 100 overrides.overridemanifestmerge)
101 101 entry = extensions.wrapfunction(filemerge, 'filemerge',
102 102 overrides.overridefilemerge)
103 103 entry = extensions.wrapfunction(cmdutil, 'copy',
104 104 overrides.overridecopy)
105 105
106 106 # Summary calls dirty on the subrepos
107 107 entry = extensions.wrapfunction(hgsubrepo, 'dirty',
108 108 overrides.overridedirty)
109 109
110 110 # Backout calls revert so we need to override both the command and the
111 111 # function
112 112 entry = extensions.wrapcommand(commands.table, 'revert',
113 113 overrides.overriderevert)
114 114 entry = extensions.wrapfunction(commands, 'revert',
115 115 overrides.overriderevert)
116 116
117 117 extensions.wrapfunction(hg, 'updaterepo', overrides.hgupdaterepo)
118 118 extensions.wrapfunction(hg, 'merge', overrides.hgmerge)
119 119
120 120 extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
121 121 extensions.wrapfunction(hgsubrepo, 'archive', overrides.hgsubrepoarchive)
122 122 extensions.wrapfunction(cmdutil, 'bailifchanged',
123 123 overrides.overridebailifchanged)
124 124
125 125 # create the new wireproto commands ...
126 126 wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
127 127 wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
128 128 wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
129 129
130 130 # ... and wrap some existing ones
131 131 wireproto.commands['capabilities'] = (proto.capabilities, '')
132 132 wireproto.commands['heads'] = (proto.heads, '')
133 133 wireproto.commands['lheads'] = (wireproto.heads, '')
134 134
135 135 # make putlfile behave the same as push and {get,stat}lfile behave
136 136 # the same as pull w.r.t. permissions checks
137 137 hgweb_mod.perms['putlfile'] = 'push'
138 138 hgweb_mod.perms['getlfile'] = 'pull'
139 139 hgweb_mod.perms['statlfile'] = 'pull'
140 140
141 141 extensions.wrapfunction(webcommands, 'decodepath', overrides.decodepath)
142 142
143 143 # the hello wireproto command uses wireproto.capabilities, so it won't see
144 144 # our largefiles capability unless we replace the actual function as well.
145 145 proto.capabilitiesorig = wireproto.capabilities
146 146 wireproto.capabilities = proto.capabilities
147 147
148 148 # can't do this in reposetup because it needs to have happened before
149 149 # wirerepo.__init__ is called
150 150 proto.ssholdcallstream = sshpeer.sshpeer._callstream
151 151 proto.httpoldcallstream = httppeer.httppeer._callstream
152 152 sshpeer.sshpeer._callstream = proto.sshrepocallstream
153 153 httppeer.httppeer._callstream = proto.httprepocallstream
154 154
155 155 # don't die on seeing a repo with the largefiles requirement
156 localrepo.localrepository.supported |= set(['largefiles'])
156 localrepo.localrepository._basesupported |= set(['largefiles'])
157 157
158 158 # override some extensions' stuff as well
159 159 for name, module in extensions.extensions():
160 160 if name == 'fetch':
161 161 extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
162 162 overrides.overridefetch)
163 163 if name == 'purge':
164 164 extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
165 165 overrides.overridepurge)
166 166 if name == 'rebase':
167 167 extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
168 168 overrides.overriderebase)
169 169 if name == 'transplant':
170 170 extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
171 171 overrides.overridetransplant)
172 172 if name == 'convert':
173 173 convcmd = getattr(module, 'convcmd')
174 174 hgsink = getattr(convcmd, 'mercurial_sink')
175 175 extensions.wrapfunction(hgsink, 'before',
176 176 overrides.mercurialsinkbefore)
177 177 extensions.wrapfunction(hgsink, 'after',
178 178 overrides.mercurialsinkafter)
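Nearly every wrapped entry above follows the same pattern: `extensions.wrapcommand` (or `wrapfunction`) installs a wrapper that receives the original callable as its first argument, and the returned command-table entry's option list (`entry[1]`) is extended with largefiles-specific flags. A minimal sketch of such a wrapper, under an illustrative name (`overrideexample`; the real wrappers live in `overrides.py`):

    def overrideexample(orig, ui, repo, *pats, **opts):
        # adjust arguments or state as needed, then delegate to the
        # wrapped command so its normal behavior is preserved
        return orig(ui, repo, *pats, **opts)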
mercurial/localrepo.py
@@ -1,2444 +1,2469 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class repofilecache(filecache):
23 23 """All filecache usage on repo are done for logic that should be unfiltered
24 24 """
25 25
26 26 def __get__(self, repo, type=None):
27 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 28 def __set__(self, repo, value):
29 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 30 def __delete__(self, repo):
31 31 return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
33 33 class storecache(repofilecache):
34 34 """filecache for files in the store"""
35 35 def join(self, obj, fname):
36 36 return obj.sjoin(fname)
37 37
38 38 class unfilteredpropertycache(propertycache):
39 39 """propertycache that apply to unfiltered repo only"""
40 40
41 41 def __get__(self, repo, type=None):
42 42 if hasunfilteredcache(repo, self.name):
43 43 return getattr(repo.unfiltered(), self.name)
44 44 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
45 45
46 46 class filteredpropertycache(propertycache):
47 47 """propertycache that must take filtering in account"""
48 48
49 49 def cachevalue(self, obj, value):
50 50 object.__setattr__(obj, self.name, value)
51 51
52 52
53 53 def hasunfilteredcache(repo, name):
54 54 """check if a repo has an unfilteredpropertycache value for <name>"""
55 55 return name in vars(repo.unfiltered())
56 56
57 57 def unfilteredmethod(orig):
58 58 """decorate method that always need to be run on unfiltered version"""
59 59 def wrapper(repo, *args, **kwargs):
60 60 return orig(repo.unfiltered(), *args, **kwargs)
61 61 return wrapper
62 62
63 63 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
64 64 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
65 65
66 66 class localpeer(peer.peerrepository):
67 67 '''peer for a local repo; reflects only the most recent API'''
68 68
69 69 def __init__(self, repo, caps=MODERNCAPS):
70 70 peer.peerrepository.__init__(self)
71 71 self._repo = repo.filtered('served')
72 72 self.ui = repo.ui
73 73 self._caps = repo._restrictcapabilities(caps)
74 74 self.requirements = repo.requirements
75 75 self.supportedformats = repo.supportedformats
76 76
77 77 def close(self):
78 78 self._repo.close()
79 79
80 80 def _capabilities(self):
81 81 return self._caps
82 82
83 83 def local(self):
84 84 return self._repo
85 85
86 86 def canpush(self):
87 87 return True
88 88
89 89 def url(self):
90 90 return self._repo.url()
91 91
92 92 def lookup(self, key):
93 93 return self._repo.lookup(key)
94 94
95 95 def branchmap(self):
96 96 return self._repo.branchmap()
97 97
98 98 def heads(self):
99 99 return self._repo.heads()
100 100
101 101 def known(self, nodes):
102 102 return self._repo.known(nodes)
103 103
104 104 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
105 105 return self._repo.getbundle(source, heads=heads, common=common,
106 106 bundlecaps=bundlecaps)
107 107
108 108 # TODO We might want to move the next two calls into legacypeer and add
109 109 # unbundle instead.
110 110
111 111 def lock(self):
112 112 return self._repo.lock()
113 113
114 114 def addchangegroup(self, cg, source, url):
115 115 return self._repo.addchangegroup(cg, source, url)
116 116
117 117 def pushkey(self, namespace, key, old, new):
118 118 return self._repo.pushkey(namespace, key, old, new)
119 119
120 120 def listkeys(self, namespace):
121 121 return self._repo.listkeys(namespace)
122 122
123 123 def debugwireargs(self, one, two, three=None, four=None, five=None):
124 124 '''used to test argument passing over the wire'''
125 125 return "%s %s %s %s %s" % (one, two, three, four, five)
126 126
127 127 class locallegacypeer(localpeer):
128 128 '''peer extension which implements legacy methods too; used for tests with
129 129 restricted capabilities'''
130 130
131 131 def __init__(self, repo):
132 132 localpeer.__init__(self, repo, caps=LEGACYCAPS)
133 133
134 134 def branches(self, nodes):
135 135 return self._repo.branches(nodes)
136 136
137 137 def between(self, pairs):
138 138 return self._repo.between(pairs)
139 139
140 140 def changegroup(self, basenodes, source):
141 141 return self._repo.changegroup(basenodes, source)
142 142
143 143 def changegroupsubset(self, bases, heads, source):
144 144 return self._repo.changegroupsubset(bases, heads, source)
145 145
146 146 class localrepository(object):
147 147
148 148 supportedformats = set(('revlogv1', 'generaldelta'))
149 supported = supportedformats | set(('store', 'fncache', 'shared',
150 'dotencode'))
149 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
150 'dotencode'))
151 151 openerreqs = set(('revlogv1', 'generaldelta'))
152 152 requirements = ['revlogv1']
153 153 filtername = None
154 154
155 featuresetupfuncs = set()
156
155 157 def _baserequirements(self, create):
156 158 return self.requirements[:]
157 159
158 160 def __init__(self, baseui, path=None, create=False):
159 161 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
160 162 self.wopener = self.wvfs
161 163 self.root = self.wvfs.base
162 164 self.path = self.wvfs.join(".hg")
163 165 self.origroot = path
164 166 self.auditor = scmutil.pathauditor(self.root, self._checknested)
165 167 self.vfs = scmutil.vfs(self.path)
166 168 self.opener = self.vfs
167 169 self.baseui = baseui
168 170 self.ui = baseui.copy()
169 171 # A list of callbacks to shape the phase if no data were found.
170 172 # Callbacks are in the form: func(repo, roots) --> processed root.
171 173 # This list is to be filled by extensions during repo setup.
172 174 self._phasedefaults = []
173 175 try:
174 176 self.ui.readconfig(self.join("hgrc"), self.root)
175 177 extensions.loadall(self.ui)
176 178 except IOError:
177 179 pass
178 180
181 if self.featuresetupfuncs:
182 self.supported = set(self._basesupported) # use private copy
183 for setupfunc in self.featuresetupfuncs:
184 setupfunc(self.ui, self.supported)
185 else:
186 self.supported = self._basesupported
187
179 188 if not self.vfs.isdir():
180 189 if create:
181 190 if not self.wvfs.exists():
182 191 self.wvfs.makedirs()
183 192 self.vfs.makedir(notindexed=True)
184 193 requirements = self._baserequirements(create)
185 194 if self.ui.configbool('format', 'usestore', True):
186 195 self.vfs.mkdir("store")
187 196 requirements.append("store")
188 197 if self.ui.configbool('format', 'usefncache', True):
189 198 requirements.append("fncache")
190 199 if self.ui.configbool('format', 'dotencode', True):
191 200 requirements.append('dotencode')
192 201 # create an invalid changelog
193 202 self.vfs.append(
194 203 "00changelog.i",
195 204 '\0\0\0\2' # represents revlogv2
196 205 ' dummy changelog to prevent using the old repo layout'
197 206 )
198 207 if self.ui.configbool('format', 'generaldelta', False):
199 208 requirements.append("generaldelta")
200 209 requirements = set(requirements)
201 210 else:
202 211 raise error.RepoError(_("repository %s not found") % path)
203 212 elif create:
204 213 raise error.RepoError(_("repository %s already exists") % path)
205 214 else:
206 215 try:
207 216 requirements = scmutil.readrequires(self.vfs, self.supported)
208 217 except IOError, inst:
209 218 if inst.errno != errno.ENOENT:
210 219 raise
211 220 requirements = set()
212 221
213 222 self.sharedpath = self.path
214 223 try:
215 224 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
216 225 realpath=True)
217 226 s = vfs.base
218 227 if not vfs.exists():
219 228 raise error.RepoError(
220 229 _('.hg/sharedpath points to nonexistent directory %s') % s)
221 230 self.sharedpath = s
222 231 except IOError, inst:
223 232 if inst.errno != errno.ENOENT:
224 233 raise
225 234
226 235 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
227 236 self.spath = self.store.path
228 237 self.svfs = self.store.vfs
229 238 self.sopener = self.svfs
230 239 self.sjoin = self.store.join
231 240 self.vfs.createmode = self.store.createmode
232 241 self._applyrequirements(requirements)
233 242 if create:
234 243 self._writerequirements()
235 244
236 245
237 246 self._branchcaches = {}
238 247 self.filterpats = {}
239 248 self._datafilters = {}
240 249 self._transref = self._lockref = self._wlockref = None
241 250
242 251 # A cache for various files under .hg/ that tracks file changes,
243 252 # (used by the filecache decorator)
244 253 #
245 254 # Maps a property name to its util.filecacheentry
246 255 self._filecache = {}
247 256
248 257 # hold sets of revisions to be filtered
249 258 # should be cleared when something might have changed the filter value:
250 259 # - new changesets,
251 260 # - phase change,
252 261 # - new obsolescence marker,
253 262 # - working directory parent change,
254 263 # - bookmark changes
255 264 self.filteredrevcache = {}
256 265
257 266 def close(self):
258 267 pass
259 268
260 269 def _restrictcapabilities(self, caps):
261 270 return caps
262 271
263 272 def _applyrequirements(self, requirements):
264 273 self.requirements = requirements
265 274 self.sopener.options = dict((r, 1) for r in requirements
266 275 if r in self.openerreqs)
267 276
268 277 def _writerequirements(self):
269 278 reqfile = self.opener("requires", "w")
270 279 for r in sorted(self.requirements):
271 280 reqfile.write("%s\n" % r)
272 281 reqfile.close()
273 282
274 283 def _checknested(self, path):
275 284 """Determine if path is a legal nested repository."""
276 285 if not path.startswith(self.root):
277 286 return False
278 287 subpath = path[len(self.root) + 1:]
279 288 normsubpath = util.pconvert(subpath)
280 289
281 290 # XXX: Checking against the current working copy is wrong in
282 291 # the sense that it can reject things like
283 292 #
284 293 # $ hg cat -r 10 sub/x.txt
285 294 #
286 295 # if sub/ is no longer a subrepository in the working copy
287 296 # parent revision.
288 297 #
289 298 # However, it can of course also allow things that would have
290 299 # been rejected before, such as the above cat command if sub/
291 300 # is a subrepository now, but was a normal directory before.
292 301 # The old path auditor would have rejected by mistake since it
293 302 # panics when it sees sub/.hg/.
294 303 #
295 304 # All in all, checking against the working copy seems sensible
296 305 # since we want to prevent access to nested repositories on
297 306 # the filesystem *now*.
298 307 ctx = self[None]
299 308 parts = util.splitpath(subpath)
300 309 while parts:
301 310 prefix = '/'.join(parts)
302 311 if prefix in ctx.substate:
303 312 if prefix == normsubpath:
304 313 return True
305 314 else:
306 315 sub = ctx.sub(prefix)
307 316 return sub.checknested(subpath[len(prefix) + 1:])
308 317 else:
309 318 parts.pop()
310 319 return False
311 320
312 321 def peer(self):
313 322 return localpeer(self) # not cached to avoid reference cycle
314 323
315 324 def unfiltered(self):
316 325 """Return unfiltered version of the repository
317 326
318 327 Intended to be overwritten by filtered repo."""
319 328 return self
320 329
321 330 def filtered(self, name):
322 331 """Return a filtered version of a repository"""
323 332 # build a new class with the mixin and the current class
324 333 # (possibly subclass of the repo)
325 334 class proxycls(repoview.repoview, self.unfiltered().__class__):
326 335 pass
327 336 return proxycls(self, name)
328 337
329 338 @repofilecache('bookmarks')
330 339 def _bookmarks(self):
331 340 return bookmarks.bmstore(self)
332 341
333 342 @repofilecache('bookmarks.current')
334 343 def _bookmarkcurrent(self):
335 344 return bookmarks.readcurrent(self)
336 345
337 346 def bookmarkheads(self, bookmark):
338 347 name = bookmark.split('@', 1)[0]
339 348 heads = []
340 349 for mark, n in self._bookmarks.iteritems():
341 350 if mark.split('@', 1)[0] == name:
342 351 heads.append(n)
343 352 return heads
344 353
345 354 @storecache('phaseroots')
346 355 def _phasecache(self):
347 356 return phases.phasecache(self, self._phasedefaults)
348 357
349 358 @storecache('obsstore')
350 359 def obsstore(self):
351 360 store = obsolete.obsstore(self.sopener)
352 361 if store and not obsolete._enabled:
353 362 # message is rare enough to not be translated
354 363 msg = 'obsolete feature not enabled but %i markers found!\n'
355 364 self.ui.warn(msg % len(list(store)))
356 365 return store
357 366
358 367 @storecache('00changelog.i')
359 368 def changelog(self):
360 369 c = changelog.changelog(self.sopener)
361 370 if 'HG_PENDING' in os.environ:
362 371 p = os.environ['HG_PENDING']
363 372 if p.startswith(self.root):
364 373 c.readpending('00changelog.i.a')
365 374 return c
366 375
367 376 @storecache('00manifest.i')
368 377 def manifest(self):
369 378 return manifest.manifest(self.sopener)
370 379
371 380 @repofilecache('dirstate')
372 381 def dirstate(self):
373 382 warned = [0]
374 383 def validate(node):
375 384 try:
376 385 self.changelog.rev(node)
377 386 return node
378 387 except error.LookupError:
379 388 if not warned[0]:
380 389 warned[0] = True
381 390 self.ui.warn(_("warning: ignoring unknown"
382 391 " working parent %s!\n") % short(node))
383 392 return nullid
384 393
385 394 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
386 395
387 396 def __getitem__(self, changeid):
388 397 if changeid is None:
389 398 return context.workingctx(self)
390 399 return context.changectx(self, changeid)
391 400
392 401 def __contains__(self, changeid):
393 402 try:
394 403 return bool(self.lookup(changeid))
395 404 except error.RepoLookupError:
396 405 return False
397 406
398 407 def __nonzero__(self):
399 408 return True
400 409
401 410 def __len__(self):
402 411 return len(self.changelog)
403 412
404 413 def __iter__(self):
405 414 return iter(self.changelog)
406 415
407 416 def revs(self, expr, *args):
408 417 '''Return a list of revisions matching the given revset'''
409 418 expr = revset.formatspec(expr, *args)
410 419 m = revset.match(None, expr)
411 420 return [r for r in m(self, list(self))]
412 421
413 422 def set(self, expr, *args):
414 423 '''
415 424 Yield a context for each matching revision, after doing arg
416 425 replacement via revset.formatspec
417 426 '''
418 427 for r in self.revs(expr, *args):
419 428 yield self[r]
420 429
421 430 def url(self):
422 431 return 'file:' + self.root
423 432
424 433 def hook(self, name, throw=False, **args):
425 434 return hook.hook(self.ui, self, name, throw, **args)
426 435
427 436 @unfilteredmethod
428 437 def _tag(self, names, node, message, local, user, date, extra={}):
429 438 if isinstance(names, str):
430 439 names = (names,)
431 440
432 441 branches = self.branchmap()
433 442 for name in names:
434 443 self.hook('pretag', throw=True, node=hex(node), tag=name,
435 444 local=local)
436 445 if name in branches:
437 446 self.ui.warn(_("warning: tag %s conflicts with existing"
438 447 " branch name\n") % name)
439 448
440 449 def writetags(fp, names, munge, prevtags):
441 450 fp.seek(0, 2)
442 451 if prevtags and prevtags[-1] != '\n':
443 452 fp.write('\n')
444 453 for name in names:
445 454 m = munge and munge(name) or name
446 455 if (self._tagscache.tagtypes and
447 456 name in self._tagscache.tagtypes):
448 457 old = self.tags().get(name, nullid)
449 458 fp.write('%s %s\n' % (hex(old), m))
450 459 fp.write('%s %s\n' % (hex(node), m))
451 460 fp.close()
452 461
453 462 prevtags = ''
454 463 if local:
455 464 try:
456 465 fp = self.opener('localtags', 'r+')
457 466 except IOError:
458 467 fp = self.opener('localtags', 'a')
459 468 else:
460 469 prevtags = fp.read()
461 470
462 471 # local tags are stored in the current charset
463 472 writetags(fp, names, None, prevtags)
464 473 for name in names:
465 474 self.hook('tag', node=hex(node), tag=name, local=local)
466 475 return
467 476
468 477 try:
469 478 fp = self.wfile('.hgtags', 'rb+')
470 479 except IOError, e:
471 480 if e.errno != errno.ENOENT:
472 481 raise
473 482 fp = self.wfile('.hgtags', 'ab')
474 483 else:
475 484 prevtags = fp.read()
476 485
477 486 # committed tags are stored in UTF-8
478 487 writetags(fp, names, encoding.fromlocal, prevtags)
479 488
480 489 fp.close()
481 490
482 491 self.invalidatecaches()
483 492
484 493 if '.hgtags' not in self.dirstate:
485 494 self[None].add(['.hgtags'])
486 495
487 496 m = matchmod.exact(self.root, '', ['.hgtags'])
488 497 tagnode = self.commit(message, user, date, extra=extra, match=m)
489 498
490 499 for name in names:
491 500 self.hook('tag', node=hex(node), tag=name, local=local)
492 501
493 502 return tagnode
494 503
495 504 def tag(self, names, node, message, local, user, date):
496 505 '''tag a revision with one or more symbolic names.
497 506
498 507 names is a list of strings or, when adding a single tag, names may be a
499 508 string.
500 509
501 510 if local is True, the tags are stored in a per-repository file.
502 511 otherwise, they are stored in the .hgtags file, and a new
503 512 changeset is committed with the change.
504 513
505 514 keyword arguments:
506 515
507 516 local: whether to store tags in non-version-controlled file
508 517 (default False)
509 518
510 519 message: commit message to use if committing
511 520
512 521 user: name of user to use if committing
513 522
514 523 date: date tuple to use if committing'''
515 524
516 525 if not local:
517 526 for x in self.status()[:5]:
518 527 if '.hgtags' in x:
519 528 raise util.Abort(_('working copy of .hgtags is changed '
520 529 '(please commit .hgtags manually)'))
521 530
522 531 self.tags() # instantiate the cache
523 532 self._tag(names, node, message, local, user, date)
524 533
525 534 @filteredpropertycache
526 535 def _tagscache(self):
527 536 '''Returns a tagscache object that contains various tags related
528 537 caches.'''
529 538
530 539 # This simplifies its cache management by having one decorated
531 540 # function (this one) and the rest simply fetch things from it.
532 541 class tagscache(object):
533 542 def __init__(self):
534 543 # These two define the set of tags for this repository. tags
535 544 # maps tag name to node; tagtypes maps tag name to 'global' or
536 545 # 'local'. (Global tags are defined by .hgtags across all
537 546 # heads, and local tags are defined in .hg/localtags.)
538 547 # They constitute the in-memory cache of tags.
539 548 self.tags = self.tagtypes = None
540 549
541 550 self.nodetagscache = self.tagslist = None
542 551
543 552 cache = tagscache()
544 553 cache.tags, cache.tagtypes = self._findtags()
545 554
546 555 return cache
547 556
548 557 def tags(self):
549 558 '''return a mapping of tag to node'''
550 559 t = {}
551 560 if self.changelog.filteredrevs:
552 561 tags, tt = self._findtags()
553 562 else:
554 563 tags = self._tagscache.tags
555 564 for k, v in tags.iteritems():
556 565 try:
557 566 # ignore tags to unknown nodes
558 567 self.changelog.rev(v)
559 568 t[k] = v
560 569 except (error.LookupError, ValueError):
561 570 pass
562 571 return t
563 572
564 573 def _findtags(self):
565 574 '''Do the hard work of finding tags. Return a pair of dicts
566 575 (tags, tagtypes) where tags maps tag name to node, and tagtypes
567 576 maps tag name to a string like \'global\' or \'local\'.
568 577 Subclasses or extensions are free to add their own tags, but
569 578 should be aware that the returned dicts will be retained for the
570 579 duration of the localrepo object.'''
571 580
572 581 # XXX what tagtype should subclasses/extensions use? Currently
573 582 # mq and bookmarks add tags, but do not set the tagtype at all.
574 583 # Should each extension invent its own tag type? Should there
575 584 # be one tagtype for all such "virtual" tags? Or is the status
576 585 # quo fine?
577 586
578 587 alltags = {} # map tag name to (node, hist)
579 588 tagtypes = {}
580 589
581 590 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
582 591 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
583 592
584 593 # Build the return dicts. Have to re-encode tag names because
585 594 # the tags module always uses UTF-8 (in order not to lose info
586 595 # writing to the cache), but the rest of Mercurial wants them in
587 596 # local encoding.
588 597 tags = {}
589 598 for (name, (node, hist)) in alltags.iteritems():
590 599 if node != nullid:
591 600 tags[encoding.tolocal(name)] = node
592 601 tags['tip'] = self.changelog.tip()
593 602 tagtypes = dict([(encoding.tolocal(name), value)
594 603 for (name, value) in tagtypes.iteritems()])
595 604 return (tags, tagtypes)
596 605
597 606 def tagtype(self, tagname):
598 607 '''
599 608 return the type of the given tag. result can be:
600 609
601 610 'local' : a local tag
602 611 'global' : a global tag
603 612 None : tag does not exist
604 613 '''
605 614
606 615 return self._tagscache.tagtypes.get(tagname)
607 616
608 617 def tagslist(self):
609 618 '''return a list of tags ordered by revision'''
610 619 if not self._tagscache.tagslist:
611 620 l = []
612 621 for t, n in self.tags().iteritems():
613 622 r = self.changelog.rev(n)
614 623 l.append((r, t, n))
615 624 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
616 625
617 626 return self._tagscache.tagslist
618 627
619 628 def nodetags(self, node):
620 629 '''return the tags associated with a node'''
621 630 if not self._tagscache.nodetagscache:
622 631 nodetagscache = {}
623 632 for t, n in self._tagscache.tags.iteritems():
624 633 nodetagscache.setdefault(n, []).append(t)
625 634 for tags in nodetagscache.itervalues():
626 635 tags.sort()
627 636 self._tagscache.nodetagscache = nodetagscache
628 637 return self._tagscache.nodetagscache.get(node, [])
629 638
630 639 def nodebookmarks(self, node):
631 640 marks = []
632 641 for bookmark, n in self._bookmarks.iteritems():
633 642 if n == node:
634 643 marks.append(bookmark)
635 644 return sorted(marks)
636 645
637 646 def branchmap(self):
638 647 '''returns a dictionary {branch: [branchheads]}'''
639 648 branchmap.updatecache(self)
640 649 return self._branchcaches[self.filtername]
641 650
642 651
643 652 def _branchtip(self, heads):
644 653 '''return the tipmost branch head in heads'''
645 654 tip = heads[-1]
646 655 for h in reversed(heads):
647 656 if not self[h].closesbranch():
648 657 tip = h
649 658 break
650 659 return tip
651 660
652 661 def branchtip(self, branch):
653 662 '''return the tip node for a given branch'''
654 663 if branch not in self.branchmap():
655 664 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
656 665 return self._branchtip(self.branchmap()[branch])
657 666
658 667 def branchtags(self):
659 668 '''return a dict where branch names map to the tipmost head of
660 669 the branch, open heads come before closed'''
661 670 bt = {}
662 671 for bn, heads in self.branchmap().iteritems():
663 672 bt[bn] = self._branchtip(heads)
664 673 return bt
665 674
666 675 def lookup(self, key):
667 676 return self[key].node()
668 677
669 678 def lookupbranch(self, key, remote=None):
670 679 repo = remote or self
671 680 if key in repo.branchmap():
672 681 return key
673 682
674 683 repo = (remote and remote.local()) and remote or self
675 684 return repo[key].branch()
676 685
677 686 def known(self, nodes):
678 687 nm = self.changelog.nodemap
679 688 pc = self._phasecache
680 689 result = []
681 690 for n in nodes:
682 691 r = nm.get(n)
683 692 resp = not (r is None or pc.phase(self, r) >= phases.secret)
684 693 result.append(resp)
685 694 return result
686 695
687 696 def local(self):
688 697 return self
689 698
690 699 def cancopy(self):
691 700 return self.local() # so statichttprepo's override of local() works
692 701
693 702 def join(self, f):
694 703 return os.path.join(self.path, f)
695 704
696 705 def wjoin(self, f):
697 706 return os.path.join(self.root, f)
698 707
699 708 def file(self, f):
700 709 if f[0] == '/':
701 710 f = f[1:]
702 711 return filelog.filelog(self.sopener, f)
703 712
704 713 def changectx(self, changeid):
705 714 return self[changeid]
706 715
707 716 def parents(self, changeid=None):
708 717 '''get list of changectxs for parents of changeid'''
709 718 return self[changeid].parents()
710 719
711 720 def setparents(self, p1, p2=nullid):
712 721 copies = self.dirstate.setparents(p1, p2)
713 722 pctx = self[p1]
714 723 if copies:
715 724 # Adjust copy records: the dirstate cannot do it, as it
716 725 # requires access to the parents' manifests. Preserve them
717 726 # only for entries added to the first parent.
718 727 for f in copies:
719 728 if f not in pctx and copies[f] in pctx:
720 729 self.dirstate.copy(copies[f], f)
721 730 if p2 == nullid:
722 731 for f, s in sorted(self.dirstate.copies().items()):
723 732 if f not in pctx and s not in pctx:
724 733 self.dirstate.copy(None, f)
725 734
726 735 def filectx(self, path, changeid=None, fileid=None):
727 736 """changeid can be a changeset revision, node, or tag.
728 737 fileid can be a file revision or node."""
729 738 return context.filectx(self, path, changeid, fileid)
730 739
731 740 def getcwd(self):
732 741 return self.dirstate.getcwd()
733 742
734 743 def pathto(self, f, cwd=None):
735 744 return self.dirstate.pathto(f, cwd)
736 745
737 746 def wfile(self, f, mode='r'):
738 747 return self.wopener(f, mode)
739 748
740 749 def _link(self, f):
741 750 return self.wvfs.islink(f)
742 751
743 752 def _loadfilter(self, filter):
744 753 if filter not in self.filterpats:
745 754 l = []
746 755 for pat, cmd in self.ui.configitems(filter):
747 756 if cmd == '!':
748 757 continue
749 758 mf = matchmod.match(self.root, '', [pat])
750 759 fn = None
751 760 params = cmd
752 761 for name, filterfn in self._datafilters.iteritems():
753 762 if cmd.startswith(name):
754 763 fn = filterfn
755 764 params = cmd[len(name):].lstrip()
756 765 break
757 766 if not fn:
758 767 fn = lambda s, c, **kwargs: util.filter(s, c)
759 768 # Wrap old filters not supporting keyword arguments
760 769 if not inspect.getargspec(fn)[2]:
761 770 oldfn = fn
762 771 fn = lambda s, c, **kwargs: oldfn(s, c)
763 772 l.append((mf, fn, params))
764 773 self.filterpats[filter] = l
765 774 return self.filterpats[filter]
766 775
767 776 def _filter(self, filterpats, filename, data):
768 777 for mf, fn, cmd in filterpats:
769 778 if mf(filename):
770 779 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
771 780 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
772 781 break
773 782
774 783 return data
775 784
776 785 @unfilteredpropertycache
777 786 def _encodefilterpats(self):
778 787 return self._loadfilter('encode')
779 788
780 789 @unfilteredpropertycache
781 790 def _decodefilterpats(self):
782 791 return self._loadfilter('decode')
783 792
784 793 def adddatafilter(self, name, filter):
785 794 self._datafilters[name] = filter
786 795
787 796 def wread(self, filename):
788 797 if self._link(filename):
789 798 data = self.wvfs.readlink(filename)
790 799 else:
791 800 data = self.wopener.read(filename)
792 801 return self._filter(self._encodefilterpats, filename, data)
793 802
794 803 def wwrite(self, filename, data, flags):
795 804 data = self._filter(self._decodefilterpats, filename, data)
796 805 if 'l' in flags:
797 806 self.wopener.symlink(data, filename)
798 807 else:
799 808 self.wopener.write(filename, data)
800 809 if 'x' in flags:
801 810 self.wvfs.setflags(filename, False, True)
802 811
803 812 def wwritedata(self, filename, data):
804 813 return self._filter(self._decodefilterpats, filename, data)
805 814
806 815 def transaction(self, desc):
807 816 tr = self._transref and self._transref() or None
808 817 if tr and tr.running():
809 818 return tr.nest()
810 819
811 820 # abort here if the journal already exists
812 821 if self.svfs.exists("journal"):
813 822 raise error.RepoError(
814 823 _("abandoned transaction found - run hg recover"))
815 824
816 825 self._writejournal(desc)
817 826 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
818 827
819 828 tr = transaction.transaction(self.ui.warn, self.sopener,
820 829 self.sjoin("journal"),
821 830 aftertrans(renames),
822 831 self.store.createmode)
823 832 self._transref = weakref.ref(tr)
824 833 return tr
825 834
826 835 def _journalfiles(self):
827 836 return ((self.svfs, 'journal'),
828 837 (self.vfs, 'journal.dirstate'),
829 838 (self.vfs, 'journal.branch'),
830 839 (self.vfs, 'journal.desc'),
831 840 (self.vfs, 'journal.bookmarks'),
832 841 (self.svfs, 'journal.phaseroots'))
833 842
834 843 def undofiles(self):
835 844 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
836 845
837 846 def _writejournal(self, desc):
838 847 self.opener.write("journal.dirstate",
839 848 self.opener.tryread("dirstate"))
840 849 self.opener.write("journal.branch",
841 850 encoding.fromlocal(self.dirstate.branch()))
842 851 self.opener.write("journal.desc",
843 852 "%d\n%s\n" % (len(self), desc))
844 853 self.opener.write("journal.bookmarks",
845 854 self.opener.tryread("bookmarks"))
846 855 self.sopener.write("journal.phaseroots",
847 856 self.sopener.tryread("phaseroots"))
848 857
849 858 def recover(self):
850 859 lock = self.lock()
851 860 try:
852 861 if self.svfs.exists("journal"):
853 862 self.ui.status(_("rolling back interrupted transaction\n"))
854 863 transaction.rollback(self.sopener, self.sjoin("journal"),
855 864 self.ui.warn)
856 865 self.invalidate()
857 866 return True
858 867 else:
859 868 self.ui.warn(_("no interrupted transaction available\n"))
860 869 return False
861 870 finally:
862 871 lock.release()
863 872
864 873 def rollback(self, dryrun=False, force=False):
865 874 wlock = lock = None
866 875 try:
867 876 wlock = self.wlock()
868 877 lock = self.lock()
869 878 if self.svfs.exists("undo"):
870 879 return self._rollback(dryrun, force)
871 880 else:
872 881 self.ui.warn(_("no rollback information available\n"))
873 882 return 1
874 883 finally:
875 884 release(lock, wlock)
876 885
877 886 @unfilteredmethod # Until we get smarter cache management
878 887 def _rollback(self, dryrun, force):
879 888 ui = self.ui
880 889 try:
881 890 args = self.opener.read('undo.desc').splitlines()
882 891 (oldlen, desc, detail) = (int(args[0]), args[1], None)
883 892 if len(args) >= 3:
884 893 detail = args[2]
885 894 oldtip = oldlen - 1
886 895
887 896 if detail and ui.verbose:
888 897 msg = (_('repository tip rolled back to revision %s'
889 898 ' (undo %s: %s)\n')
890 899 % (oldtip, desc, detail))
891 900 else:
892 901 msg = (_('repository tip rolled back to revision %s'
893 902 ' (undo %s)\n')
894 903 % (oldtip, desc))
895 904 except IOError:
896 905 msg = _('rolling back unknown transaction\n')
897 906 desc = None
898 907
899 908 if not force and self['.'] != self['tip'] and desc == 'commit':
900 909 raise util.Abort(
901 910 _('rollback of last commit while not checked out '
902 911 'may lose data'), hint=_('use -f to force'))
903 912
904 913 ui.status(msg)
905 914 if dryrun:
906 915 return 0
907 916
908 917 parents = self.dirstate.parents()
909 918 self.destroying()
910 919 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
911 920 if self.vfs.exists('undo.bookmarks'):
912 921 self.vfs.rename('undo.bookmarks', 'bookmarks')
913 922 if self.svfs.exists('undo.phaseroots'):
914 923 self.svfs.rename('undo.phaseroots', 'phaseroots')
915 924 self.invalidate()
916 925
917 926 parentgone = (parents[0] not in self.changelog.nodemap or
918 927 parents[1] not in self.changelog.nodemap)
919 928 if parentgone:
920 929 self.vfs.rename('undo.dirstate', 'dirstate')
921 930 try:
922 931 branch = self.opener.read('undo.branch')
923 932 self.dirstate.setbranch(encoding.tolocal(branch))
924 933 except IOError:
925 934 ui.warn(_('named branch could not be reset: '
926 935 'current branch is still \'%s\'\n')
927 936 % self.dirstate.branch())
928 937
929 938 self.dirstate.invalidate()
930 939 parents = tuple([p.rev() for p in self.parents()])
931 940 if len(parents) > 1:
932 941 ui.status(_('working directory now based on '
933 942 'revisions %d and %d\n') % parents)
934 943 else:
935 944 ui.status(_('working directory now based on '
936 945 'revision %d\n') % parents)
937 946 # TODO: if we know which new heads may result from this rollback, pass
938 947 # them to destroy(), which will prevent the branchhead cache from being
939 948 # invalidated.
940 949 self.destroyed()
941 950 return 0
942 951
943 952 def invalidatecaches(self):
944 953
945 954 if '_tagscache' in vars(self):
946 955 # can't use delattr on proxy
947 956 del self.__dict__['_tagscache']
948 957
949 958 self.unfiltered()._branchcaches.clear()
950 959 self.invalidatevolatilesets()
951 960
952 961 def invalidatevolatilesets(self):
953 962 self.filteredrevcache.clear()
954 963 obsolete.clearobscaches(self)
955 964
956 965 def invalidatedirstate(self):
957 966 '''Invalidates the dirstate, causing the next call to dirstate
958 967 to check if it was modified since the last time it was read,
959 968 rereading it if it has.
960 969
961 970 This differs from dirstate.invalidate() in that it doesn't always
962 971 reread the dirstate. Use dirstate.invalidate() if you want to
963 972 explicitly read the dirstate again (i.e. restoring it to a previous
964 973 known good state).'''
965 974 if hasunfilteredcache(self, 'dirstate'):
966 975 for k in self.dirstate._filecache:
967 976 try:
968 977 delattr(self.dirstate, k)
969 978 except AttributeError:
970 979 pass
971 980 delattr(self.unfiltered(), 'dirstate')
972 981
973 982 def invalidate(self):
974 983 unfiltered = self.unfiltered() # all file caches are stored unfiltered
975 984 for k in self._filecache:
976 985 # dirstate is invalidated separately in invalidatedirstate()
977 986 if k == 'dirstate':
978 987 continue
979 988
980 989 try:
981 990 delattr(unfiltered, k)
982 991 except AttributeError:
983 992 pass
984 993 self.invalidatecaches()
985 994
986 995 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
987 996 try:
988 997 l = lock.lock(lockname, 0, releasefn, desc=desc)
989 998 except error.LockHeld, inst:
990 999 if not wait:
991 1000 raise
992 1001 self.ui.warn(_("waiting for lock on %s held by %r\n") %
993 1002 (desc, inst.locker))
994 1003 # default to 600 seconds timeout
995 1004 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
996 1005 releasefn, desc=desc)
997 1006 if acquirefn:
998 1007 acquirefn()
999 1008 return l
1000 1009
1001 1010 def _afterlock(self, callback):
1002 1011 """add a callback to the current repository lock.
1003 1012
1004 1013 The callback will be executed on lock release."""
1005 1014 l = self._lockref and self._lockref()
1006 1015 if l:
1007 1016 l.postrelease.append(callback)
1008 1017 else:
1009 1018 callback()
1010 1019
1011 1020 def lock(self, wait=True):
1012 1021 '''Lock the repository store (.hg/store) and return a weak reference
1013 1022 to the lock. Use this before modifying the store (e.g. committing or
1014 1023 stripping). If you are opening a transaction, get a lock as well.'''
1015 1024 l = self._lockref and self._lockref()
1016 1025 if l is not None and l.held:
1017 1026 l.lock()
1018 1027 return l
1019 1028
1020 1029 def unlock():
1021 1030 self.store.write()
1022 1031 if hasunfilteredcache(self, '_phasecache'):
1023 1032 self._phasecache.write()
1024 1033 for k, ce in self._filecache.items():
1025 1034 if k == 'dirstate' or k not in self.__dict__:
1026 1035 continue
1027 1036 ce.refresh()
1028 1037
1029 1038 l = self._lock(self.sjoin("lock"), wait, unlock,
1030 1039 self.invalidate, _('repository %s') % self.origroot)
1031 1040 self._lockref = weakref.ref(l)
1032 1041 return l
1033 1042
1034 1043 def wlock(self, wait=True):
1035 1044 '''Lock the non-store parts of the repository (everything under
1036 1045 .hg except .hg/store) and return a weak reference to the lock.
1037 1046 Use this before modifying files in .hg.'''
1038 1047 l = self._wlockref and self._wlockref()
1039 1048 if l is not None and l.held:
1040 1049 l.lock()
1041 1050 return l
1042 1051
1043 1052 def unlock():
1044 1053 self.dirstate.write()
1045 1054 self._filecache['dirstate'].refresh()
1046 1055
1047 1056 l = self._lock(self.join("wlock"), wait, unlock,
1048 1057 self.invalidatedirstate, _('working directory of %s') %
1049 1058 self.origroot)
1050 1059 self._wlockref = weakref.ref(l)
1051 1060 return l
1052 1061
1053 1062 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1054 1063 """
1055 1064 commit an individual file as part of a larger transaction
1056 1065 """
1057 1066
1058 1067 fname = fctx.path()
1059 1068 text = fctx.data()
1060 1069 flog = self.file(fname)
1061 1070 fparent1 = manifest1.get(fname, nullid)
1062 1071 fparent2 = fparent2o = manifest2.get(fname, nullid)
1063 1072
1064 1073 meta = {}
1065 1074 copy = fctx.renamed()
1066 1075 if copy and copy[0] != fname:
1067 1076 # Mark the new revision of this file as a copy of another
1068 1077 # file. This copy data will effectively act as a parent
1069 1078 # of this new revision. If this is a merge, the first
1070 1079 # parent will be the nullid (meaning "look up the copy data")
1071 1080 # and the second one will be the other parent. For example:
1072 1081 #
1073 1082 # 0 --- 1 --- 3 rev1 changes file foo
1074 1083 # \ / rev2 renames foo to bar and changes it
1075 1084 # \- 2 -/ rev3 should have bar with all changes and
1076 1085 # should record that bar descends from
1077 1086 # bar in rev2 and foo in rev1
1078 1087 #
1079 1088 # this allows this merge to succeed:
1080 1089 #
1081 1090 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1082 1091 # \ / merging rev3 and rev4 should use bar@rev2
1083 1092 # \- 2 --- 4 as the merge base
1084 1093 #
1085 1094
1086 1095 cfname = copy[0]
1087 1096 crev = manifest1.get(cfname)
1088 1097 newfparent = fparent2
1089 1098
1090 1099 if manifest2: # branch merge
1091 1100 if fparent2 == nullid or crev is None: # copied on remote side
1092 1101 if cfname in manifest2:
1093 1102 crev = manifest2[cfname]
1094 1103 newfparent = fparent1
1095 1104
1096 1105 # find source in nearest ancestor if we've lost track
1097 1106 if not crev:
1098 1107 self.ui.debug(" %s: searching for copy revision for %s\n" %
1099 1108 (fname, cfname))
1100 1109 for ancestor in self[None].ancestors():
1101 1110 if cfname in ancestor:
1102 1111 crev = ancestor[cfname].filenode()
1103 1112 break
1104 1113
1105 1114 if crev:
1106 1115 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1107 1116 meta["copy"] = cfname
1108 1117 meta["copyrev"] = hex(crev)
1109 1118 fparent1, fparent2 = nullid, newfparent
1110 1119 else:
1111 1120 self.ui.warn(_("warning: can't find ancestor for '%s' "
1112 1121 "copied from '%s'!\n") % (fname, cfname))
1113 1122
1114 1123 elif fparent2 != nullid:
1115 1124 # is one parent an ancestor of the other?
1116 1125 fparentancestor = flog.ancestor(fparent1, fparent2)
1117 1126 if fparentancestor == fparent1:
1118 1127 fparent1, fparent2 = fparent2, nullid
1119 1128 elif fparentancestor == fparent2:
1120 1129 fparent2 = nullid
1121 1130
1122 1131 # is the file changed?
1123 1132 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1124 1133 changelist.append(fname)
1125 1134 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1126 1135
1127 1136 # are just the flags changed during merge?
1128 1137 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1129 1138 changelist.append(fname)
1130 1139
1131 1140 return fparent1
1132 1141
1133 1142 @unfilteredmethod
1134 1143 def commit(self, text="", user=None, date=None, match=None, force=False,
1135 1144 editor=False, extra={}):
1136 1145 """Add a new revision to current repository.
1137 1146
1138 1147 Revision information is gathered from the working directory,
1139 1148 match can be used to filter the committed files. If editor is
1140 1149 supplied, it is called to get a commit message.
1141 1150 """
1142 1151
1143 1152 def fail(f, msg):
1144 1153 raise util.Abort('%s: %s' % (f, msg))
1145 1154
1146 1155 if not match:
1147 1156 match = matchmod.always(self.root, '')
1148 1157
1149 1158 if not force:
1150 1159 vdirs = []
1151 1160 match.explicitdir = vdirs.append
1152 1161 match.bad = fail
1153 1162
1154 1163 wlock = self.wlock()
1155 1164 try:
1156 1165 wctx = self[None]
1157 1166 merge = len(wctx.parents()) > 1
1158 1167
1159 1168 if (not force and merge and match and
1160 1169 (match.files() or match.anypats())):
1161 1170 raise util.Abort(_('cannot partially commit a merge '
1162 1171 '(do not specify files or patterns)'))
1163 1172
1164 1173 changes = self.status(match=match, clean=force)
1165 1174 if force:
1166 1175 changes[0].extend(changes[6]) # mq may commit unchanged files
1167 1176
1168 1177 # check subrepos
1169 1178 subs = []
1170 1179 commitsubs = set()
1171 1180 newstate = wctx.substate.copy()
1172 1181 # only manage subrepos and .hgsubstate if .hgsub is present
1173 1182 if '.hgsub' in wctx:
1174 1183 # we'll decide whether to track this ourselves, thanks
1175 1184 if '.hgsubstate' in changes[0]:
1176 1185 changes[0].remove('.hgsubstate')
1177 1186 if '.hgsubstate' in changes[2]:
1178 1187 changes[2].remove('.hgsubstate')
1179 1188
1180 1189 # compare current state to last committed state
1181 1190 # build new substate based on last committed state
1182 1191 oldstate = wctx.p1().substate
1183 1192 for s in sorted(newstate.keys()):
1184 1193 if not match(s):
1185 1194 # ignore working copy, use old state if present
1186 1195 if s in oldstate:
1187 1196 newstate[s] = oldstate[s]
1188 1197 continue
1189 1198 if not force:
1190 1199 raise util.Abort(
1191 1200 _("commit with new subrepo %s excluded") % s)
1192 1201 if wctx.sub(s).dirty(True):
1193 1202 if not self.ui.configbool('ui', 'commitsubrepos'):
1194 1203 raise util.Abort(
1195 1204 _("uncommitted changes in subrepo %s") % s,
1196 1205 hint=_("use --subrepos for recursive commit"))
1197 1206 subs.append(s)
1198 1207 commitsubs.add(s)
1199 1208 else:
1200 1209 bs = wctx.sub(s).basestate()
1201 1210 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1202 1211 if oldstate.get(s, (None, None, None))[1] != bs:
1203 1212 subs.append(s)
1204 1213
1205 1214 # check for removed subrepos
1206 1215 for p in wctx.parents():
1207 1216 r = [s for s in p.substate if s not in newstate]
1208 1217 subs += [s for s in r if match(s)]
1209 1218 if subs:
1210 1219 if (not match('.hgsub') and
1211 1220 '.hgsub' in (wctx.modified() + wctx.added())):
1212 1221 raise util.Abort(
1213 1222 _("can't commit subrepos without .hgsub"))
1214 1223 changes[0].insert(0, '.hgsubstate')
1215 1224
1216 1225 elif '.hgsub' in changes[2]:
1217 1226 # clean up .hgsubstate when .hgsub is removed
1218 1227 if ('.hgsubstate' in wctx and
1219 1228 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1220 1229 changes[2].insert(0, '.hgsubstate')
1221 1230
1222 1231 # make sure all explicit patterns are matched
1223 1232 if not force and match.files():
1224 1233 matched = set(changes[0] + changes[1] + changes[2])
1225 1234
1226 1235 for f in match.files():
1227 1236 f = self.dirstate.normalize(f)
1228 1237 if f == '.' or f in matched or f in wctx.substate:
1229 1238 continue
1230 1239 if f in changes[3]: # missing
1231 1240 fail(f, _('file not found!'))
1232 1241 if f in vdirs: # visited directory
1233 1242 d = f + '/'
1234 1243 for mf in matched:
1235 1244 if mf.startswith(d):
1236 1245 break
1237 1246 else:
1238 1247 fail(f, _("no match under directory!"))
1239 1248 elif f not in self.dirstate:
1240 1249 fail(f, _("file not tracked!"))
1241 1250
1242 1251 cctx = context.workingctx(self, text, user, date, extra, changes)
1243 1252
1244 1253 if (not force and not extra.get("close") and not merge
1245 1254 and not cctx.files()
1246 1255 and wctx.branch() == wctx.p1().branch()):
1247 1256 return None
1248 1257
1249 1258 if merge and cctx.deleted():
1250 1259 raise util.Abort(_("cannot commit merge with missing files"))
1251 1260
1252 1261 ms = mergemod.mergestate(self)
1253 1262 for f in changes[0]:
1254 1263 if f in ms and ms[f] == 'u':
1255 1264 raise util.Abort(_("unresolved merge conflicts "
1256 1265 "(see hg help resolve)"))
1257 1266
1258 1267 if editor:
1259 1268 cctx._text = editor(self, cctx, subs)
1260 1269 edited = (text != cctx._text)
1261 1270
1262 1271 # commit subs and write new state
1263 1272 if subs:
1264 1273 for s in sorted(commitsubs):
1265 1274 sub = wctx.sub(s)
1266 1275 self.ui.status(_('committing subrepository %s\n') %
1267 1276 subrepo.subrelpath(sub))
1268 1277 sr = sub.commit(cctx._text, user, date)
1269 1278 newstate[s] = (newstate[s][0], sr)
1270 1279 subrepo.writestate(self, newstate)
1271 1280
1272 1281 # Save commit message in case this transaction gets rolled back
1273 1282 # (e.g. by a pretxncommit hook). Leave the content alone on
1274 1283 # the assumption that the user will use the same editor again.
1275 1284 msgfn = self.savecommitmessage(cctx._text)
1276 1285
1277 1286 p1, p2 = self.dirstate.parents()
1278 1287 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1279 1288 try:
1280 1289 self.hook("precommit", throw=True, parent1=hookp1,
1281 1290 parent2=hookp2)
1282 1291 ret = self.commitctx(cctx, True)
1283 1292 except: # re-raises
1284 1293 if edited:
1285 1294 self.ui.write(
1286 1295 _('note: commit message saved in %s\n') % msgfn)
1287 1296 raise
1288 1297
1289 1298 # update bookmarks, dirstate and mergestate
1290 1299 bookmarks.update(self, [p1, p2], ret)
1291 1300 cctx.markcommitted(ret)
1292 1301 ms.reset()
1293 1302 finally:
1294 1303 wlock.release()
1295 1304
1296 1305 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1297 1306 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1298 1307 self._afterlock(commithook)
1299 1308 return ret
1300 1309
1301 1310 @unfilteredmethod
1302 1311 def commitctx(self, ctx, error=False):
1303 1312 """Add a new revision to current repository.
1304 1313 Revision information is passed via the context argument.
1305 1314 """
1306 1315
1307 1316 tr = lock = None
1308 1317 removed = list(ctx.removed())
1309 1318 p1, p2 = ctx.p1(), ctx.p2()
1310 1319 user = ctx.user()
1311 1320
1312 1321 lock = self.lock()
1313 1322 try:
1314 1323 tr = self.transaction("commit")
1315 1324 trp = weakref.proxy(tr)
1316 1325
1317 1326 if ctx.files():
1318 1327 m1 = p1.manifest().copy()
1319 1328 m2 = p2.manifest()
1320 1329
1321 1330 # check in files
1322 1331 new = {}
1323 1332 changed = []
1324 1333 linkrev = len(self)
1325 1334 for f in sorted(ctx.modified() + ctx.added()):
1326 1335 self.ui.note(f + "\n")
1327 1336 try:
1328 1337 fctx = ctx[f]
1329 1338 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1330 1339 changed)
1331 1340 m1.set(f, fctx.flags())
1332 1341 except OSError, inst:
1333 1342 self.ui.warn(_("trouble committing %s!\n") % f)
1334 1343 raise
1335 1344 except IOError, inst:
1336 1345 errcode = getattr(inst, 'errno', errno.ENOENT)
1337 1346 if error or errcode and errcode != errno.ENOENT:
1338 1347 self.ui.warn(_("trouble committing %s!\n") % f)
1339 1348 raise
1340 1349 else:
1341 1350 removed.append(f)
1342 1351
1343 1352 # update manifest
1344 1353 m1.update(new)
1345 1354 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1346 1355 drop = [f for f in removed if f in m1]
1347 1356 for f in drop:
1348 1357 del m1[f]
1349 1358 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1350 1359 p2.manifestnode(), (new, drop))
1351 1360 files = changed + removed
1352 1361 else:
1353 1362 mn = p1.manifestnode()
1354 1363 files = []
1355 1364
1356 1365 # update changelog
1357 1366 self.changelog.delayupdate()
1358 1367 n = self.changelog.add(mn, files, ctx.description(),
1359 1368 trp, p1.node(), p2.node(),
1360 1369 user, ctx.date(), ctx.extra().copy())
1361 1370 p = lambda: self.changelog.writepending() and self.root or ""
1362 1371 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1363 1372 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1364 1373 parent2=xp2, pending=p)
1365 1374 self.changelog.finalize(trp)
1366 1375 # set the new commit in its proper phase
1367 1376 targetphase = phases.newcommitphase(self.ui)
1368 1377 if targetphase:
1369 1378 # retracting the boundary does not alter parent changesets.
1370 1379 # if a parent has a higher phase, the resulting phase will
1371 1380 # be compliant anyway
1372 1381 #
1373 1382 # if minimal phase was 0 we don't need to retract anything
1374 1383 phases.retractboundary(self, targetphase, [n])
1375 1384 tr.close()
1376 1385 branchmap.updatecache(self.filtered('served'))
1377 1386 return n
1378 1387 finally:
1379 1388 if tr:
1380 1389 tr.release()
1381 1390 lock.release()
1382 1391
1383 1392 @unfilteredmethod
1384 1393 def destroying(self):
1385 1394 '''Inform the repository that nodes are about to be destroyed.
1386 1395 Intended for use by strip and rollback, so there's a common
1387 1396 place for anything that has to be done before destroying history.
1388 1397
1389 1398 This is mostly useful for saving state that is in memory and waiting
1390 1399 to be flushed when the current lock is released. Because a call to
1391 1400 destroyed is imminent, the repo will be invalidated causing those
1392 1401 changes to stay in memory (waiting for the next unlock), or vanish
1393 1402 completely.
1394 1403 '''
1395 1404 # When using the same lock to commit and strip, the phasecache is left
1396 1405 # dirty after committing. Then when we strip, the repo is invalidated,
1397 1406 # causing those changes to disappear.
1398 1407 if '_phasecache' in vars(self):
1399 1408 self._phasecache.write()
1400 1409
1401 1410 @unfilteredmethod
1402 1411 def destroyed(self):
1403 1412 '''Inform the repository that nodes have been destroyed.
1404 1413 Intended for use by strip and rollback, so there's a common
1405 1414 place for anything that has to be done after destroying history.
1406 1415 '''
1407 1416 # When one tries to:
1408 1417 # 1) destroy nodes thus calling this method (e.g. strip)
1409 1418 # 2) use phasecache somewhere (e.g. commit)
1410 1419 #
1411 1420 # then 2) will fail because the phasecache contains nodes that were
1412 1421 # removed. We can either remove phasecache from the filecache,
1413 1422 # causing it to reload next time it is accessed, or simply filter
1414 1423 # the removed nodes now and write the updated cache.
1415 1424 self._phasecache.filterunknown(self)
1416 1425 self._phasecache.write()
1417 1426
1418 1427 # update the 'served' branch cache to help read-only server processes
1419 1428 # Thanks to branchcache collaboration this is done from the nearest
1420 1429 # filtered subset and it is expected to be fast.
1421 1430 branchmap.updatecache(self.filtered('served'))
1422 1431
1423 1432 # Ensure the persistent tag cache is updated. Doing it now
1424 1433 # means that the tag cache only has to worry about destroyed
1425 1434 # heads immediately after a strip/rollback. That in turn
1426 1435 # guarantees that "cachetip == currenttip" (comparing both rev
1427 1436 # and node) always means no nodes have been added or destroyed.
1428 1437
1429 1438 # XXX this is suboptimal when qrefresh'ing: we strip the current
1430 1439 # head, refresh the tag cache, then immediately add a new head.
1431 1440 # But I think doing it this way is necessary for the "instant
1432 1441 # tag cache retrieval" case to work.
1433 1442 self.invalidate()
1434 1443
1435 1444 def walk(self, match, node=None):
1436 1445 '''
1437 1446 walk recursively through the directory tree or a given
1438 1447 changeset, finding all files matched by the match
1439 1448 function
1440 1449 '''
1441 1450 return self[node].walk(match)
1442 1451
1443 1452 def status(self, node1='.', node2=None, match=None,
1444 1453 ignored=False, clean=False, unknown=False,
1445 1454 listsubrepos=False):
1446 1455 """return status of files between two nodes or node and working
1447 1456 directory.
1448 1457
1449 1458 If node1 is None, use the first dirstate parent instead.
1450 1459 If node2 is None, compare node1 with working directory.
1451 1460 """
1452 1461
1453 1462 def mfmatches(ctx):
1454 1463 mf = ctx.manifest().copy()
1455 1464 if match.always():
1456 1465 return mf
1457 1466 for fn in mf.keys():
1458 1467 if not match(fn):
1459 1468 del mf[fn]
1460 1469 return mf
1461 1470
1462 1471 ctx1 = self[node1]
1463 1472 ctx2 = self[node2]
1464 1473
1465 1474 working = ctx2.rev() is None
1466 1475 parentworking = working and ctx1 == self['.']
1467 1476 match = match or matchmod.always(self.root, self.getcwd())
1468 1477 listignored, listclean, listunknown = ignored, clean, unknown
1469 1478
1470 1479 # load earliest manifest first for caching reasons
1471 1480 if not working and ctx2.rev() < ctx1.rev():
1472 1481 ctx2.manifest()
1473 1482
1474 1483 if not parentworking:
1475 1484 def bad(f, msg):
1476 1485 # 'f' may be a directory pattern from 'match.files()',
1477 1486 # so 'f not in ctx1' is not enough
1478 1487 if f not in ctx1 and f not in ctx1.dirs():
1479 1488 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1480 1489 match.bad = bad
1481 1490
1482 1491 if working: # we need to scan the working dir
1483 1492 subrepos = []
1484 1493 if '.hgsub' in self.dirstate:
1485 1494 subrepos = sorted(ctx2.substate)
1486 1495 s = self.dirstate.status(match, subrepos, listignored,
1487 1496 listclean, listunknown)
1488 1497 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1489 1498
1490 1499 # check for any possibly clean files
1491 1500 if parentworking and cmp:
1492 1501 fixup = []
1493 1502 # do a full compare of any files that might have changed
1494 1503 for f in sorted(cmp):
1495 1504 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1496 1505 or ctx1[f].cmp(ctx2[f])):
1497 1506 modified.append(f)
1498 1507 else:
1499 1508 fixup.append(f)
1500 1509
1501 1510 # update dirstate for files that are actually clean
1502 1511 if fixup:
1503 1512 if listclean:
1504 1513 clean += fixup
1505 1514
1506 1515 try:
1507 1516 # updating the dirstate is optional
1508 1517 # so we don't wait on the lock
1509 1518 wlock = self.wlock(False)
1510 1519 try:
1511 1520 for f in fixup:
1512 1521 self.dirstate.normal(f)
1513 1522 finally:
1514 1523 wlock.release()
1515 1524 except error.LockError:
1516 1525 pass
1517 1526
1518 1527 if not parentworking:
1519 1528 mf1 = mfmatches(ctx1)
1520 1529 if working:
1521 1530 # we are comparing working dir against non-parent
1522 1531 # generate a pseudo-manifest for the working dir
1523 1532 mf2 = mfmatches(self['.'])
1524 1533 for f in cmp + modified + added:
1525 1534 mf2[f] = None
1526 1535 mf2.set(f, ctx2.flags(f))
1527 1536 for f in removed:
1528 1537 if f in mf2:
1529 1538 del mf2[f]
1530 1539 else:
1531 1540 # we are comparing two revisions
1532 1541 deleted, unknown, ignored = [], [], []
1533 1542 mf2 = mfmatches(ctx2)
1534 1543
1535 1544 modified, added, clean = [], [], []
1536 1545 withflags = mf1.withflags() | mf2.withflags()
1537 1546 for fn, mf2node in mf2.iteritems():
1538 1547 if fn in mf1:
1539 1548 if (fn not in deleted and
1540 1549 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1541 1550 (mf1[fn] != mf2node and
1542 1551 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1543 1552 modified.append(fn)
1544 1553 elif listclean:
1545 1554 clean.append(fn)
1546 1555 del mf1[fn]
1547 1556 elif fn not in deleted:
1548 1557 added.append(fn)
1549 1558 removed = mf1.keys()
1550 1559
1551 1560 if working and modified and not self.dirstate._checklink:
1552 1561 # Symlink placeholders may get non-symlink-like contents
1553 1562 # via user error or dereferencing by NFS or Samba servers,
1554 1563 # so we filter out any placeholders that don't look like a
1555 1564 # symlink
1556 1565 sane = []
1557 1566 for f in modified:
1558 1567 if ctx2.flags(f) == 'l':
1559 1568 d = ctx2[f].data()
1560 1569 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1561 1570 self.ui.debug('ignoring suspect symlink placeholder'
1562 1571 ' "%s"\n' % f)
1563 1572 continue
1564 1573 sane.append(f)
1565 1574 modified = sane
1566 1575
1567 1576 r = modified, added, removed, deleted, unknown, ignored, clean
1568 1577
1569 1578 if listsubrepos:
1570 1579 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1571 1580 if working:
1572 1581 rev2 = None
1573 1582 else:
1574 1583 rev2 = ctx2.substate[subpath][1]
1575 1584 try:
1576 1585 submatch = matchmod.narrowmatcher(subpath, match)
1577 1586 s = sub.status(rev2, match=submatch, ignored=listignored,
1578 1587 clean=listclean, unknown=listunknown,
1579 1588 listsubrepos=True)
1580 1589 for rfiles, sfiles in zip(r, s):
1581 1590 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1582 1591 except error.LookupError:
1583 1592 self.ui.status(_("skipping missing subrepository: %s\n")
1584 1593 % subpath)
1585 1594
1586 1595 for l in r:
1587 1596 l.sort()
1588 1597 return r
1589 1598
1590 1599 def heads(self, start=None):
1591 1600 heads = self.changelog.heads(start)
1592 1601 # sort the output in rev descending order
1593 1602 return sorted(heads, key=self.changelog.rev, reverse=True)
1594 1603
1595 1604 def branchheads(self, branch=None, start=None, closed=False):
1596 1605 '''return a (possibly filtered) list of heads for the given branch
1597 1606
1598 1607 Heads are returned in topological order, from newest to oldest.
1599 1608 If branch is None, use the dirstate branch.
1600 1609 If start is not None, return only heads reachable from start.
1601 1610 If closed is True, return heads that are marked as closed as well.
1602 1611 '''
1603 1612 if branch is None:
1604 1613 branch = self[None].branch()
1605 1614 branches = self.branchmap()
1606 1615 if branch not in branches:
1607 1616 return []
1608 1617 # the cache returns heads ordered lowest to highest
1609 1618 bheads = list(reversed(branches[branch]))
1610 1619 if start is not None:
1611 1620 # filter out the heads that cannot be reached from startrev
1612 1621 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1613 1622 bheads = [h for h in bheads if h in fbheads]
1614 1623 if not closed:
1615 1624 bheads = [h for h in bheads if not self[h].closesbranch()]
1616 1625 return bheads
1617 1626
1618 1627 def branches(self, nodes):
1619 1628 if not nodes:
1620 1629 nodes = [self.changelog.tip()]
1621 1630 b = []
1622 1631 for n in nodes:
1623 1632 t = n
1624 1633 while True:
1625 1634 p = self.changelog.parents(n)
1626 1635 if p[1] != nullid or p[0] == nullid:
1627 1636 b.append((t, n, p[0], p[1]))
1628 1637 break
1629 1638 n = p[0]
1630 1639 return b
1631 1640
1632 1641 def between(self, pairs):
1633 1642 r = []
1634 1643
1635 1644 for top, bottom in pairs:
1636 1645 n, l, i = top, [], 0
1637 1646 f = 1
1638 1647
1639 1648 while n != bottom and n != nullid:
1640 1649 p = self.changelog.parents(n)[0]
1641 1650 if i == f:
1642 1651 l.append(n)
1643 1652 f = f * 2
1644 1653 n = p
1645 1654 i += 1
1646 1655
1647 1656 r.append(l)
1648 1657
1649 1658 return r
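The loop above samples first-parent ancestors of `top` at exponentially growing distances (1, 2, 4, 8, ...) until it reaches `bottom`. A self-contained sketch of the same sampling on a toy linear history, using a plain parent dict (with None standing in for nullid):

    def between_sample(parents, top, bottom):
        n, kept, i, f = top, [], 0, 1
        while n != bottom and n is not None:
            p = parents.get(n)
            if i == f:
                kept.append(n)  # keep the node at distance 1, 2, 4, ...
                f *= 2
            n = p
            i += 1
        return kept

    # a linear history 9 -> 8 -> ... -> 0
    parents = dict((r, r - 1 if r else None) for r in range(10))
    print(between_sample(parents, 9, 0))  # -> [8, 7, 5, 1]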
1650 1659
1651 1660 def pull(self, remote, heads=None, force=False):
1661 if remote.local():
1662 missing = set(remote.requirements) - self.supported
1663 if missing:
1664 msg = _("required features are not"
1665 " supported in the destination:"
1666 " %s") % (', '.join(sorted(missing)))
1667 raise util.Abort(msg)
1668
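This new guard refuses to pull from a local peer whose repository requires a feature this (destination) repository does not support; `push` below performs the mirror check. A tiny self-contained illustration of the set arithmetic, with made-up requirement names:

    # made-up requirement names, purely illustrative
    requirements = set(['revlogv1', 'featuresetup-test'])  # source repo
    supported = set(['revlogv1', 'store', 'fncache'])      # destination
    missing = requirements - supported
    if missing:
        print("required features are not supported in the destination: %s"
              % ', '.join(sorted(missing)))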
1652 1669 # don't open transaction for nothing or you break future useful
1653 1670 # rollback call
1654 1671 tr = None
1655 1672 trname = 'pull\n' + util.hidepassword(remote.url())
1656 1673 lock = self.lock()
1657 1674 try:
1658 1675 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1659 1676 force=force)
1660 1677 common, fetch, rheads = tmp
1661 1678 if not fetch:
1662 1679 self.ui.status(_("no changes found\n"))
1663 1680 added = []
1664 1681 result = 0
1665 1682 else:
1666 1683 tr = self.transaction(trname)
1667 1684 if heads is None and list(common) == [nullid]:
1668 1685 self.ui.status(_("requesting all changes\n"))
1669 1686 elif heads is None and remote.capable('changegroupsubset'):
1670 1687 # issue1320, avoid a race if remote changed after discovery
1671 1688 heads = rheads
1672 1689
1673 1690 if remote.capable('getbundle'):
1674 1691 # TODO: get bundlecaps from remote
1675 1692 cg = remote.getbundle('pull', common=common,
1676 1693 heads=heads or rheads)
1677 1694 elif heads is None:
1678 1695 cg = remote.changegroup(fetch, 'pull')
1679 1696 elif not remote.capable('changegroupsubset'):
1680 1697 raise util.Abort(_("partial pull cannot be done because "
1681 1698 "other repository doesn't support "
1682 1699 "changegroupsubset."))
1683 1700 else:
1684 1701 cg = remote.changegroupsubset(fetch, heads, 'pull')
1685 1702 # we use the unfiltered changelog here because hidden revisions must
1686 1703 # be taken into account for phase synchronization. They may
1687 1704 # become public and thus visible again.
1688 1705 cl = self.unfiltered().changelog
1689 1706 clstart = len(cl)
1690 1707 result = self.addchangegroup(cg, 'pull', remote.url())
1691 1708 clend = len(cl)
1692 1709 added = [cl.node(r) for r in xrange(clstart, clend)]
1693 1710
1694 1711 # compute target subset
1695 1712 if heads is None:
1696 1713 # We pulled everything possible
1697 1714 # sync on everything common
1698 1715 subset = common + added
1699 1716 else:
1700 1717 # We pulled a specific subset
1701 1718 # sync on this subset
1702 1719 subset = heads
1703 1720
1704 1721 # Get remote phases data from remote
1705 1722 remotephases = remote.listkeys('phases')
1706 1723 publishing = bool(remotephases.get('publishing', False))
1707 1724 if remotephases and not publishing:
1708 1725 # remote is new and non-publishing
1709 1726 pheads, _dr = phases.analyzeremotephases(self, subset,
1710 1727 remotephases)
1711 1728 phases.advanceboundary(self, phases.public, pheads)
1712 1729 phases.advanceboundary(self, phases.draft, subset)
1713 1730 else:
1714 1731 # Remote is old or publishing all common changesets
1715 1732 # should be seen as public
1716 1733 phases.advanceboundary(self, phases.public, subset)
1717 1734
1718 1735 def gettransaction():
1719 1736 if tr is None:
1720 1737 return self.transaction(trname)
1721 1738 return tr
1722 1739
1723 1740 obstr = obsolete.syncpull(self, remote, gettransaction)
1724 1741 if obstr is not None:
1725 1742 tr = obstr
1726 1743
1727 1744 if tr is not None:
1728 1745 tr.close()
1729 1746 finally:
1730 1747 if tr is not None:
1731 1748 tr.release()
1732 1749 lock.release()
1733 1750
1734 1751 return result
1735 1752
1736 1753 def checkpush(self, force, revs):
1737 1754 """Extensions can override this function if additional checks have
1738 1755 to be performed before pushing, or call it if they override push
1739 1756 command.
1740 1757 """
1741 1758 pass
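A hedged sketch of the kind of override the docstring invites: an extension wrapping the repository class to veto pushes. `reposetup` is the standard extension entry point; the `push.forbid` config knob is invented for this example:

    from mercurial import util

    def reposetup(ui, repo):
        if not repo.local():
            return
        class vetopushrepo(repo.__class__):
            def checkpush(self, force, revs):
                super(vetopushrepo, self).checkpush(force, revs)
                # hypothetical policy knob, not a real Mercurial setting
                if not force and self.ui.configbool('push', 'forbid'):
                    raise util.Abort('pushing is disabled by local policy')
        repo.__class__ = vetopushrepo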
1742 1759
1743 1760 def push(self, remote, force=False, revs=None, newbranch=False):
1744 1761 '''Push outgoing changesets (limited by revs) from the current
1745 1762 repository to remote. Return an integer:
1746 1763 - None means nothing to push
1747 1764 - 0 means HTTP error
1748 1765 - 1 means we pushed and remote head count is unchanged *or*
1749 1766 we have outgoing changesets but refused to push
1750 1767 - other values as described by addchangegroup()
1751 1768 '''
1769 if remote.local():
1770 missing = set(self.requirements) - remote.local().supported
1771 if missing:
1772 msg = _("required features are not"
1773 " supported in the destination:"
1774 " %s") % (', '.join(sorted(missing)))
1775 raise util.Abort(msg)
1776
1752 1777 # there are two ways to push to remote repo:
1753 1778 #
1754 1779 # addchangegroup assumes local user can lock remote
1755 1780 # repo (local filesystem, old ssh servers).
1756 1781 #
1757 1782 # unbundle assumes local user cannot lock remote repo (new ssh
1758 1783 # servers, http servers).
1759 1784
1760 1785 if not remote.canpush():
1761 1786 raise util.Abort(_("destination does not support push"))
1762 1787 unfi = self.unfiltered()
1763 1788 def localphasemove(nodes, phase=phases.public):
1764 1789 """move <nodes> to <phase> in the local source repo"""
1765 1790 if locallock is not None:
1766 1791 phases.advanceboundary(self, phase, nodes)
1767 1792 else:
1768 1793 # repo is not locked, do not change any phases!
1769 1794 # Informs the user that phases should have been moved when
1770 1795 # applicable.
1771 1796 actualmoves = [n for n in nodes if phase < self[n].phase()]
1772 1797 phasestr = phases.phasenames[phase]
1773 1798 if actualmoves:
1774 1799 self.ui.status(_('cannot lock source repo, skipping local'
1775 1800 ' %s phase update\n') % phasestr)
1776 1801 # get local lock as we might write phase data
1777 1802 locallock = None
1778 1803 try:
1779 1804 locallock = self.lock()
1780 1805 except IOError, err:
1781 1806 if err.errno != errno.EACCES:
1782 1807 raise
1783 1808 # source repo cannot be locked.
1784 1809 # We do not abort the push, but just disable the local phase
1785 1810 # synchronisation.
1786 1811 msg = 'cannot lock source repository: %s\n' % err
1787 1812 self.ui.debug(msg)
1788 1813 try:
1789 1814 self.checkpush(force, revs)
1790 1815 lock = None
1791 1816 unbundle = remote.capable('unbundle')
1792 1817 if not unbundle:
1793 1818 lock = remote.lock()
1794 1819 try:
1795 1820 # discovery
1796 1821 fci = discovery.findcommonincoming
1797 1822 commoninc = fci(unfi, remote, force=force)
1798 1823 common, inc, remoteheads = commoninc
1799 1824 fco = discovery.findcommonoutgoing
1800 1825 outgoing = fco(unfi, remote, onlyheads=revs,
1801 1826 commoninc=commoninc, force=force)
1802 1827
1803 1828
1804 1829 if not outgoing.missing:
1805 1830 # nothing to push
1806 1831 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1807 1832 ret = None
1808 1833 else:
1809 1834 # something to push
1810 1835 if not force:
1811 1836 # if self.obsstore == False --> no obsolete
1812 1837 # then, save the iteration
1813 1838 if unfi.obsstore:
1814 1839 # these messages are here for the 80-char line limit
1815 1840 mso = _("push includes obsolete changeset: %s!")
1816 1841 mst = "push includes %s changeset: %s!"
1817 1842 # plain versions for i18n tool to detect them
1818 1843 _("push includes unstable changeset: %s!")
1819 1844 _("push includes bumped changeset: %s!")
1820 1845 _("push includes divergent changeset: %s!")
1821 1846 # If we are to push and there is at least one
1822 1847 # obsolete or unstable changeset in missing, at
1823 1848 # least one of the missing heads will be obsolete or
1824 1849 # unstable. So checking heads only is ok
1825 1850 for node in outgoing.missingheads:
1826 1851 ctx = unfi[node]
1827 1852 if ctx.obsolete():
1828 1853 raise util.Abort(mso % ctx)
1829 1854 elif ctx.troubled():
1830 1855 raise util.Abort(_(mst)
1831 1856 % (ctx.troubles()[0],
1832 1857 ctx))
1833 1858 discovery.checkheads(unfi, remote, outgoing,
1834 1859 remoteheads, newbranch,
1835 1860 bool(inc))
1836 1861
1837 1862 # TODO: get bundlecaps from remote
1838 1863 bundlecaps = None
1839 1864 # create a changegroup from local
1840 1865 if revs is None and not outgoing.excluded:
1841 1866 # push everything,
1842 1867 # use the fast path, no race possible on push
1843 1868 bundler = changegroup.bundle10(self, bundlecaps)
1844 1869 cg = self._changegroupsubset(outgoing,
1845 1870 bundler,
1846 1871 'push',
1847 1872 fastpath=True)
1848 1873 else:
1849 1874 cg = self.getlocalbundle('push', outgoing, bundlecaps)
1850 1875
1851 1876 # apply changegroup to remote
1852 1877 if unbundle:
1853 1878 # local repo finds heads on server, finds out what
1854 1879 # revs it must push. once revs transferred, if server
1855 1880 # finds it has different heads (someone else won
1856 1881 # commit/push race), server aborts.
1857 1882 if force:
1858 1883 remoteheads = ['force']
1859 1884 # ssh: return remote's addchangegroup()
1860 1885 # http: return remote's addchangegroup() or 0 for error
1861 1886 ret = remote.unbundle(cg, remoteheads, 'push')
1862 1887 else:
1863 1888 # we return an integer indicating remote head count
1864 1889 # change
1865 1890 ret = remote.addchangegroup(cg, 'push', self.url())
1866 1891
1867 1892 if ret:
1868 1893 # push succeeded, synchronize the target of the push
1869 1894 cheads = outgoing.missingheads
1870 1895 elif revs is None:
1871 1896 # All-out push failed; synchronize all common
1872 1897 cheads = outgoing.commonheads
1873 1898 else:
1874 1899 # I want cheads = heads(::missingheads and ::commonheads)
1875 1900 # (missingheads is revs with secret changeset filtered out)
1876 1901 #
1877 1902 # This can be expressed as:
1878 1903 # cheads = ( (missingheads and ::commonheads)
1879 1904 #          + (commonheads and ::missingheads)
1880 1905 # )
1881 1906 #
1882 1907 # while trying to push we already computed the following:
1883 1908 # common = (::commonheads)
1884 1909 # missing = ((commonheads::missingheads) - commonheads)
1885 1910 #
1886 1911 # We can pick:
1887 1912 # * missingheads part of common (::commonheads)
1888 1913 common = set(outgoing.common)
1889 1914 cheads = [node for node in revs if node in common]
1890 1915 # and
1891 1916 # * commonheads parents on missing
1892 1917 revset = unfi.set('%ln and parents(roots(%ln))',
1893 1918 outgoing.commonheads,
1894 1919 outgoing.missing)
1895 1920 cheads.extend(c.node() for c in revset)
1896 1921 # even when we don't push, exchanging phase data is useful
1897 1922 remotephases = remote.listkeys('phases')
1898 1923 if (self.ui.configbool('ui', '_usedassubrepo', False)
1899 1924 and remotephases # server supports phases
1900 1925 and ret is None # nothing was pushed
1901 1926 and remotephases.get('publishing', False)):
1902 1927 # When:
1903 1928 # - this is a subrepo push
1904 1929 # - and the remote supports phases
1905 1930 # - and no changeset was pushed
1906 1931 # - and the remote is publishing
1907 1932 # we may be in the issue 3871 case!
1908 1933 # We drop the possible phase synchronisation done as a
1909 1934 # courtesy to publish changesets possibly draft locally
1910 1935 # on the remote.
1911 1936 remotephases = {'publishing': 'True'}
1912 1937 if not remotephases: # old server or public only repo
1913 1938 localphasemove(cheads)
1914 1939 # don't push any phase data as there is nothing to push
1915 1940 else:
1916 1941 ana = phases.analyzeremotephases(self, cheads, remotephases)
1917 1942 pheads, droots = ana
1918 1943 ### Apply remote phase on local
1919 1944 if remotephases.get('publishing', False):
1920 1945 localphasemove(cheads)
1921 1946 else: # publish = False
1922 1947 localphasemove(pheads)
1923 1948 localphasemove(cheads, phases.draft)
1924 1949 ### Apply local phase on remote
1925 1950
1926 1951 # Get the list of all revs draft on remote by public here.
1927 1952 # XXX Beware that the revset breaks if droots is not strictly
1928 1953 # XXX roots; we may want to ensure it is, but that is costly
1929 1954 outdated = unfi.set('heads((%ln::%ln) and public())',
1930 1955 droots, cheads)
1931 1956 for newremotehead in outdated:
1932 1957 r = remote.pushkey('phases',
1933 1958 newremotehead.hex(),
1934 1959 str(phases.draft),
1935 1960 str(phases.public))
1936 1961 if not r:
1937 1962 self.ui.warn(_('updating %s to public failed!\n')
1938 1963 % newremotehead)
1939 1964 self.ui.debug('try to push obsolete markers to remote\n')
1940 1965 obsolete.syncpush(self, remote)
1941 1966 finally:
1942 1967 if lock is not None:
1943 1968 lock.release()
1944 1969 finally:
1945 1970 if locallock is not None:
1946 1971 locallock.release()
1947 1972
1948 1973 self.ui.debug("checking for updated bookmarks\n")
1949 1974 rb = remote.listkeys('bookmarks')
1950 1975 revnums = map(unfi.changelog.rev, revs or [])
1951 1976 ancestors = [
1952 1977 a for a in unfi.changelog.ancestors(revnums, inclusive=True)]
1953 1978 for k in rb.keys():
1954 1979 if k in unfi._bookmarks:
1955 1980 nr, nl = rb[k], hex(self._bookmarks[k])
1956 1981 if nr in unfi:
1957 1982 cr = unfi[nr]
1958 1983 cl = unfi[nl]
1959 1984 if bookmarks.validdest(unfi, cr, cl):
1960 1985 if ancestors and cl.rev() not in ancestors:
1961 1986 continue
1962 1987 r = remote.pushkey('bookmarks', k, nr, nl)
1963 1988 if r:
1964 1989 self.ui.status(_("updating bookmark %s\n") % k)
1965 1990 else:
1966 1991 self.ui.warn(_('updating bookmark %s'
1967 1992 ' failed!\n') % k)
1968 1993
1969 1994 return ret
1970 1995
1971 1996 def changegroupinfo(self, nodes, source):
1972 1997 if self.ui.verbose or source == 'bundle':
1973 1998 self.ui.status(_("%d changesets found\n") % len(nodes))
1974 1999 if self.ui.debugflag:
1975 2000 self.ui.debug("list of changesets:\n")
1976 2001 for node in nodes:
1977 2002 self.ui.debug("%s\n" % hex(node))
1978 2003
1979 2004 def changegroupsubset(self, bases, heads, source):
1980 2005 """Compute a changegroup consisting of all the nodes that are
1981 2006 descendants of any of the bases and ancestors of any of the heads.
1982 2007 Return a chunkbuffer object whose read() method will return
1983 2008 successive changegroup chunks.
1984 2009
1985 2010 It is fairly complex as determining which filenodes and which
1986 2011 manifest nodes need to be included for the changeset to be complete
1987 2012 is non-trivial.
1988 2013
1989 2014 Another wrinkle is doing the reverse, figuring out which changeset in
1990 2015 the changegroup a particular filenode or manifestnode belongs to.
1991 2016 """
1992 2017 cl = self.changelog
1993 2018 if not bases:
1994 2019 bases = [nullid]
1995 2020 # TODO: remove call to nodesbetween.
1996 2021 csets, bases, heads = cl.nodesbetween(bases, heads)
1997 2022 bases = [p for n in bases for p in cl.parents(n) if p != nullid]
1998 2023 outgoing = discovery.outgoing(cl, bases, heads)
1999 2024 bundler = changegroup.bundle10(self)
2000 2025 return self._changegroupsubset(outgoing, bundler, source)
2001 2026
2002 2027 def getlocalbundle(self, source, outgoing, bundlecaps=None):
2003 2028 """Like getbundle, but taking a discovery.outgoing as an argument.
2004 2029
2005 2030 This is only implemented for local repos and reuses potentially
2006 2031 precomputed sets in outgoing."""
2007 2032 if not outgoing.missing:
2008 2033 return None
2009 2034 bundler = changegroup.bundle10(self, bundlecaps)
2010 2035 return self._changegroupsubset(outgoing, bundler, source)
2011 2036
2012 2037 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
2013 2038 """Like changegroupsubset, but returns the set difference between the
2014 2039 ancestors of heads and the ancestors common.
2015 2040
2016 2041 If heads is None, use the local heads. If common is None, use [nullid].
2017 2042
2018 2043 The nodes in common might not all be known locally due to the way the
2019 2044 current discovery protocol works.
2020 2045 """
2021 2046 cl = self.changelog
2022 2047 if common:
2023 2048 hasnode = cl.hasnode
2024 2049 common = [n for n in common if hasnode(n)]
2025 2050 else:
2026 2051 common = [nullid]
2027 2052 if not heads:
2028 2053 heads = cl.heads()
2029 2054 return self.getlocalbundle(source,
2030 2055 discovery.outgoing(cl, common, heads),
2031 2056 bundlecaps=bundlecaps)
2032 2057
2033 2058 @unfilteredmethod
2034 2059 def _changegroupsubset(self, outgoing, bundler, source,
2035 2060 fastpath=False):
2036 2061 commonrevs = outgoing.common
2037 2062 csets = outgoing.missing
2038 2063 heads = outgoing.missingheads
2039 2064 # We go through the fast path if we get told to, or if all (unfiltered)
2040 2065 # heads have been requested (since we then know all linkrevs will
2041 2066 # be pulled by the client).
2042 2067 heads.sort()
2043 2068 fastpathlinkrev = fastpath or (
2044 2069 self.filtername is None and heads == sorted(self.heads()))
2045 2070
2046 2071 self.hook('preoutgoing', throw=True, source=source)
2047 2072 self.changegroupinfo(csets, source)
2048 2073 gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
2049 2074 return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')
2050 2075
2051 2076 def changegroup(self, basenodes, source):
2052 2077 # to avoid a race we use changegroupsubset() (issue1320)
2053 2078 return self.changegroupsubset(basenodes, self.heads(), source)
2054 2079
2055 2080 @unfilteredmethod
2056 2081 def addchangegroup(self, source, srctype, url, emptyok=False):
2057 2082 """Add the changegroup returned by source.read() to this repo.
2058 2083 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2059 2084 the URL of the repo where this changegroup is coming from.
2060 2085
2061 2086 Return an integer summarizing the change to this repo:
2062 2087 - nothing changed or no source: 0
2063 2088 - more heads than before: 1+added heads (2..n)
2064 2089 - fewer heads than before: -1-removed heads (-2..-n)
2065 2090 - number of heads stays the same: 1
2066 2091 """
2067 2092 def csmap(x):
2068 2093 self.ui.debug("add changeset %s\n" % short(x))
2069 2094 return len(cl)
2070 2095
2071 2096 def revmap(x):
2072 2097 return cl.rev(x)
2073 2098
2074 2099 if not source:
2075 2100 return 0
2076 2101
2077 2102 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2078 2103
2079 2104 changesets = files = revisions = 0
2080 2105 efiles = set()
2081 2106
2082 2107 # write changelog data to temp files so concurrent readers will not see
2083 2108 # an inconsistent view
2084 2109 cl = self.changelog
2085 2110 cl.delayupdate()
2086 2111 oldheads = cl.heads()
2087 2112
2088 2113 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2089 2114 try:
2090 2115 trp = weakref.proxy(tr)
2091 2116 # pull off the changeset group
2092 2117 self.ui.status(_("adding changesets\n"))
2093 2118 clstart = len(cl)
2094 2119 class prog(object):
2095 2120 step = _('changesets')
2096 2121 count = 1
2097 2122 ui = self.ui
2098 2123 total = None
2099 2124 def __call__(self):
2100 2125 self.ui.progress(self.step, self.count, unit=_('chunks'),
2101 2126 total=self.total)
2102 2127 self.count += 1
2103 2128 pr = prog()
2104 2129 source.callback = pr
2105 2130
2106 2131 source.changelogheader()
2107 2132 srccontent = cl.addgroup(source, csmap, trp)
2108 2133 if not (srccontent or emptyok):
2109 2134 raise util.Abort(_("received changelog group is empty"))
2110 2135 clend = len(cl)
2111 2136 changesets = clend - clstart
2112 2137 for c in xrange(clstart, clend):
2113 2138 efiles.update(self[c].files())
2114 2139 efiles = len(efiles)
2115 2140 self.ui.progress(_('changesets'), None)
2116 2141
2117 2142 # pull off the manifest group
2118 2143 self.ui.status(_("adding manifests\n"))
2119 2144 pr.step = _('manifests')
2120 2145 pr.count = 1
2121 2146 pr.total = changesets # manifests <= changesets
2122 2147 # no need to check for empty manifest group here:
2123 2148 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2124 2149 # no new manifest will be created and the manifest group will
2125 2150 # be empty during the pull
2126 2151 source.manifestheader()
2127 2152 self.manifest.addgroup(source, revmap, trp)
2128 2153 self.ui.progress(_('manifests'), None)
2129 2154
2130 2155 needfiles = {}
2131 2156 if self.ui.configbool('server', 'validate', default=False):
2132 2157 # validate incoming csets have their manifests
2133 2158 for cset in xrange(clstart, clend):
2134 2159 mfest = self.changelog.read(self.changelog.node(cset))[0]
2135 2160 mfest = self.manifest.readdelta(mfest)
2136 2161 # store file nodes we must see
2137 2162 for f, n in mfest.iteritems():
2138 2163 needfiles.setdefault(f, set()).add(n)
2139 2164
2140 2165 # process the files
2141 2166 self.ui.status(_("adding file changes\n"))
2142 2167 pr.step = _('files')
2143 2168 pr.count = 1
2144 2169 pr.total = efiles
2145 2170 source.callback = None
2146 2171
2147 2172 newrevs, newfiles = self.addchangegroupfiles(source, revmap, trp,
2148 2173 pr, needfiles)
2149 2174 revisions += newrevs
2150 2175 files += newfiles
2151 2176
2152 2177 dh = 0
2153 2178 if oldheads:
2154 2179 heads = cl.heads()
2155 2180 dh = len(heads) - len(oldheads)
2156 2181 for h in heads:
2157 2182 if h not in oldheads and self[h].closesbranch():
2158 2183 dh -= 1
2159 2184 htext = ""
2160 2185 if dh:
2161 2186 htext = _(" (%+d heads)") % dh
2162 2187
2163 2188 self.ui.status(_("added %d changesets"
2164 2189 " with %d changes to %d files%s\n")
2165 2190 % (changesets, revisions, files, htext))
2166 2191 self.invalidatevolatilesets()
2167 2192
2168 2193 if changesets > 0:
2169 2194 p = lambda: cl.writepending() and self.root or ""
2170 2195 self.hook('pretxnchangegroup', throw=True,
2171 2196 node=hex(cl.node(clstart)), source=srctype,
2172 2197 url=url, pending=p)
2173 2198
2174 2199 added = [cl.node(r) for r in xrange(clstart, clend)]
2175 2200 publishing = self.ui.configbool('phases', 'publish', True)
2176 2201 if srctype == 'push':
2177 2202 # Old servers cannot push the boundary themselves.
2178 2203 # New servers won't push the boundary if the changeset already
2179 2204 # existed locally as secret
2180 2205 #
2181 2206 # We should not use 'added' here but the list of all changes in
2182 2207 # the bundle
2183 2208 if publishing:
2184 2209 phases.advanceboundary(self, phases.public, srccontent)
2185 2210 else:
2186 2211 phases.advanceboundary(self, phases.draft, srccontent)
2187 2212 phases.retractboundary(self, phases.draft, added)
2188 2213 elif srctype != 'strip':
2189 2214 # publishing only alters behavior during push
2190 2215 #
2191 2216 # strip should not touch boundary at all
2192 2217 phases.retractboundary(self, phases.draft, added)
2193 2218
2194 2219 # make changelog see real files again
2195 2220 cl.finalize(trp)
2196 2221
2197 2222 tr.close()
2198 2223
2199 2224 if changesets > 0:
2200 2225 if srctype != 'strip':
2201 2226 # During strip, the branchcache is invalid but the coming call
2202 2227 # to `destroyed` will repair it.
2203 2228 # In other cases we can safely update the cache on disk.
2204 2229 branchmap.updatecache(self.filtered('served'))
2205 2230 def runhooks():
2206 2231 # forcefully update the on-disk branch cache
2207 2232 self.ui.debug("updating the branch cache\n")
2208 2233 self.hook("changegroup", node=hex(cl.node(clstart)),
2209 2234 source=srctype, url=url)
2210 2235
2211 2236 for n in added:
2212 2237 self.hook("incoming", node=hex(n), source=srctype,
2213 2238 url=url)
2214 2239
2215 2240 newheads = [h for h in self.heads() if h not in oldheads]
2216 2241 self.ui.log("incoming",
2217 2242 "%s incoming changes - new heads: %s\n",
2218 2243 len(added),
2219 2244 ', '.join([hex(c[:6]) for c in newheads]))
2220 2245 self._afterlock(runhooks)
2221 2246
2222 2247 finally:
2223 2248 tr.release()
2224 2249 # never return 0 here:
2225 2250 if dh < 0:
2226 2251 return dh - 1
2227 2252 else:
2228 2253 return dh + 1
2229 2254
2230 2255 def addchangegroupfiles(self, source, revmap, trp, pr, needfiles):
2231 2256 revisions = 0
2232 2257 files = 0
2233 2258 while True:
2234 2259 chunkdata = source.filelogheader()
2235 2260 if not chunkdata:
2236 2261 break
2237 2262 f = chunkdata["filename"]
2238 2263 self.ui.debug("adding %s revisions\n" % f)
2239 2264 pr()
2240 2265 fl = self.file(f)
2241 2266 o = len(fl)
2242 2267 if not fl.addgroup(source, revmap, trp):
2243 2268 raise util.Abort(_("received file revlog group is empty"))
2244 2269 revisions += len(fl) - o
2245 2270 files += 1
2246 2271 if f in needfiles:
2247 2272 needs = needfiles[f]
2248 2273 for new in xrange(o, len(fl)):
2249 2274 n = fl.node(new)
2250 2275 if n in needs:
2251 2276 needs.remove(n)
2252 2277 else:
2253 2278 raise util.Abort(
2254 2279 _("received spurious file revlog entry"))
2255 2280 if not needs:
2256 2281 del needfiles[f]
2257 2282 self.ui.progress(_('files'), None)
2258 2283
2259 2284 for f, needs in needfiles.iteritems():
2260 2285 fl = self.file(f)
2261 2286 for n in needs:
2262 2287 try:
2263 2288 fl.rev(n)
2264 2289 except error.LookupError:
2265 2290 raise util.Abort(
2266 2291 _('missing file data for %s:%s - run hg verify') %
2267 2292 (f, hex(n)))
2268 2293
2269 2294 return revisions, files
2270 2295
2271 2296 def stream_in(self, remote, requirements):
2272 2297 lock = self.lock()
2273 2298 try:
2274 2299 # Save remote branchmap. We will use it later
2275 2300 # to speed up branchcache creation
2276 2301 rbranchmap = None
2277 2302 if remote.capable("branchmap"):
2278 2303 rbranchmap = remote.branchmap()
2279 2304
2280 2305 fp = remote.stream_out()
2281 2306 l = fp.readline()
2282 2307 try:
2283 2308 resp = int(l)
2284 2309 except ValueError:
2285 2310 raise error.ResponseError(
2286 2311 _('unexpected response from remote server:'), l)
2287 2312 if resp == 1:
2288 2313 raise util.Abort(_('operation forbidden by server'))
2289 2314 elif resp == 2:
2290 2315 raise util.Abort(_('locking the remote repository failed'))
2291 2316 elif resp != 0:
2292 2317 raise util.Abort(_('the server sent an unknown error code'))
2293 2318 self.ui.status(_('streaming all changes\n'))
2294 2319 l = fp.readline()
2295 2320 try:
2296 2321 total_files, total_bytes = map(int, l.split(' ', 1))
2297 2322 except (ValueError, TypeError):
2298 2323 raise error.ResponseError(
2299 2324 _('unexpected response from remote server:'), l)
2300 2325 self.ui.status(_('%d files to transfer, %s of data\n') %
2301 2326 (total_files, util.bytecount(total_bytes)))
2302 2327 handled_bytes = 0
2303 2328 self.ui.progress(_('clone'), 0, total=total_bytes)
2304 2329 start = time.time()
2305 2330 for i in xrange(total_files):
2306 2331 # XXX doesn't support '\n' or '\r' in filenames
2307 2332 l = fp.readline()
2308 2333 try:
2309 2334 name, size = l.split('\0', 1)
2310 2335 size = int(size)
2311 2336 except (ValueError, TypeError):
2312 2337 raise error.ResponseError(
2313 2338 _('unexpected response from remote server:'), l)
2314 2339 if self.ui.debugflag:
2315 2340 self.ui.debug('adding %s (%s)\n' %
2316 2341 (name, util.bytecount(size)))
2317 2342 # for backwards compat, name was partially encoded
2318 2343 ofp = self.sopener(store.decodedir(name), 'w')
2319 2344 for chunk in util.filechunkiter(fp, limit=size):
2320 2345 handled_bytes += len(chunk)
2321 2346 self.ui.progress(_('clone'), handled_bytes,
2322 2347 total=total_bytes)
2323 2348 ofp.write(chunk)
2324 2349 ofp.close()
2325 2350 elapsed = time.time() - start
2326 2351 if elapsed <= 0:
2327 2352 elapsed = 0.001
2328 2353 self.ui.progress(_('clone'), None)
2329 2354 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2330 2355 (util.bytecount(total_bytes), elapsed,
2331 2356 util.bytecount(total_bytes / elapsed)))
2332 2357
2333 2358 # new requirements = old non-format requirements +
2334 2359 # new format-related requirements
2335 2360 # from the streamed-in repository
2336 2361 requirements.update(set(self.requirements) - self.supportedformats)
2337 2362 self._applyrequirements(requirements)
2338 2363 self._writerequirements()
2339 2364
2340 2365 if rbranchmap:
2341 2366 rbheads = []
2342 2367 for bheads in rbranchmap.itervalues():
2343 2368 rbheads.extend(bheads)
2344 2369
2345 2370 if rbheads:
2346 2371 rtiprev = max((int(self.changelog.rev(node))
2347 2372 for node in rbheads))
2348 2373 cache = branchmap.branchcache(rbranchmap,
2349 2374 self[rtiprev].node(),
2350 2375 rtiprev)
2351 2376 # Try to stick it as low as possible
2352 2377 # filters above 'served' are unlikely to be fetched from a clone
2353 2378 for candidate in ('base', 'immutable', 'served'):
2354 2379 rview = self.filtered(candidate)
2355 2380 if cache.validfor(rview):
2356 2381 self._branchcaches[candidate] = cache
2357 2382 cache.write(rview)
2358 2383 break
2359 2384 self.invalidate()
2360 2385 return len(self.heads()) + 1
2361 2386 finally:
2362 2387 lock.release()
2363 2388
2364 2389 def clone(self, remote, heads=[], stream=False):
2365 2390 '''clone remote repository.
2366 2391
2367 2392 keyword arguments:
2368 2393 heads: list of revs to clone (forces use of pull)
2369 2394 stream: use streaming clone if possible'''
2370 2395
2371 2396 # now, all clients that can request uncompressed clones can
2372 2397 # read repo formats supported by all servers that can serve
2373 2398 # them.
2374 2399
2375 2400 # if revlog format changes, client will have to check version
2376 2401 # and format flags on "stream" capability, and use
2377 2402 # uncompressed only if compatible.
2378 2403
2379 2404 if not stream:
2380 2405 # if the server explicitly prefers to stream (for fast LANs)
2381 2406 stream = remote.capable('stream-preferred')
2382 2407
2383 2408 if stream and not heads:
2384 2409 # 'stream' means remote revlog format is revlogv1 only
2385 2410 if remote.capable('stream'):
2386 2411 return self.stream_in(remote, set(('revlogv1',)))
2387 2412 # otherwise, 'streamreqs' contains the remote revlog format
2388 2413 streamreqs = remote.capable('streamreqs')
2389 2414 if streamreqs:
2390 2415 streamreqs = set(streamreqs.split(','))
2391 2416 # if we support it, stream in and adjust our requirements
2392 2417 if not streamreqs - self.supportedformats:
2393 2418 return self.stream_in(remote, streamreqs)
2394 2419 return self.pull(remote, heads)
2395 2420
2396 2421 def pushkey(self, namespace, key, old, new):
2397 2422 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2398 2423 old=old, new=new)
2399 2424 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2400 2425 ret = pushkey.push(self, namespace, key, old, new)
2401 2426 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2402 2427 ret=ret)
2403 2428 return ret
2404 2429
2405 2430 def listkeys(self, namespace):
2406 2431 self.hook('prelistkeys', throw=True, namespace=namespace)
2407 2432 self.ui.debug('listing keys for "%s"\n' % namespace)
2408 2433 values = pushkey.list(self, namespace)
2409 2434 self.hook('listkeys', namespace=namespace, values=values)
2410 2435 return values
2411 2436
2412 2437 def debugwireargs(self, one, two, three=None, four=None, five=None):
2413 2438 '''used to test argument passing over the wire'''
2414 2439 return "%s %s %s %s %s" % (one, two, three, four, five)
2415 2440
2416 2441 def savecommitmessage(self, text):
2417 2442 fp = self.opener('last-message.txt', 'wb')
2418 2443 try:
2419 2444 fp.write(text)
2420 2445 finally:
2421 2446 fp.close()
2422 2447 return self.pathto(fp.name[len(self.root) + 1:])
2423 2448
2424 2449 # used to avoid circular references so destructors work
2425 2450 def aftertrans(files):
2426 2451 renamefiles = [tuple(t) for t in files]
2427 2452 def a():
2428 2453 for vfs, src, dest in renamefiles:
2429 2454 try:
2430 2455 vfs.rename(src, dest)
2431 2456 except OSError: # journal file does not yet exist
2432 2457 pass
2433 2458 return a
2434 2459
2435 2460 def undoname(fn):
2436 2461 base, name = os.path.split(fn)
2437 2462 assert name.startswith('journal')
2438 2463 return os.path.join(base, name.replace('journal', 'undo', 1))
2439 2464
2440 2465 def instance(ui, path, create):
2441 2466 return localrepository(ui, util.urllocalpath(path), create)
2442 2467
2443 2468 def islocal(path):
2444 2469 return True
@@ -1,159 +1,161 b''
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 from i18n import _
11 11 import changelog, byterange, url, error
12 12 import localrepo, manifest, util, scmutil, store
13 13 import urllib, urllib2, errno, os
14 14
15 15 class httprangereader(object):
16 16 def __init__(self, url, opener):
17 17 # we assume opener has HTTPRangeHandler
18 18 self.url = url
19 19 self.pos = 0
20 20 self.opener = opener
21 21 self.name = url
22 22 def seek(self, pos):
23 23 self.pos = pos
24 24 def read(self, bytes=None):
25 25 req = urllib2.Request(self.url)
26 26 end = ''
27 27 if bytes:
28 28 end = self.pos + bytes - 1
29 29 if self.pos or end:
30 30 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
31 31
32 32 try:
33 33 f = self.opener.open(req)
34 34 data = f.read()
35 35 # Python 2.6+ defines a getcode() function, and 2.4 and
36 36 # 2.5 appear to always have an undocumented code attribute
37 37 # set. If we can't read either of those, fall back to 206
38 38 # and hope for the best.
39 39 code = getattr(f, 'getcode', lambda : getattr(f, 'code', 206))()
40 40 except urllib2.HTTPError, inst:
41 41 num = inst.code == 404 and errno.ENOENT or None
42 42 raise IOError(num, inst)
43 43 except urllib2.URLError, inst:
44 44 raise IOError(None, inst.reason[1])
45 45
46 46 if code == 200:
47 47 # HTTPRangeHandler does nothing if remote does not support
48 48 # Range headers and returns the full entity. Let's slice it.
49 49 if bytes:
50 50 data = data[self.pos:self.pos + bytes]
51 51 else:
52 52 data = data[self.pos:]
53 53 elif bytes:
54 54 data = data[:bytes]
55 55 self.pos += len(data)
56 56 return data
57 57 def __iter__(self):
58 58 return iter(self.read().splitlines(1))
59 59 def close(self):
60 60 pass
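For reference, the Range header built in `read()` above uses an inclusive end offset; a quick standalone check of the arithmetic:

    pos, nbytes = 100, 10
    print('Range: bytes=%d-%s' % (pos, pos + nbytes - 1))
    # -> Range: bytes=100-109 (ten bytes, inclusive end)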
61 61
62 62 def build_opener(ui, authinfo):
63 63 # urllib cannot handle URLs with embedded user or passwd
64 64 urlopener = url.opener(ui, authinfo)
65 65 urlopener.add_handler(byterange.HTTPRangeHandler())
66 66
67 67 class statichttpvfs(scmutil.abstractvfs):
68 68 def __init__(self, base):
69 69 self.base = base
70 70
71 71 def __call__(self, path, mode="r", atomictemp=None):
72 72 if mode not in ('r', 'rb'):
73 73 raise IOError('Permission denied')
74 74 f = "/".join((self.base, urllib.quote(path)))
75 75 return httprangereader(f, urlopener)
76 76
77 77 def join(self, path):
78 78 if path:
79 79 return os.path.join(self.base, path)
80 80 else:
81 81 return self.base
82 82
83 83 return statichttpvfs
84 84
85 85 class statichttppeer(localrepo.localpeer):
86 86 def local(self):
87 87 return None
88 88 def canpush(self):
89 89 return False
90 90
91 91 class statichttprepository(localrepo.localrepository):
92 supported = localrepo.localrepository._basesupported
93
92 94 def __init__(self, ui, path):
93 95 self._url = path
94 96 self.ui = ui
95 97
96 98 self.root = path
97 99 u = util.url(path.rstrip('/') + "/.hg")
98 100 self.path, authinfo = u.authinfo()
99 101
100 102 opener = build_opener(ui, authinfo)
101 103 self.opener = opener(self.path)
102 104 self.vfs = self.opener
103 105 self._phasedefaults = []
104 106
105 107 try:
106 108 requirements = scmutil.readrequires(self.opener, self.supported)
107 109 except IOError, inst:
108 110 if inst.errno != errno.ENOENT:
109 111 raise
110 112 requirements = set()
111 113
112 114 # check if it is a non-empty old-style repository
113 115 try:
114 116 fp = self.opener("00changelog.i")
115 117 fp.read(1)
116 118 fp.close()
117 119 except IOError, inst:
118 120 if inst.errno != errno.ENOENT:
119 121 raise
120 122 # we do not care about empty old-style repositories here
121 123 msg = _("'%s' does not appear to be an hg repository") % path
122 124 raise error.RepoError(msg)
123 125
124 126 # setup store
125 127 self.store = store.store(requirements, self.path, opener)
126 128 self.spath = self.store.path
127 129 self.sopener = self.store.opener
128 130 self.svfs = self.sopener
129 131 self.sjoin = self.store.join
130 132 self._filecache = {}
131 133 self.requirements = requirements
132 134
133 135 self.manifest = manifest.manifest(self.sopener)
134 136 self.changelog = changelog.changelog(self.sopener)
135 137 self._tags = None
136 138 self.nodetagscache = None
137 139 self._branchcaches = {}
138 140 self.encodepats = None
139 141 self.decodepats = None
140 142
141 143 def _restrictcapabilities(self, caps):
142 144 return caps.difference(["pushkey"])
143 145
144 146 def url(self):
145 147 return self._url
146 148
147 149 def local(self):
148 150 return False
149 151
150 152 def peer(self):
151 153 return statichttppeer(self)
152 154
153 155 def lock(self, wait=True):
154 156 raise util.Abort(_('cannot lock static-http repository'))
155 157
156 158 def instance(ui, path, create):
157 159 if create:
158 160 raise util.Abort(_('cannot create new static-http repository'))
159 161 return statichttprepository(ui, path[7:])
@@ -1,19 +1,69 b''
1 1 $ hg init t
2 2 $ cd t
3 3 $ echo a > a
4 4 $ hg add a
5 5 $ hg commit -m test
6 6 $ rm .hg/requires
7 7 $ hg tip
8 8 abort: index 00changelog.i unknown format 2!
9 9 [255]
10 10 $ echo indoor-pool > .hg/requires
11 11 $ hg tip
12 12 abort: unknown repository format: requires features 'indoor-pool' (upgrade Mercurial)!
13 13 [255]
14 14 $ echo outdoor-pool >> .hg/requires
15 15 $ hg tip
16 16 abort: unknown repository format: requires features 'indoor-pool', 'outdoor-pool' (upgrade Mercurial)!
17 17 [255]
18 $ cd ..
19
20 Test checking between features supported locally and ones required in
21 another repository of push/pull/clone on localhost:
22
23 $ mkdir supported-locally
24 $ cd supported-locally
25
26 $ hg init supported
27 $ echo a > supported/a
28 $ hg -R supported commit -Am '#0 at supported'
29 adding a
30
31 $ echo 'featuresetup-test' >> supported/.hg/requires
32 $ cat > $TESTTMP/supported-locally/supportlocally.py <<EOF
33 > from mercurial import localrepo, extensions
34 > def featuresetup(ui, supported):
35 > for name, module in extensions.extensions(ui):
36 > if __name__ == module.__name__:
37 > # support specific feature locally
38 > supported |= set(['featuresetup-test'])
39 > return
40 > def uisetup(ui):
41 > localrepo.localrepository.featuresetupfuncs.add(featuresetup)
42 > EOF
43 $ cat > supported/.hg/hgrc <<EOF
44 > [extensions]
45 > # enable extension locally
46 > supportlocally = $TESTTMP/supported-locally/supportlocally.py
47 > EOF
48 $ hg -R supported status
49
50 $ hg init push-dst
51 $ hg -R supported push push-dst
52 pushing to push-dst
53 abort: required features are not supported in the destination: featuresetup-test
54 [255]
55
56 $ hg init pull-src
57 $ hg -R pull-src pull supported
58 pulling from supported
59 abort: required features are not supported in the destination: featuresetup-test
60 [255]
61
62 $ hg clone supported clone-dst
63 abort: unknown repository format: requires features 'featuresetup-test' (upgrade Mercurial)!
64 [255]
65 $ hg clone --pull supported clone-dst
66 abort: required features are not supported in the destination: featuresetup-test
67 [255]
18 68
19 69 $ cd ..