bundle-ng: move bundle generation to changegroup.py
Benoit Boissinot
r19204:e9c5b1c2 default
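This commit moves the changegroup-generation callbacks that previously lived in localrepo.py into changegroup.bundle10 itself: generate() now takes the set of common revisions, the outgoing changelog nodes, a fastpathlinkrev flag, and a source string instead of the four caller-supplied callbacks of the old signature (see the old/new lines at 294 in the first hunk). The sketch below is illustrative only: the helper makebundle() and the way commonrevs/clnodes are obtained are assumptions, not part of this change; the real call sites are in localrepo.py (second hunk).

    # Illustrative sketch of driving the new bundle10.generate() signature.
    # makebundle() is hypothetical; commonrevs (revision numbers already on the
    # receiving side) and clnodes (outgoing changelog nodes) would come from
    # discovery and are assumed inputs here.
    from mercurial import changegroup, util

    def makebundle(repo, commonrevs, clnodes, filename):
        bundler = changegroup.bundle10(repo)
        # generate() now performs the manifest/file lookups itself instead of
        # receiving getmfnodes/getfiles/getfilenodes callbacks from the caller
        chunks = bundler.generate(commonrevs, clnodes,
                                  fastpathlinkrev=False, source='bundle')
        cg = changegroup.unbundle10(util.chunkbuffer(chunks), 'UN')
        return changegroup.writebundle(cg, filename, 'HG10BZ')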
@@ -1,354 +1,417 @@ mercurial/changegroup.py
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import nullrev, hex
10 10 import mdiff, util, dagutil
11 11 import struct, os, bz2, zlib, tempfile
12 12
13 13 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"
14 14
15 15 def readexactly(stream, n):
16 16 '''read n bytes from stream.read and abort if less was available'''
17 17 s = stream.read(n)
18 18 if len(s) < n:
19 19 raise util.Abort(_("stream ended unexpectedly"
20 20 " (got %d bytes, expected %d)")
21 21 % (len(s), n))
22 22 return s
23 23
24 24 def getchunk(stream):
25 25 """return the next chunk from stream as a string"""
26 26 d = readexactly(stream, 4)
27 27 l = struct.unpack(">l", d)[0]
28 28 if l <= 4:
29 29 if l:
30 30 raise util.Abort(_("invalid chunk length %d") % l)
31 31 return ""
32 32 return readexactly(stream, l - 4)
33 33
34 34 def chunkheader(length):
35 35 """return a changegroup chunk header (string)"""
36 36 return struct.pack(">l", length + 4)
37 37
38 38 def closechunk():
39 39 """return a changegroup chunk header (string) for a zero-length chunk"""
40 40 return struct.pack(">l", 0)
41 41
42 42 class nocompress(object):
43 43 def compress(self, x):
44 44 return x
45 45 def flush(self):
46 46 return ""
47 47
48 48 bundletypes = {
49 49 "": ("", nocompress), # only when using unbundle on ssh and old http servers
50 50 # since the unification ssh accepts a header but there
51 51 # is no capability signaling it.
52 52 "HG10UN": ("HG10UN", nocompress),
53 53 "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
54 54 "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
55 55 }
56 56
57 57 # hgweb uses this list to communicate its preferred type
58 58 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
59 59
60 60 def writebundle(cg, filename, bundletype):
61 61 """Write a bundle file and return its filename.
62 62
63 63 Existing files will not be overwritten.
64 64 If no filename is specified, a temporary file is created.
65 65 bz2 compression can be turned off.
66 66 The bundle file will be deleted in case of errors.
67 67 """
68 68
69 69 fh = None
70 70 cleanup = None
71 71 try:
72 72 if filename:
73 73 fh = open(filename, "wb")
74 74 else:
75 75 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
76 76 fh = os.fdopen(fd, "wb")
77 77 cleanup = filename
78 78
79 79 header, compressor = bundletypes[bundletype]
80 80 fh.write(header)
81 81 z = compressor()
82 82
83 83 # parse the changegroup data, otherwise we will block
84 84 # in case of sshrepo because we don't know the end of the stream
85 85
86 86 # an empty chunkgroup is the end of the changegroup
87 87 # a changegroup has at least 2 chunkgroups (changelog and manifest).
88 88 # after that, an empty chunkgroup is the end of the changegroup
89 89 empty = False
90 90 count = 0
91 91 while not empty or count <= 2:
92 92 empty = True
93 93 count += 1
94 94 while True:
95 95 chunk = getchunk(cg)
96 96 if not chunk:
97 97 break
98 98 empty = False
99 99 fh.write(z.compress(chunkheader(len(chunk))))
100 100 pos = 0
101 101 while pos < len(chunk):
102 102 next = pos + 2**20
103 103 fh.write(z.compress(chunk[pos:next]))
104 104 pos = next
105 105 fh.write(z.compress(closechunk()))
106 106 fh.write(z.flush())
107 107 cleanup = None
108 108 return filename
109 109 finally:
110 110 if fh is not None:
111 111 fh.close()
112 112 if cleanup is not None:
113 113 os.unlink(cleanup)
114 114
115 115 def decompressor(fh, alg):
116 116 if alg == 'UN':
117 117 return fh
118 118 elif alg == 'GZ':
119 119 def generator(f):
120 120 zd = zlib.decompressobj()
121 121 for chunk in util.filechunkiter(f):
122 122 yield zd.decompress(chunk)
123 123 elif alg == 'BZ':
124 124 def generator(f):
125 125 zd = bz2.BZ2Decompressor()
126 126 zd.decompress("BZ")
127 127 for chunk in util.filechunkiter(f, 4096):
128 128 yield zd.decompress(chunk)
129 129 else:
130 130 raise util.Abort("unknown bundle compression '%s'" % alg)
131 131 return util.chunkbuffer(generator(fh))
132 132
133 133 class unbundle10(object):
134 134 deltaheader = _BUNDLE10_DELTA_HEADER
135 135 deltaheadersize = struct.calcsize(deltaheader)
136 136 def __init__(self, fh, alg):
137 137 self._stream = decompressor(fh, alg)
138 138 self._type = alg
139 139 self.callback = None
140 140 def compressed(self):
141 141 return self._type != 'UN'
142 142 def read(self, l):
143 143 return self._stream.read(l)
144 144 def seek(self, pos):
145 145 return self._stream.seek(pos)
146 146 def tell(self):
147 147 return self._stream.tell()
148 148 def close(self):
149 149 return self._stream.close()
150 150
151 151 def chunklength(self):
152 152 d = readexactly(self._stream, 4)
153 153 l = struct.unpack(">l", d)[0]
154 154 if l <= 4:
155 155 if l:
156 156 raise util.Abort(_("invalid chunk length %d") % l)
157 157 return 0
158 158 if self.callback:
159 159 self.callback()
160 160 return l - 4
161 161
162 162 def changelogheader(self):
163 163 """v10 does not have a changelog header chunk"""
164 164 return {}
165 165
166 166 def manifestheader(self):
167 167 """v10 does not have a manifest header chunk"""
168 168 return {}
169 169
170 170 def filelogheader(self):
171 171 """return the header of the filelogs chunk, v10 only has the filename"""
172 172 l = self.chunklength()
173 173 if not l:
174 174 return {}
175 175 fname = readexactly(self._stream, l)
176 176 return dict(filename=fname)
177 177
178 178 def _deltaheader(self, headertuple, prevnode):
179 179 node, p1, p2, cs = headertuple
180 180 if prevnode is None:
181 181 deltabase = p1
182 182 else:
183 183 deltabase = prevnode
184 184 return node, p1, p2, deltabase, cs
185 185
186 186 def deltachunk(self, prevnode):
187 187 l = self.chunklength()
188 188 if not l:
189 189 return {}
190 190 headerdata = readexactly(self._stream, self.deltaheadersize)
191 191 header = struct.unpack(self.deltaheader, headerdata)
192 192 delta = readexactly(self._stream, l - self.deltaheadersize)
193 193 node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
194 194 return dict(node=node, p1=p1, p2=p2, cs=cs,
195 195 deltabase=deltabase, delta=delta)
196 196
197 197 class headerlessfixup(object):
198 198 def __init__(self, fh, h):
199 199 self._h = h
200 200 self._fh = fh
201 201 def read(self, n):
202 202 if self._h:
203 203 d, self._h = self._h[:n], self._h[n:]
204 204 if len(d) < n:
205 205 d += readexactly(self._fh, n - len(d))
206 206 return d
207 207 return readexactly(self._fh, n)
208 208
209 209 def readbundle(fh, fname):
210 210 header = readexactly(fh, 6)
211 211
212 212 if not fname:
213 213 fname = "stream"
214 214 if not header.startswith('HG') and header.startswith('\0'):
215 215 fh = headerlessfixup(fh, header)
216 216 header = "HG10UN"
217 217
218 218 magic, version, alg = header[0:2], header[2:4], header[4:6]
219 219
220 220 if magic != 'HG':
221 221 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
222 222 if version != '10':
223 223 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
224 224 return unbundle10(fh, alg)
225 225
226 226 class bundle10(object):
227 227 deltaheader = _BUNDLE10_DELTA_HEADER
228 228 def __init__(self, repo, bundlecaps=None):
229 229 """Given a source repo, construct a bundler.
230 230
231 231 bundlecaps is optional and can be used to specify the set of
232 232 capabilities which can be used to build the bundle.
233 233 """
234 234 # Set of capabilities we can use to build the bundle.
235 235 if bundlecaps is None:
236 236 bundlecaps = set()
237 237 self._bundlecaps = bundlecaps
238 238 self._changelog = repo.changelog
239 239 self._manifest = repo.manifest
240 240 reorder = repo.ui.config('bundle', 'reorder', 'auto')
241 241 if reorder == 'auto':
242 242 reorder = None
243 243 else:
244 244 reorder = util.parsebool(reorder)
245 245 self._repo = repo
246 246 self._reorder = reorder
247 247 self.count = [0, 0]
248 248 def start(self, lookup):
249 249 self._lookup = lookup
250 250 def close(self):
251 251 return closechunk()
252 252
253 253 def fileheader(self, fname):
254 254 return chunkheader(len(fname)) + fname
255 255
256 256 def group(self, nodelist, revlog, reorder=None):
257 257 """Calculate a delta group, yielding a sequence of changegroup chunks
258 258 (strings).
259 259
260 260 Given a list of changeset revs, return a set of deltas and
261 261 metadata corresponding to nodes. The first delta is
262 262 first parent(nodelist[0]) -> nodelist[0], the receiver is
263 263 guaranteed to have this parent as it has all history before
264 264 these changesets. In the case firstparent is nullrev the
265 265 changegroup starts with a full revision.
266 266 """
267 267
268 268 # if we don't have any revisions touched by these changesets, bail
269 269 if len(nodelist) == 0:
270 270 yield self.close()
271 271 return
272 272
273 273 # for generaldelta revlogs, we linearize the revs; this will both be
274 274 # much quicker and generate a much smaller bundle
275 275 if (revlog._generaldelta and reorder is not False) or reorder:
276 276 dag = dagutil.revlogdag(revlog)
277 277 revs = set(revlog.rev(n) for n in nodelist)
278 278 revs = dag.linearize(revs)
279 279 else:
280 280 revs = sorted([revlog.rev(n) for n in nodelist])
281 281
282 282 # add the parent of the first rev
283 283 p = revlog.parentrevs(revs[0])[0]
284 284 revs.insert(0, p)
285 285
286 286 # build deltas
287 287 for r in xrange(len(revs) - 1):
288 288 prev, curr = revs[r], revs[r + 1]
289 289 for c in self.revchunk(revlog, curr, prev):
290 290 yield c
291 291
292 292 yield self.close()
293 293
294 def generate(self, clnodes, getmfnodes, getfiles, getfilenodes, source):
294 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
295 295 '''yield a sequence of changegroup chunks (strings)'''
296 296 repo = self._repo
297 297 cl = self._changelog
298 298 mf = self._manifest
299 299 reorder = self._reorder
300 300 progress = repo.ui.progress
301 301 count = self.count
302 302 _bundling = _('bundling')
303 _changesets = _('changesets')
304 _manifests = _('manifests')
305 _files = _('files')
306
307 mfs = {} # needed manifests
308 fnodes = {} # needed file nodes
309 changedfiles = set()
310 fstate = ['', {}]
311
312 # filter any nodes that claim to be part of the known set
313 def prune(revlog, missing):
314 rr, rl = revlog.rev, revlog.linkrev
315 return [n for n in missing
316 if rl(rr(n)) not in commonrevs]
317
318 def lookup(revlog, x):
319 if revlog == cl:
320 c = cl.read(x)
321 changedfiles.update(c[3])
322 mfs.setdefault(c[0], x)
323 count[0] += 1
324 progress(_bundling, count[0],
325 unit=_changesets, total=count[1])
326 return x
327 elif revlog == mf:
328 clnode = mfs[x]
329 if not fastpathlinkrev:
330 mdata = mf.readfast(x)
331 for f, n in mdata.iteritems():
332 if f in changedfiles:
333 fnodes[f].setdefault(n, clnode)
334 count[0] += 1
335 progress(_bundling, count[0],
336 unit=_manifests, total=count[1])
337 return clnode
338 else:
339 progress(_bundling, count[0], item=fstate[0],
340 unit=_files, total=count[1])
341 return fstate[1][x]
342
343 self.start(lookup)
344
345 def getmfnodes():
346 for f in changedfiles:
347 fnodes[f] = {}
348 count[:] = [0, len(mfs)]
349 return prune(mf, mfs)
350 def getfiles():
351 mfs.clear()
352 return changedfiles
353 def getfilenodes(fname, filerevlog):
354 if fastpathlinkrev:
355 ln, llr = filerevlog.node, filerevlog.linkrev
356 def genfilenodes():
357 for r in filerevlog:
358 linkrev = llr(r)
359 if linkrev not in commonrevs:
360 yield filerevlog.node(r), cl.node(linkrev)
361 fnodes[fname] = dict(genfilenodes())
362 fstate[0] = fname
363 fstate[1] = fnodes.pop(fname, {})
364 return prune(filerevlog, fstate[1])
365
303 366
304 367 count[:] = [0, len(clnodes)]
305 368 for chunk in self.group(clnodes, cl, reorder=reorder):
306 369 yield chunk
307 370 progress(_bundling, None)
308 371
309 372 for chunk in self.group(getmfnodes(), mf, reorder=reorder):
310 373 yield chunk
311 374 progress(_bundling, None)
312 375
313 376 changedfiles = getfiles()
314 377 count[:] = [0, len(changedfiles)]
315 378 for fname in sorted(changedfiles):
316 379 filerevlog = repo.file(fname)
317 380 if not len(filerevlog):
318 381 raise util.Abort(_("empty or missing revlog for %s")
319 382 % fname)
320 383 nodelist = getfilenodes(fname, filerevlog)
321 384 if nodelist:
322 385 count[0] += 1
323 386 yield self.fileheader(fname)
324 387 for chunk in self.group(nodelist, filerevlog, reorder):
325 388 yield chunk
326 389 yield self.close()
327 390 progress(_bundling, None)
328 391
329 392 if clnodes:
330 393 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
331 394
332 395 def revchunk(self, revlog, rev, prev):
333 396 node = revlog.node(rev)
334 397 p1, p2 = revlog.parentrevs(rev)
335 398 base = prev
336 399
337 400 prefix = ''
338 401 if base == nullrev:
339 402 delta = revlog.revision(node)
340 403 prefix = mdiff.trivialdiffheader(len(delta))
341 404 else:
342 405 delta = revlog.revdiff(base, rev)
343 406 linknode = self._lookup(revlog, node)
344 407 p1n, p2n = revlog.parents(node)
345 408 basenode = revlog.node(base)
346 409 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
347 410 meta += prefix
348 411 l = len(meta) + len(delta)
349 412 yield chunkheader(l)
350 413 yield meta
351 414 yield delta
352 415 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
353 416 # do nothing with basenode, it is implicitly the previous one in HG10
354 417 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
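For reference, the framing emitted by chunkheader(), closechunk() and revchunk() above is plain length-prefixed chunks: a 4-byte big-endian length that counts itself, followed by the payload, where a delta chunk's payload starts with the 80-byte header node/p1/p2/linknode ("20s20s20s20s", _BUNDLE10_DELTA_HEADER) and a bare zero length ends a chunk group. A minimal sketch of that framing, with illustrative helper names that are not part of Mercurial:

    # Sketch of the HG10 chunk framing used above (helper names are illustrative)
    import struct

    def framechunk(payload):
        # equivalent to chunkheader(len(payload)) + payload:
        # the length field counts its own 4 bytes
        return struct.pack(">l", len(payload) + 4) + payload

    def endgroup():
        # equivalent to closechunk(): a bare zero length ends a chunk group
        return struct.pack(">l", 0)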
@@ -1,2503 +1,2433 @@ mercurial/localrepo.py
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class repofilecache(filecache):
23 23 """All filecache usage on repo are done for logic that should be unfiltered
24 24 """
25 25
26 26 def __get__(self, repo, type=None):
27 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 28 def __set__(self, repo, value):
29 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 30 def __delete__(self, repo):
31 31 return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
33 33 class storecache(repofilecache):
34 34 """filecache for files in the store"""
35 35 def join(self, obj, fname):
36 36 return obj.sjoin(fname)
37 37
38 38 class unfilteredpropertycache(propertycache):
39 39 """propertycache that apply to unfiltered repo only"""
40 40
41 41 def __get__(self, repo, type=None):
42 42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43 43
44 44 class filteredpropertycache(propertycache):
45 45 """propertycache that must take filtering in account"""
46 46
47 47 def cachevalue(self, obj, value):
48 48 object.__setattr__(obj, self.name, value)
49 49
50 50
51 51 def hasunfilteredcache(repo, name):
52 52 """check if a repo has an unfilteredpropertycache value for <name>"""
53 53 return name in vars(repo.unfiltered())
54 54
55 55 def unfilteredmethod(orig):
56 56 """decorate method that always need to be run on unfiltered version"""
57 57 def wrapper(repo, *args, **kwargs):
58 58 return orig(repo.unfiltered(), *args, **kwargs)
59 59 return wrapper
60 60
61 61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
62 62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63 63
64 64 class localpeer(peer.peerrepository):
65 65 '''peer for a local repo; reflects only the most recent API'''
66 66
67 67 def __init__(self, repo, caps=MODERNCAPS):
68 68 peer.peerrepository.__init__(self)
69 69 self._repo = repo.filtered('served')
70 70 self.ui = repo.ui
71 71 self._caps = repo._restrictcapabilities(caps)
72 72 self.requirements = repo.requirements
73 73 self.supportedformats = repo.supportedformats
74 74
75 75 def close(self):
76 76 self._repo.close()
77 77
78 78 def _capabilities(self):
79 79 return self._caps
80 80
81 81 def local(self):
82 82 return self._repo
83 83
84 84 def canpush(self):
85 85 return True
86 86
87 87 def url(self):
88 88 return self._repo.url()
89 89
90 90 def lookup(self, key):
91 91 return self._repo.lookup(key)
92 92
93 93 def branchmap(self):
94 94 return self._repo.branchmap()
95 95
96 96 def heads(self):
97 97 return self._repo.heads()
98 98
99 99 def known(self, nodes):
100 100 return self._repo.known(nodes)
101 101
102 102 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
103 103 return self._repo.getbundle(source, heads=heads, common=common,
104 104 bundlecaps=None)
105 105
106 106 # TODO We might want to move the next two calls into legacypeer and add
107 107 # unbundle instead.
108 108
109 109 def lock(self):
110 110 return self._repo.lock()
111 111
112 112 def addchangegroup(self, cg, source, url):
113 113 return self._repo.addchangegroup(cg, source, url)
114 114
115 115 def pushkey(self, namespace, key, old, new):
116 116 return self._repo.pushkey(namespace, key, old, new)
117 117
118 118 def listkeys(self, namespace):
119 119 return self._repo.listkeys(namespace)
120 120
121 121 def debugwireargs(self, one, two, three=None, four=None, five=None):
122 122 '''used to test argument passing over the wire'''
123 123 return "%s %s %s %s %s" % (one, two, three, four, five)
124 124
125 125 class locallegacypeer(localpeer):
126 126 '''peer extension which implements legacy methods too; used for tests with
127 127 restricted capabilities'''
128 128
129 129 def __init__(self, repo):
130 130 localpeer.__init__(self, repo, caps=LEGACYCAPS)
131 131
132 132 def branches(self, nodes):
133 133 return self._repo.branches(nodes)
134 134
135 135 def between(self, pairs):
136 136 return self._repo.between(pairs)
137 137
138 138 def changegroup(self, basenodes, source):
139 139 return self._repo.changegroup(basenodes, source)
140 140
141 141 def changegroupsubset(self, bases, heads, source):
142 142 return self._repo.changegroupsubset(bases, heads, source)
143 143
144 144 class localrepository(object):
145 145
146 146 supportedformats = set(('revlogv1', 'generaldelta'))
147 147 supported = supportedformats | set(('store', 'fncache', 'shared',
148 148 'dotencode'))
149 149 openerreqs = set(('revlogv1', 'generaldelta'))
150 150 requirements = ['revlogv1']
151 151 filtername = None
152 152
153 153 def _baserequirements(self, create):
154 154 return self.requirements[:]
155 155
156 156 def __init__(self, baseui, path=None, create=False):
157 157 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
158 158 self.wopener = self.wvfs
159 159 self.root = self.wvfs.base
160 160 self.path = self.wvfs.join(".hg")
161 161 self.origroot = path
162 162 self.auditor = scmutil.pathauditor(self.root, self._checknested)
163 163 self.vfs = scmutil.vfs(self.path)
164 164 self.opener = self.vfs
165 165 self.baseui = baseui
166 166 self.ui = baseui.copy()
167 167 # A list of callbacks to shape the phase if no data were found.
168 168 # Callbacks are in the form: func(repo, roots) --> processed root.
169 169 # This list is to be filled by extensions during repo setup
170 170 self._phasedefaults = []
171 171 try:
172 172 self.ui.readconfig(self.join("hgrc"), self.root)
173 173 extensions.loadall(self.ui)
174 174 except IOError:
175 175 pass
176 176
177 177 if not self.vfs.isdir():
178 178 if create:
179 179 if not self.wvfs.exists():
180 180 self.wvfs.makedirs()
181 181 self.vfs.makedir(notindexed=True)
182 182 requirements = self._baserequirements(create)
183 183 if self.ui.configbool('format', 'usestore', True):
184 184 self.vfs.mkdir("store")
185 185 requirements.append("store")
186 186 if self.ui.configbool('format', 'usefncache', True):
187 187 requirements.append("fncache")
188 188 if self.ui.configbool('format', 'dotencode', True):
189 189 requirements.append('dotencode')
190 190 # create an invalid changelog
191 191 self.vfs.append(
192 192 "00changelog.i",
193 193 '\0\0\0\2' # represents revlogv2
194 194 ' dummy changelog to prevent using the old repo layout'
195 195 )
196 196 if self.ui.configbool('format', 'generaldelta', False):
197 197 requirements.append("generaldelta")
198 198 requirements = set(requirements)
199 199 else:
200 200 raise error.RepoError(_("repository %s not found") % path)
201 201 elif create:
202 202 raise error.RepoError(_("repository %s already exists") % path)
203 203 else:
204 204 try:
205 205 requirements = scmutil.readrequires(self.vfs, self.supported)
206 206 except IOError, inst:
207 207 if inst.errno != errno.ENOENT:
208 208 raise
209 209 requirements = set()
210 210
211 211 self.sharedpath = self.path
212 212 try:
213 213 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
214 214 realpath=True)
215 215 s = vfs.base
216 216 if not vfs.exists():
217 217 raise error.RepoError(
218 218 _('.hg/sharedpath points to nonexistent directory %s') % s)
219 219 self.sharedpath = s
220 220 except IOError, inst:
221 221 if inst.errno != errno.ENOENT:
222 222 raise
223 223
224 224 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
225 225 self.spath = self.store.path
226 226 self.svfs = self.store.vfs
227 227 self.sopener = self.svfs
228 228 self.sjoin = self.store.join
229 229 self.vfs.createmode = self.store.createmode
230 230 self._applyrequirements(requirements)
231 231 if create:
232 232 self._writerequirements()
233 233
234 234
235 235 self._branchcaches = {}
236 236 self.filterpats = {}
237 237 self._datafilters = {}
238 238 self._transref = self._lockref = self._wlockref = None
239 239
240 240 # A cache for various files under .hg/ that tracks file changes,
241 241 # (used by the filecache decorator)
242 242 #
243 243 # Maps a property name to its util.filecacheentry
244 244 self._filecache = {}
245 245
246 246 # hold sets of revision to be filtered
247 247 # should be cleared when something might have changed the filter value:
248 248 # - new changesets,
249 249 # - phase change,
250 250 # - new obsolescence marker,
251 251 # - working directory parent change,
252 252 # - bookmark changes
253 253 self.filteredrevcache = {}
254 254
255 255 def close(self):
256 256 pass
257 257
258 258 def _restrictcapabilities(self, caps):
259 259 return caps
260 260
261 261 def _applyrequirements(self, requirements):
262 262 self.requirements = requirements
263 263 self.sopener.options = dict((r, 1) for r in requirements
264 264 if r in self.openerreqs)
265 265
266 266 def _writerequirements(self):
267 267 reqfile = self.opener("requires", "w")
268 268 for r in sorted(self.requirements):
269 269 reqfile.write("%s\n" % r)
270 270 reqfile.close()
271 271
272 272 def _checknested(self, path):
273 273 """Determine if path is a legal nested repository."""
274 274 if not path.startswith(self.root):
275 275 return False
276 276 subpath = path[len(self.root) + 1:]
277 277 normsubpath = util.pconvert(subpath)
278 278
279 279 # XXX: Checking against the current working copy is wrong in
280 280 # the sense that it can reject things like
281 281 #
282 282 # $ hg cat -r 10 sub/x.txt
283 283 #
284 284 # if sub/ is no longer a subrepository in the working copy
285 285 # parent revision.
286 286 #
287 287 # However, it can of course also allow things that would have
288 288 # been rejected before, such as the above cat command if sub/
289 289 # is a subrepository now, but was a normal directory before.
290 290 # The old path auditor would have rejected by mistake since it
291 291 # panics when it sees sub/.hg/.
292 292 #
293 293 # All in all, checking against the working copy seems sensible
294 294 # since we want to prevent access to nested repositories on
295 295 # the filesystem *now*.
296 296 ctx = self[None]
297 297 parts = util.splitpath(subpath)
298 298 while parts:
299 299 prefix = '/'.join(parts)
300 300 if prefix in ctx.substate:
301 301 if prefix == normsubpath:
302 302 return True
303 303 else:
304 304 sub = ctx.sub(prefix)
305 305 return sub.checknested(subpath[len(prefix) + 1:])
306 306 else:
307 307 parts.pop()
308 308 return False
309 309
310 310 def peer(self):
311 311 return localpeer(self) # not cached to avoid reference cycle
312 312
313 313 def unfiltered(self):
314 314 """Return unfiltered version of the repository
315 315
316 316 Intended to be overwritten by filtered repo."""
317 317 return self
318 318
319 319 def filtered(self, name):
320 320 """Return a filtered version of a repository"""
321 321 # build a new class with the mixin and the current class
322 322 # (possibly subclass of the repo)
323 323 class proxycls(repoview.repoview, self.unfiltered().__class__):
324 324 pass
325 325 return proxycls(self, name)
326 326
327 327 @repofilecache('bookmarks')
328 328 def _bookmarks(self):
329 329 return bookmarks.bmstore(self)
330 330
331 331 @repofilecache('bookmarks.current')
332 332 def _bookmarkcurrent(self):
333 333 return bookmarks.readcurrent(self)
334 334
335 335 def bookmarkheads(self, bookmark):
336 336 name = bookmark.split('@', 1)[0]
337 337 heads = []
338 338 for mark, n in self._bookmarks.iteritems():
339 339 if mark.split('@', 1)[0] == name:
340 340 heads.append(n)
341 341 return heads
342 342
343 343 @storecache('phaseroots')
344 344 def _phasecache(self):
345 345 return phases.phasecache(self, self._phasedefaults)
346 346
347 347 @storecache('obsstore')
348 348 def obsstore(self):
349 349 store = obsolete.obsstore(self.sopener)
350 350 if store and not obsolete._enabled:
351 351 # message is rare enough to not be translated
352 352 msg = 'obsolete feature not enabled but %i markers found!\n'
353 353 self.ui.warn(msg % len(list(store)))
354 354 return store
355 355
356 356 @storecache('00changelog.i')
357 357 def changelog(self):
358 358 c = changelog.changelog(self.sopener)
359 359 if 'HG_PENDING' in os.environ:
360 360 p = os.environ['HG_PENDING']
361 361 if p.startswith(self.root):
362 362 c.readpending('00changelog.i.a')
363 363 return c
364 364
365 365 @storecache('00manifest.i')
366 366 def manifest(self):
367 367 return manifest.manifest(self.sopener)
368 368
369 369 @repofilecache('dirstate')
370 370 def dirstate(self):
371 371 warned = [0]
372 372 def validate(node):
373 373 try:
374 374 self.changelog.rev(node)
375 375 return node
376 376 except error.LookupError:
377 377 if not warned[0]:
378 378 warned[0] = True
379 379 self.ui.warn(_("warning: ignoring unknown"
380 380 " working parent %s!\n") % short(node))
381 381 return nullid
382 382
383 383 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
384 384
385 385 def __getitem__(self, changeid):
386 386 if changeid is None:
387 387 return context.workingctx(self)
388 388 return context.changectx(self, changeid)
389 389
390 390 def __contains__(self, changeid):
391 391 try:
392 392 return bool(self.lookup(changeid))
393 393 except error.RepoLookupError:
394 394 return False
395 395
396 396 def __nonzero__(self):
397 397 return True
398 398
399 399 def __len__(self):
400 400 return len(self.changelog)
401 401
402 402 def __iter__(self):
403 403 return iter(self.changelog)
404 404
405 405 def revs(self, expr, *args):
406 406 '''Return a list of revisions matching the given revset'''
407 407 expr = revset.formatspec(expr, *args)
408 408 m = revset.match(None, expr)
409 409 return [r for r in m(self, list(self))]
410 410
411 411 def set(self, expr, *args):
412 412 '''
413 413 Yield a context for each matching revision, after doing arg
414 414 replacement via revset.formatspec
415 415 '''
416 416 for r in self.revs(expr, *args):
417 417 yield self[r]
418 418
419 419 def url(self):
420 420 return 'file:' + self.root
421 421
422 422 def hook(self, name, throw=False, **args):
423 423 return hook.hook(self.ui, self, name, throw, **args)
424 424
425 425 @unfilteredmethod
426 426 def _tag(self, names, node, message, local, user, date, extra={}):
427 427 if isinstance(names, str):
428 428 names = (names,)
429 429
430 430 branches = self.branchmap()
431 431 for name in names:
432 432 self.hook('pretag', throw=True, node=hex(node), tag=name,
433 433 local=local)
434 434 if name in branches:
435 435 self.ui.warn(_("warning: tag %s conflicts with existing"
436 436 " branch name\n") % name)
437 437
438 438 def writetags(fp, names, munge, prevtags):
439 439 fp.seek(0, 2)
440 440 if prevtags and prevtags[-1] != '\n':
441 441 fp.write('\n')
442 442 for name in names:
443 443 m = munge and munge(name) or name
444 444 if (self._tagscache.tagtypes and
445 445 name in self._tagscache.tagtypes):
446 446 old = self.tags().get(name, nullid)
447 447 fp.write('%s %s\n' % (hex(old), m))
448 448 fp.write('%s %s\n' % (hex(node), m))
449 449 fp.close()
450 450
451 451 prevtags = ''
452 452 if local:
453 453 try:
454 454 fp = self.opener('localtags', 'r+')
455 455 except IOError:
456 456 fp = self.opener('localtags', 'a')
457 457 else:
458 458 prevtags = fp.read()
459 459
460 460 # local tags are stored in the current charset
461 461 writetags(fp, names, None, prevtags)
462 462 for name in names:
463 463 self.hook('tag', node=hex(node), tag=name, local=local)
464 464 return
465 465
466 466 try:
467 467 fp = self.wfile('.hgtags', 'rb+')
468 468 except IOError, e:
469 469 if e.errno != errno.ENOENT:
470 470 raise
471 471 fp = self.wfile('.hgtags', 'ab')
472 472 else:
473 473 prevtags = fp.read()
474 474
475 475 # committed tags are stored in UTF-8
476 476 writetags(fp, names, encoding.fromlocal, prevtags)
477 477
478 478 fp.close()
479 479
480 480 self.invalidatecaches()
481 481
482 482 if '.hgtags' not in self.dirstate:
483 483 self[None].add(['.hgtags'])
484 484
485 485 m = matchmod.exact(self.root, '', ['.hgtags'])
486 486 tagnode = self.commit(message, user, date, extra=extra, match=m)
487 487
488 488 for name in names:
489 489 self.hook('tag', node=hex(node), tag=name, local=local)
490 490
491 491 return tagnode
492 492
493 493 def tag(self, names, node, message, local, user, date):
494 494 '''tag a revision with one or more symbolic names.
495 495
496 496 names is a list of strings or, when adding a single tag, names may be a
497 497 string.
498 498
499 499 if local is True, the tags are stored in a per-repository file.
500 500 otherwise, they are stored in the .hgtags file, and a new
501 501 changeset is committed with the change.
502 502
503 503 keyword arguments:
504 504
505 505 local: whether to store tags in non-version-controlled file
506 506 (default False)
507 507
508 508 message: commit message to use if committing
509 509
510 510 user: name of user to use if committing
511 511
512 512 date: date tuple to use if committing'''
513 513
514 514 if not local:
515 515 for x in self.status()[:5]:
516 516 if '.hgtags' in x:
517 517 raise util.Abort(_('working copy of .hgtags is changed '
518 518 '(please commit .hgtags manually)'))
519 519
520 520 self.tags() # instantiate the cache
521 521 self._tag(names, node, message, local, user, date)
522 522
523 523 @filteredpropertycache
524 524 def _tagscache(self):
525 525 '''Returns a tagscache object that contains various tags related
526 526 caches.'''
527 527
528 528 # This simplifies its cache management by having one decorated
529 529 # function (this one) and the rest simply fetch things from it.
530 530 class tagscache(object):
531 531 def __init__(self):
532 532 # These two define the set of tags for this repository. tags
533 533 # maps tag name to node; tagtypes maps tag name to 'global' or
534 534 # 'local'. (Global tags are defined by .hgtags across all
535 535 # heads, and local tags are defined in .hg/localtags.)
536 536 # They constitute the in-memory cache of tags.
537 537 self.tags = self.tagtypes = None
538 538
539 539 self.nodetagscache = self.tagslist = None
540 540
541 541 cache = tagscache()
542 542 cache.tags, cache.tagtypes = self._findtags()
543 543
544 544 return cache
545 545
546 546 def tags(self):
547 547 '''return a mapping of tag to node'''
548 548 t = {}
549 549 if self.changelog.filteredrevs:
550 550 tags, tt = self._findtags()
551 551 else:
552 552 tags = self._tagscache.tags
553 553 for k, v in tags.iteritems():
554 554 try:
555 555 # ignore tags to unknown nodes
556 556 self.changelog.rev(v)
557 557 t[k] = v
558 558 except (error.LookupError, ValueError):
559 559 pass
560 560 return t
561 561
562 562 def _findtags(self):
563 563 '''Do the hard work of finding tags. Return a pair of dicts
564 564 (tags, tagtypes) where tags maps tag name to node, and tagtypes
565 565 maps tag name to a string like \'global\' or \'local\'.
566 566 Subclasses or extensions are free to add their own tags, but
567 567 should be aware that the returned dicts will be retained for the
568 568 duration of the localrepo object.'''
569 569
570 570 # XXX what tagtype should subclasses/extensions use? Currently
571 571 # mq and bookmarks add tags, but do not set the tagtype at all.
572 572 # Should each extension invent its own tag type? Should there
573 573 # be one tagtype for all such "virtual" tags? Or is the status
574 574 # quo fine?
575 575
576 576 alltags = {} # map tag name to (node, hist)
577 577 tagtypes = {}
578 578
579 579 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
580 580 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
581 581
582 582 # Build the return dicts. Have to re-encode tag names because
583 583 # the tags module always uses UTF-8 (in order not to lose info
584 584 # writing to the cache), but the rest of Mercurial wants them in
585 585 # local encoding.
586 586 tags = {}
587 587 for (name, (node, hist)) in alltags.iteritems():
588 588 if node != nullid:
589 589 tags[encoding.tolocal(name)] = node
590 590 tags['tip'] = self.changelog.tip()
591 591 tagtypes = dict([(encoding.tolocal(name), value)
592 592 for (name, value) in tagtypes.iteritems()])
593 593 return (tags, tagtypes)
594 594
595 595 def tagtype(self, tagname):
596 596 '''
597 597 return the type of the given tag. result can be:
598 598
599 599 'local' : a local tag
600 600 'global' : a global tag
601 601 None : tag does not exist
602 602 '''
603 603
604 604 return self._tagscache.tagtypes.get(tagname)
605 605
606 606 def tagslist(self):
607 607 '''return a list of tags ordered by revision'''
608 608 if not self._tagscache.tagslist:
609 609 l = []
610 610 for t, n in self.tags().iteritems():
611 611 r = self.changelog.rev(n)
612 612 l.append((r, t, n))
613 613 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
614 614
615 615 return self._tagscache.tagslist
616 616
617 617 def nodetags(self, node):
618 618 '''return the tags associated with a node'''
619 619 if not self._tagscache.nodetagscache:
620 620 nodetagscache = {}
621 621 for t, n in self._tagscache.tags.iteritems():
622 622 nodetagscache.setdefault(n, []).append(t)
623 623 for tags in nodetagscache.itervalues():
624 624 tags.sort()
625 625 self._tagscache.nodetagscache = nodetagscache
626 626 return self._tagscache.nodetagscache.get(node, [])
627 627
628 628 def nodebookmarks(self, node):
629 629 marks = []
630 630 for bookmark, n in self._bookmarks.iteritems():
631 631 if n == node:
632 632 marks.append(bookmark)
633 633 return sorted(marks)
634 634
635 635 def branchmap(self):
636 636 '''returns a dictionary {branch: [branchheads]}'''
637 637 branchmap.updatecache(self)
638 638 return self._branchcaches[self.filtername]
639 639
640 640
641 641 def _branchtip(self, heads):
642 642 '''return the tipmost branch head in heads'''
643 643 tip = heads[-1]
644 644 for h in reversed(heads):
645 645 if not self[h].closesbranch():
646 646 tip = h
647 647 break
648 648 return tip
649 649
650 650 def branchtip(self, branch):
651 651 '''return the tip node for a given branch'''
652 652 if branch not in self.branchmap():
653 653 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
654 654 return self._branchtip(self.branchmap()[branch])
655 655
656 656 def branchtags(self):
657 657 '''return a dict where branch names map to the tipmost head of
658 658 the branch, open heads come before closed'''
659 659 bt = {}
660 660 for bn, heads in self.branchmap().iteritems():
661 661 bt[bn] = self._branchtip(heads)
662 662 return bt
663 663
664 664 def lookup(self, key):
665 665 return self[key].node()
666 666
667 667 def lookupbranch(self, key, remote=None):
668 668 repo = remote or self
669 669 if key in repo.branchmap():
670 670 return key
671 671
672 672 repo = (remote and remote.local()) and remote or self
673 673 return repo[key].branch()
674 674
675 675 def known(self, nodes):
676 676 nm = self.changelog.nodemap
677 677 pc = self._phasecache
678 678 result = []
679 679 for n in nodes:
680 680 r = nm.get(n)
681 681 resp = not (r is None or pc.phase(self, r) >= phases.secret)
682 682 result.append(resp)
683 683 return result
684 684
685 685 def local(self):
686 686 return self
687 687
688 688 def cancopy(self):
689 689 return self.local() # so statichttprepo's override of local() works
690 690
691 691 def join(self, f):
692 692 return os.path.join(self.path, f)
693 693
694 694 def wjoin(self, f):
695 695 return os.path.join(self.root, f)
696 696
697 697 def file(self, f):
698 698 if f[0] == '/':
699 699 f = f[1:]
700 700 return filelog.filelog(self.sopener, f)
701 701
702 702 def changectx(self, changeid):
703 703 return self[changeid]
704 704
705 705 def parents(self, changeid=None):
706 706 '''get list of changectxs for parents of changeid'''
707 707 return self[changeid].parents()
708 708
709 709 def setparents(self, p1, p2=nullid):
710 710 copies = self.dirstate.setparents(p1, p2)
711 711 pctx = self[p1]
712 712 if copies:
713 713 # Adjust copy records, the dirstate cannot do it, it
714 714 # requires access to parents manifests. Preserve them
715 715 # only for entries added to first parent.
716 716 for f in copies:
717 717 if f not in pctx and copies[f] in pctx:
718 718 self.dirstate.copy(copies[f], f)
719 719 if p2 == nullid:
720 720 for f, s in sorted(self.dirstate.copies().items()):
721 721 if f not in pctx and s not in pctx:
722 722 self.dirstate.copy(None, f)
723 723
724 724 def filectx(self, path, changeid=None, fileid=None):
725 725 """changeid can be a changeset revision, node, or tag.
726 726 fileid can be a file revision or node."""
727 727 return context.filectx(self, path, changeid, fileid)
728 728
729 729 def getcwd(self):
730 730 return self.dirstate.getcwd()
731 731
732 732 def pathto(self, f, cwd=None):
733 733 return self.dirstate.pathto(f, cwd)
734 734
735 735 def wfile(self, f, mode='r'):
736 736 return self.wopener(f, mode)
737 737
738 738 def _link(self, f):
739 739 return self.wvfs.islink(f)
740 740
741 741 def _loadfilter(self, filter):
742 742 if filter not in self.filterpats:
743 743 l = []
744 744 for pat, cmd in self.ui.configitems(filter):
745 745 if cmd == '!':
746 746 continue
747 747 mf = matchmod.match(self.root, '', [pat])
748 748 fn = None
749 749 params = cmd
750 750 for name, filterfn in self._datafilters.iteritems():
751 751 if cmd.startswith(name):
752 752 fn = filterfn
753 753 params = cmd[len(name):].lstrip()
754 754 break
755 755 if not fn:
756 756 fn = lambda s, c, **kwargs: util.filter(s, c)
757 757 # Wrap old filters not supporting keyword arguments
758 758 if not inspect.getargspec(fn)[2]:
759 759 oldfn = fn
760 760 fn = lambda s, c, **kwargs: oldfn(s, c)
761 761 l.append((mf, fn, params))
762 762 self.filterpats[filter] = l
763 763 return self.filterpats[filter]
764 764
765 765 def _filter(self, filterpats, filename, data):
766 766 for mf, fn, cmd in filterpats:
767 767 if mf(filename):
768 768 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
769 769 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
770 770 break
771 771
772 772 return data
773 773
774 774 @unfilteredpropertycache
775 775 def _encodefilterpats(self):
776 776 return self._loadfilter('encode')
777 777
778 778 @unfilteredpropertycache
779 779 def _decodefilterpats(self):
780 780 return self._loadfilter('decode')
781 781
782 782 def adddatafilter(self, name, filter):
783 783 self._datafilters[name] = filter
784 784
785 785 def wread(self, filename):
786 786 if self._link(filename):
787 787 data = self.wvfs.readlink(filename)
788 788 else:
789 789 data = self.wopener.read(filename)
790 790 return self._filter(self._encodefilterpats, filename, data)
791 791
792 792 def wwrite(self, filename, data, flags):
793 793 data = self._filter(self._decodefilterpats, filename, data)
794 794 if 'l' in flags:
795 795 self.wopener.symlink(data, filename)
796 796 else:
797 797 self.wopener.write(filename, data)
798 798 if 'x' in flags:
799 799 self.wvfs.setflags(filename, False, True)
800 800
801 801 def wwritedata(self, filename, data):
802 802 return self._filter(self._decodefilterpats, filename, data)
803 803
804 804 def transaction(self, desc):
805 805 tr = self._transref and self._transref() or None
806 806 if tr and tr.running():
807 807 return tr.nest()
808 808
809 809 # abort here if the journal already exists
810 810 if self.svfs.exists("journal"):
811 811 raise error.RepoError(
812 812 _("abandoned transaction found - run hg recover"))
813 813
814 814 self._writejournal(desc)
815 815 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
816 816
817 817 tr = transaction.transaction(self.ui.warn, self.sopener,
818 818 self.sjoin("journal"),
819 819 aftertrans(renames),
820 820 self.store.createmode)
821 821 self._transref = weakref.ref(tr)
822 822 return tr
823 823
824 824 def _journalfiles(self):
825 825 return ((self.svfs, 'journal'),
826 826 (self.vfs, 'journal.dirstate'),
827 827 (self.vfs, 'journal.branch'),
828 828 (self.vfs, 'journal.desc'),
829 829 (self.vfs, 'journal.bookmarks'),
830 830 (self.svfs, 'journal.phaseroots'))
831 831
832 832 def undofiles(self):
833 833 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
834 834
835 835 def _writejournal(self, desc):
836 836 self.opener.write("journal.dirstate",
837 837 self.opener.tryread("dirstate"))
838 838 self.opener.write("journal.branch",
839 839 encoding.fromlocal(self.dirstate.branch()))
840 840 self.opener.write("journal.desc",
841 841 "%d\n%s\n" % (len(self), desc))
842 842 self.opener.write("journal.bookmarks",
843 843 self.opener.tryread("bookmarks"))
844 844 self.sopener.write("journal.phaseroots",
845 845 self.sopener.tryread("phaseroots"))
846 846
847 847 def recover(self):
848 848 lock = self.lock()
849 849 try:
850 850 if self.svfs.exists("journal"):
851 851 self.ui.status(_("rolling back interrupted transaction\n"))
852 852 transaction.rollback(self.sopener, self.sjoin("journal"),
853 853 self.ui.warn)
854 854 self.invalidate()
855 855 return True
856 856 else:
857 857 self.ui.warn(_("no interrupted transaction available\n"))
858 858 return False
859 859 finally:
860 860 lock.release()
861 861
862 862 def rollback(self, dryrun=False, force=False):
863 863 wlock = lock = None
864 864 try:
865 865 wlock = self.wlock()
866 866 lock = self.lock()
867 867 if self.svfs.exists("undo"):
868 868 return self._rollback(dryrun, force)
869 869 else:
870 870 self.ui.warn(_("no rollback information available\n"))
871 871 return 1
872 872 finally:
873 873 release(lock, wlock)
874 874
875 875 @unfilteredmethod # Until we get smarter cache management
876 876 def _rollback(self, dryrun, force):
877 877 ui = self.ui
878 878 try:
879 879 args = self.opener.read('undo.desc').splitlines()
880 880 (oldlen, desc, detail) = (int(args[0]), args[1], None)
881 881 if len(args) >= 3:
882 882 detail = args[2]
883 883 oldtip = oldlen - 1
884 884
885 885 if detail and ui.verbose:
886 886 msg = (_('repository tip rolled back to revision %s'
887 887 ' (undo %s: %s)\n')
888 888 % (oldtip, desc, detail))
889 889 else:
890 890 msg = (_('repository tip rolled back to revision %s'
891 891 ' (undo %s)\n')
892 892 % (oldtip, desc))
893 893 except IOError:
894 894 msg = _('rolling back unknown transaction\n')
895 895 desc = None
896 896
897 897 if not force and self['.'] != self['tip'] and desc == 'commit':
898 898 raise util.Abort(
899 899 _('rollback of last commit while not checked out '
900 900 'may lose data'), hint=_('use -f to force'))
901 901
902 902 ui.status(msg)
903 903 if dryrun:
904 904 return 0
905 905
906 906 parents = self.dirstate.parents()
907 907 self.destroying()
908 908 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
909 909 if self.vfs.exists('undo.bookmarks'):
910 910 self.vfs.rename('undo.bookmarks', 'bookmarks')
911 911 if self.svfs.exists('undo.phaseroots'):
912 912 self.svfs.rename('undo.phaseroots', 'phaseroots')
913 913 self.invalidate()
914 914
915 915 parentgone = (parents[0] not in self.changelog.nodemap or
916 916 parents[1] not in self.changelog.nodemap)
917 917 if parentgone:
918 918 self.vfs.rename('undo.dirstate', 'dirstate')
919 919 try:
920 920 branch = self.opener.read('undo.branch')
921 921 self.dirstate.setbranch(encoding.tolocal(branch))
922 922 except IOError:
923 923 ui.warn(_('named branch could not be reset: '
924 924 'current branch is still \'%s\'\n')
925 925 % self.dirstate.branch())
926 926
927 927 self.dirstate.invalidate()
928 928 parents = tuple([p.rev() for p in self.parents()])
929 929 if len(parents) > 1:
930 930 ui.status(_('working directory now based on '
931 931 'revisions %d and %d\n') % parents)
932 932 else:
933 933 ui.status(_('working directory now based on '
934 934 'revision %d\n') % parents)
935 935 # TODO: if we know which new heads may result from this rollback, pass
936 936 # them to destroy(), which will prevent the branchhead cache from being
937 937 # invalidated.
938 938 self.destroyed()
939 939 return 0
940 940
941 941 def invalidatecaches(self):
942 942
943 943 if '_tagscache' in vars(self):
944 944 # can't use delattr on proxy
945 945 del self.__dict__['_tagscache']
946 946
947 947 self.unfiltered()._branchcaches.clear()
948 948 self.invalidatevolatilesets()
949 949
950 950 def invalidatevolatilesets(self):
951 951 self.filteredrevcache.clear()
952 952 obsolete.clearobscaches(self)
953 953
954 954 def invalidatedirstate(self):
955 955 '''Invalidates the dirstate, causing the next call to dirstate
956 956 to check if it was modified since the last time it was read,
957 957 rereading it if it has.
958 958
959 959 This is different from dirstate.invalidate() in that it doesn't
960 960 always reread the dirstate. Use dirstate.invalidate() if you want to
961 961 explicitly read the dirstate again (i.e. restoring it to a previous
962 962 known good state).'''
963 963 if hasunfilteredcache(self, 'dirstate'):
964 964 for k in self.dirstate._filecache:
965 965 try:
966 966 delattr(self.dirstate, k)
967 967 except AttributeError:
968 968 pass
969 969 delattr(self.unfiltered(), 'dirstate')
970 970
971 971 def invalidate(self):
972 972 unfiltered = self.unfiltered() # all file caches are stored unfiltered
973 973 for k in self._filecache:
974 974 # dirstate is invalidated separately in invalidatedirstate()
975 975 if k == 'dirstate':
976 976 continue
977 977
978 978 try:
979 979 delattr(unfiltered, k)
980 980 except AttributeError:
981 981 pass
982 982 self.invalidatecaches()
983 983
984 984 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
985 985 try:
986 986 l = lock.lock(lockname, 0, releasefn, desc=desc)
987 987 except error.LockHeld, inst:
988 988 if not wait:
989 989 raise
990 990 self.ui.warn(_("waiting for lock on %s held by %r\n") %
991 991 (desc, inst.locker))
992 992 # default to 600 seconds timeout
993 993 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
994 994 releasefn, desc=desc)
995 995 if acquirefn:
996 996 acquirefn()
997 997 return l
998 998
999 999 def _afterlock(self, callback):
1000 1000 """add a callback to the current repository lock.
1001 1001
1002 1002 The callback will be executed on lock release."""
1003 1003 l = self._lockref and self._lockref()
1004 1004 if l:
1005 1005 l.postrelease.append(callback)
1006 1006 else:
1007 1007 callback()
1008 1008
1009 1009 def lock(self, wait=True):
1010 1010 '''Lock the repository store (.hg/store) and return a weak reference
1011 1011 to the lock. Use this before modifying the store (e.g. committing or
1012 1012 stripping). If you are opening a transaction, get a lock as well.)'''
1013 1013 l = self._lockref and self._lockref()
1014 1014 if l is not None and l.held:
1015 1015 l.lock()
1016 1016 return l
1017 1017
1018 1018 def unlock():
1019 1019 self.store.write()
1020 1020 if hasunfilteredcache(self, '_phasecache'):
1021 1021 self._phasecache.write()
1022 1022 for k, ce in self._filecache.items():
1023 1023 if k == 'dirstate' or k not in self.__dict__:
1024 1024 continue
1025 1025 ce.refresh()
1026 1026
1027 1027 l = self._lock(self.sjoin("lock"), wait, unlock,
1028 1028 self.invalidate, _('repository %s') % self.origroot)
1029 1029 self._lockref = weakref.ref(l)
1030 1030 return l
1031 1031
1032 1032 def wlock(self, wait=True):
1033 1033 '''Lock the non-store parts of the repository (everything under
1034 1034 .hg except .hg/store) and return a weak reference to the lock.
1035 1035 Use this before modifying files in .hg.'''
1036 1036 l = self._wlockref and self._wlockref()
1037 1037 if l is not None and l.held:
1038 1038 l.lock()
1039 1039 return l
1040 1040
1041 1041 def unlock():
1042 1042 self.dirstate.write()
1043 1043 self._filecache['dirstate'].refresh()
1044 1044
1045 1045 l = self._lock(self.join("wlock"), wait, unlock,
1046 1046 self.invalidatedirstate, _('working directory of %s') %
1047 1047 self.origroot)
1048 1048 self._wlockref = weakref.ref(l)
1049 1049 return l
1050 1050
1051 1051 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1052 1052 """
1053 1053 commit an individual file as part of a larger transaction
1054 1054 """
1055 1055
1056 1056 fname = fctx.path()
1057 1057 text = fctx.data()
1058 1058 flog = self.file(fname)
1059 1059 fparent1 = manifest1.get(fname, nullid)
1060 1060 fparent2 = fparent2o = manifest2.get(fname, nullid)
1061 1061
1062 1062 meta = {}
1063 1063 copy = fctx.renamed()
1064 1064 if copy and copy[0] != fname:
1065 1065 # Mark the new revision of this file as a copy of another
1066 1066 # file. This copy data will effectively act as a parent
1067 1067 # of this new revision. If this is a merge, the first
1068 1068 # parent will be the nullid (meaning "look up the copy data")
1069 1069 # and the second one will be the other parent. For example:
1070 1070 #
1071 1071 # 0 --- 1 --- 3 rev1 changes file foo
1072 1072 # \ / rev2 renames foo to bar and changes it
1073 1073 # \- 2 -/ rev3 should have bar with all changes and
1074 1074 # should record that bar descends from
1075 1075 # bar in rev2 and foo in rev1
1076 1076 #
1077 1077 # this allows this merge to succeed:
1078 1078 #
1079 1079 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1080 1080 # \ / merging rev3 and rev4 should use bar@rev2
1081 1081 # \- 2 --- 4 as the merge base
1082 1082 #
1083 1083
1084 1084 cfname = copy[0]
1085 1085 crev = manifest1.get(cfname)
1086 1086 newfparent = fparent2
1087 1087
1088 1088 if manifest2: # branch merge
1089 1089 if fparent2 == nullid or crev is None: # copied on remote side
1090 1090 if cfname in manifest2:
1091 1091 crev = manifest2[cfname]
1092 1092 newfparent = fparent1
1093 1093
1094 1094 # find source in nearest ancestor if we've lost track
1095 1095 if not crev:
1096 1096 self.ui.debug(" %s: searching for copy revision for %s\n" %
1097 1097 (fname, cfname))
1098 1098 for ancestor in self[None].ancestors():
1099 1099 if cfname in ancestor:
1100 1100 crev = ancestor[cfname].filenode()
1101 1101 break
1102 1102
1103 1103 if crev:
1104 1104 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1105 1105 meta["copy"] = cfname
1106 1106 meta["copyrev"] = hex(crev)
1107 1107 fparent1, fparent2 = nullid, newfparent
1108 1108 else:
1109 1109 self.ui.warn(_("warning: can't find ancestor for '%s' "
1110 1110 "copied from '%s'!\n") % (fname, cfname))
1111 1111
1112 1112 elif fparent2 != nullid:
1113 1113 # is one parent an ancestor of the other?
1114 1114 fparentancestor = flog.ancestor(fparent1, fparent2)
1115 1115 if fparentancestor == fparent1:
1116 1116 fparent1, fparent2 = fparent2, nullid
1117 1117 elif fparentancestor == fparent2:
1118 1118 fparent2 = nullid
1119 1119
1120 1120 # is the file changed?
1121 1121 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1122 1122 changelist.append(fname)
1123 1123 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1124 1124
1125 1125 # are just the flags changed during merge?
1126 1126 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1127 1127 changelist.append(fname)
1128 1128
1129 1129 return fparent1
1130 1130
1131 1131 @unfilteredmethod
1132 1132 def commit(self, text="", user=None, date=None, match=None, force=False,
1133 1133 editor=False, extra={}):
1134 1134 """Add a new revision to current repository.
1135 1135
1136 1136 Revision information is gathered from the working directory,
1137 1137 match can be used to filter the committed files. If editor is
1138 1138 supplied, it is called to get a commit message.
1139 1139 """
1140 1140
1141 1141 def fail(f, msg):
1142 1142 raise util.Abort('%s: %s' % (f, msg))
1143 1143
1144 1144 if not match:
1145 1145 match = matchmod.always(self.root, '')
1146 1146
1147 1147 if not force:
1148 1148 vdirs = []
1149 1149 match.explicitdir = vdirs.append
1150 1150 match.bad = fail
1151 1151
1152 1152 wlock = self.wlock()
1153 1153 try:
1154 1154 wctx = self[None]
1155 1155 merge = len(wctx.parents()) > 1
1156 1156
1157 1157 if (not force and merge and match and
1158 1158 (match.files() or match.anypats())):
1159 1159 raise util.Abort(_('cannot partially commit a merge '
1160 1160 '(do not specify files or patterns)'))
1161 1161
1162 1162 changes = self.status(match=match, clean=force)
1163 1163 if force:
1164 1164 changes[0].extend(changes[6]) # mq may commit unchanged files
1165 1165
1166 1166 # check subrepos
1167 1167 subs = []
1168 1168 commitsubs = set()
1169 1169 newstate = wctx.substate.copy()
1170 1170 # only manage subrepos and .hgsubstate if .hgsub is present
1171 1171 if '.hgsub' in wctx:
1172 1172 # we'll decide whether to track this ourselves, thanks
1173 1173 if '.hgsubstate' in changes[0]:
1174 1174 changes[0].remove('.hgsubstate')
1175 1175 if '.hgsubstate' in changes[2]:
1176 1176 changes[2].remove('.hgsubstate')
1177 1177
1178 1178 # compare current state to last committed state
1179 1179 # build new substate based on last committed state
1180 1180 oldstate = wctx.p1().substate
1181 1181 for s in sorted(newstate.keys()):
1182 1182 if not match(s):
1183 1183 # ignore working copy, use old state if present
1184 1184 if s in oldstate:
1185 1185 newstate[s] = oldstate[s]
1186 1186 continue
1187 1187 if not force:
1188 1188 raise util.Abort(
1189 1189 _("commit with new subrepo %s excluded") % s)
1190 1190 if wctx.sub(s).dirty(True):
1191 1191 if not self.ui.configbool('ui', 'commitsubrepos'):
1192 1192 raise util.Abort(
1193 1193 _("uncommitted changes in subrepo %s") % s,
1194 1194 hint=_("use --subrepos for recursive commit"))
1195 1195 subs.append(s)
1196 1196 commitsubs.add(s)
1197 1197 else:
1198 1198 bs = wctx.sub(s).basestate()
1199 1199 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1200 1200 if oldstate.get(s, (None, None, None))[1] != bs:
1201 1201 subs.append(s)
1202 1202
1203 1203 # check for removed subrepos
1204 1204 for p in wctx.parents():
1205 1205 r = [s for s in p.substate if s not in newstate]
1206 1206 subs += [s for s in r if match(s)]
1207 1207 if subs:
1208 1208 if (not match('.hgsub') and
1209 1209 '.hgsub' in (wctx.modified() + wctx.added())):
1210 1210 raise util.Abort(
1211 1211 _("can't commit subrepos without .hgsub"))
1212 1212 changes[0].insert(0, '.hgsubstate')
1213 1213
1214 1214 elif '.hgsub' in changes[2]:
1215 1215 # clean up .hgsubstate when .hgsub is removed
1216 1216 if ('.hgsubstate' in wctx and
1217 1217 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1218 1218 changes[2].insert(0, '.hgsubstate')
1219 1219
1220 1220 # make sure all explicit patterns are matched
1221 1221 if not force and match.files():
1222 1222 matched = set(changes[0] + changes[1] + changes[2])
1223 1223
1224 1224 for f in match.files():
1225 1225 f = self.dirstate.normalize(f)
1226 1226 if f == '.' or f in matched or f in wctx.substate:
1227 1227 continue
1228 1228 if f in changes[3]: # missing
1229 1229 fail(f, _('file not found!'))
1230 1230 if f in vdirs: # visited directory
1231 1231 d = f + '/'
1232 1232 for mf in matched:
1233 1233 if mf.startswith(d):
1234 1234 break
1235 1235 else:
1236 1236 fail(f, _("no match under directory!"))
1237 1237 elif f not in self.dirstate:
1238 1238 fail(f, _("file not tracked!"))
1239 1239
1240 1240 cctx = context.workingctx(self, text, user, date, extra, changes)
1241 1241
1242 1242 if (not force and not extra.get("close") and not merge
1243 1243 and not cctx.files()
1244 1244 and wctx.branch() == wctx.p1().branch()):
1245 1245 return None
1246 1246
1247 1247 if merge and cctx.deleted():
1248 1248 raise util.Abort(_("cannot commit merge with missing files"))
1249 1249
1250 1250 ms = mergemod.mergestate(self)
1251 1251 for f in changes[0]:
1252 1252 if f in ms and ms[f] == 'u':
1253 1253 raise util.Abort(_("unresolved merge conflicts "
1254 1254 "(see hg help resolve)"))
1255 1255
1256 1256 if editor:
1257 1257 cctx._text = editor(self, cctx, subs)
1258 1258 edited = (text != cctx._text)
1259 1259
1260 1260 # commit subs and write new state
1261 1261 if subs:
1262 1262 for s in sorted(commitsubs):
1263 1263 sub = wctx.sub(s)
1264 1264 self.ui.status(_('committing subrepository %s\n') %
1265 1265 subrepo.subrelpath(sub))
1266 1266 sr = sub.commit(cctx._text, user, date)
1267 1267 newstate[s] = (newstate[s][0], sr)
1268 1268 subrepo.writestate(self, newstate)
1269 1269
1270 1270 # Save commit message in case this transaction gets rolled back
1271 1271 # (e.g. by a pretxncommit hook). Leave the content alone on
1272 1272 # the assumption that the user will use the same editor again.
1273 1273 msgfn = self.savecommitmessage(cctx._text)
1274 1274
1275 1275 p1, p2 = self.dirstate.parents()
1276 1276 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1277 1277 try:
1278 1278 self.hook("precommit", throw=True, parent1=hookp1,
1279 1279 parent2=hookp2)
1280 1280 ret = self.commitctx(cctx, True)
1281 1281 except: # re-raises
1282 1282 if edited:
1283 1283 self.ui.write(
1284 1284 _('note: commit message saved in %s\n') % msgfn)
1285 1285 raise
1286 1286
1287 1287 # update bookmarks, dirstate and mergestate
1288 1288 bookmarks.update(self, [p1, p2], ret)
1289 1289 cctx.markcommitted(ret)
1290 1290 ms.reset()
1291 1291 finally:
1292 1292 wlock.release()
1293 1293
1294 1294 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1295 1295 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1296 1296 self._afterlock(commithook)
1297 1297 return ret
1298 1298
1299 1299 @unfilteredmethod
1300 1300 def commitctx(self, ctx, error=False):
1301 1301         """Add a new revision to the current repository.
1302 1302 Revision information is passed via the context argument.
1303 1303 """
1304 1304
1305 1305 tr = lock = None
1306 1306 removed = list(ctx.removed())
1307 1307 p1, p2 = ctx.p1(), ctx.p2()
1308 1308 user = ctx.user()
1309 1309
1310 1310 lock = self.lock()
1311 1311 try:
1312 1312 tr = self.transaction("commit")
1313 1313 trp = weakref.proxy(tr)
1314 1314
1315 1315 if ctx.files():
1316 1316 m1 = p1.manifest().copy()
1317 1317 m2 = p2.manifest()
1318 1318
1319 1319 # check in files
1320 1320 new = {}
1321 1321 changed = []
1322 1322 linkrev = len(self)
1323 1323 for f in sorted(ctx.modified() + ctx.added()):
1324 1324 self.ui.note(f + "\n")
1325 1325 try:
1326 1326 fctx = ctx[f]
1327 1327 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1328 1328 changed)
1329 1329 m1.set(f, fctx.flags())
1330 1330 except OSError, inst:
1331 1331 self.ui.warn(_("trouble committing %s!\n") % f)
1332 1332 raise
1333 1333 except IOError, inst:
1334 1334 errcode = getattr(inst, 'errno', errno.ENOENT)
1335 1335 if error or errcode and errcode != errno.ENOENT:
1336 1336 self.ui.warn(_("trouble committing %s!\n") % f)
1337 1337 raise
1338 1338 else:
1339 1339 removed.append(f)
1340 1340
1341 1341 # update manifest
1342 1342 m1.update(new)
1343 1343 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1344 1344 drop = [f for f in removed if f in m1]
1345 1345 for f in drop:
1346 1346 del m1[f]
1347 1347 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1348 1348 p2.manifestnode(), (new, drop))
1349 1349 files = changed + removed
1350 1350 else:
1351 1351 mn = p1.manifestnode()
1352 1352 files = []
1353 1353
1354 1354 # update changelog
1355 1355 self.changelog.delayupdate()
1356 1356 n = self.changelog.add(mn, files, ctx.description(),
1357 1357 trp, p1.node(), p2.node(),
1358 1358 user, ctx.date(), ctx.extra().copy())
1359 1359 p = lambda: self.changelog.writepending() and self.root or ""
1360 1360 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1361 1361 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1362 1362 parent2=xp2, pending=p)
1363 1363 self.changelog.finalize(trp)
1364 1364             # set the new commit in its proper phase
1365 1365 targetphase = phases.newcommitphase(self.ui)
1366 1366 if targetphase:
1367 1367                 # retracting the boundary does not alter parent changesets.
1368 1368                 # if a parent has a higher phase, the resulting phase will
1369 1369                 # be compliant anyway
1370 1370                 #
1371 1371                 # if the minimal phase is 0 we don't need to retract anything
1372 1372 phases.retractboundary(self, targetphase, [n])
1373 1373 tr.close()
1374 1374 branchmap.updatecache(self.filtered('served'))
1375 1375 return n
1376 1376 finally:
1377 1377 if tr:
1378 1378 tr.release()
1379 1379 lock.release()
1380 1380
1381 1381 @unfilteredmethod
1382 1382 def destroying(self):
1383 1383 '''Inform the repository that nodes are about to be destroyed.
1384 1384 Intended for use by strip and rollback, so there's a common
1385 1385 place for anything that has to be done before destroying history.
1386 1386
1387 1387 This is mostly useful for saving state that is in memory and waiting
1388 1388 to be flushed when the current lock is released. Because a call to
1389 1389 destroyed is imminent, the repo will be invalidated causing those
1390 1390 changes to stay in memory (waiting for the next unlock), or vanish
1391 1391 completely.
1392 1392 '''
1393 1393 # When using the same lock to commit and strip, the phasecache is left
1394 1394 # dirty after committing. Then when we strip, the repo is invalidated,
1395 1395 # causing those changes to disappear.
1396 1396 if '_phasecache' in vars(self):
1397 1397 self._phasecache.write()
1398 1398
1399 1399 @unfilteredmethod
1400 1400 def destroyed(self):
1401 1401 '''Inform the repository that nodes have been destroyed.
1402 1402 Intended for use by strip and rollback, so there's a common
1403 1403 place for anything that has to be done after destroying history.
1404 1404 '''
1405 1405 # When one tries to:
1406 1406 # 1) destroy nodes thus calling this method (e.g. strip)
1407 1407 # 2) use phasecache somewhere (e.g. commit)
1408 1408 #
1409 1409 # then 2) will fail because the phasecache contains nodes that were
1410 1410 # removed. We can either remove phasecache from the filecache,
1411 1411 # causing it to reload next time it is accessed, or simply filter
1412 1412 # the removed nodes now and write the updated cache.
1413 1413 self._phasecache.filterunknown(self)
1414 1414 self._phasecache.write()
1415 1415
1416 1416 # update the 'served' branch cache to help read only server process
1417 1417 # Thanks to branchcache collaboration this is done from the nearest
1418 1418 # filtered subset and it is expected to be fast.
1419 1419 branchmap.updatecache(self.filtered('served'))
1420 1420
1421 1421 # Ensure the persistent tag cache is updated. Doing it now
1422 1422 # means that the tag cache only has to worry about destroyed
1423 1423 # heads immediately after a strip/rollback. That in turn
1424 1424 # guarantees that "cachetip == currenttip" (comparing both rev
1425 1425 # and node) always means no nodes have been added or destroyed.
1426 1426
1427 1427 # XXX this is suboptimal when qrefresh'ing: we strip the current
1428 1428 # head, refresh the tag cache, then immediately add a new head.
1429 1429 # But I think doing it this way is necessary for the "instant
1430 1430 # tag cache retrieval" case to work.
1431 1431 self.invalidate()
1432 1432
1433 1433 def walk(self, match, node=None):
1434 1434 '''
1435 1435 walk recursively through the directory tree or a given
1436 1436 changeset, finding all files matched by the match
1437 1437 function
1438 1438 '''
1439 1439 return self[node].walk(match)
1440 1440
1441 1441 def status(self, node1='.', node2=None, match=None,
1442 1442 ignored=False, clean=False, unknown=False,
1443 1443 listsubrepos=False):
1444 1444         """return status of files between two nodes, or between a node and
1445 1445         the working directory.
1446 1446
1447 1447 If node1 is None, use the first dirstate parent instead.
1448 1448 If node2 is None, compare node1 with working directory.
1449 1449 """
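        # --- Editor's illustrative sketch; not part of the original file. ---
        # status() returns a 7-tuple of file lists in the order built at the
        # end of this method; `repo` is an assumed localrepository instance.
        #
        #     modified, added, removed, deleted, unknown, ignored, clean = \
        #         repo.status(unknown=True, ignored=True, clean=True)
        #     for f in modified:
        #         repo.ui.write('M %s\n' % f)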
1450 1450
1451 1451 def mfmatches(ctx):
1452 1452 mf = ctx.manifest().copy()
1453 1453 if match.always():
1454 1454 return mf
1455 1455 for fn in mf.keys():
1456 1456 if not match(fn):
1457 1457 del mf[fn]
1458 1458 return mf
1459 1459
1460 1460 if isinstance(node1, context.changectx):
1461 1461 ctx1 = node1
1462 1462 else:
1463 1463 ctx1 = self[node1]
1464 1464 if isinstance(node2, context.changectx):
1465 1465 ctx2 = node2
1466 1466 else:
1467 1467 ctx2 = self[node2]
1468 1468
1469 1469 working = ctx2.rev() is None
1470 1470 parentworking = working and ctx1 == self['.']
1471 1471 match = match or matchmod.always(self.root, self.getcwd())
1472 1472 listignored, listclean, listunknown = ignored, clean, unknown
1473 1473
1474 1474 # load earliest manifest first for caching reasons
1475 1475 if not working and ctx2.rev() < ctx1.rev():
1476 1476 ctx2.manifest()
1477 1477
1478 1478 if not parentworking:
1479 1479 def bad(f, msg):
1480 1480 # 'f' may be a directory pattern from 'match.files()',
1481 1481 # so 'f not in ctx1' is not enough
1482 1482 if f not in ctx1 and f not in ctx1.dirs():
1483 1483 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1484 1484 match.bad = bad
1485 1485
1486 1486 if working: # we need to scan the working dir
1487 1487 subrepos = []
1488 1488 if '.hgsub' in self.dirstate:
1489 1489 subrepos = sorted(ctx2.substate)
1490 1490 s = self.dirstate.status(match, subrepos, listignored,
1491 1491 listclean, listunknown)
1492 1492 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1493 1493
1494 1494 # check for any possibly clean files
1495 1495 if parentworking and cmp:
1496 1496 fixup = []
1497 1497 # do a full compare of any files that might have changed
1498 1498 for f in sorted(cmp):
1499 1499 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1500 1500 or ctx1[f].cmp(ctx2[f])):
1501 1501 modified.append(f)
1502 1502 else:
1503 1503 fixup.append(f)
1504 1504
1505 1505 # update dirstate for files that are actually clean
1506 1506 if fixup:
1507 1507 if listclean:
1508 1508 clean += fixup
1509 1509
1510 1510 try:
1511 1511 # updating the dirstate is optional
1512 1512 # so we don't wait on the lock
1513 1513 wlock = self.wlock(False)
1514 1514 try:
1515 1515 for f in fixup:
1516 1516 self.dirstate.normal(f)
1517 1517 finally:
1518 1518 wlock.release()
1519 1519 except error.LockError:
1520 1520 pass
1521 1521
1522 1522 if not parentworking:
1523 1523 mf1 = mfmatches(ctx1)
1524 1524 if working:
1525 1525 # we are comparing working dir against non-parent
1526 1526 # generate a pseudo-manifest for the working dir
1527 1527 mf2 = mfmatches(self['.'])
1528 1528 for f in cmp + modified + added:
1529 1529 mf2[f] = None
1530 1530 mf2.set(f, ctx2.flags(f))
1531 1531 for f in removed:
1532 1532 if f in mf2:
1533 1533 del mf2[f]
1534 1534 else:
1535 1535 # we are comparing two revisions
1536 1536 deleted, unknown, ignored = [], [], []
1537 1537 mf2 = mfmatches(ctx2)
1538 1538
1539 1539 modified, added, clean = [], [], []
1540 1540 withflags = mf1.withflags() | mf2.withflags()
1541 1541 for fn, mf2node in mf2.iteritems():
1542 1542 if fn in mf1:
1543 1543 if (fn not in deleted and
1544 1544 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1545 1545 (mf1[fn] != mf2node and
1546 1546 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1547 1547 modified.append(fn)
1548 1548 elif listclean:
1549 1549 clean.append(fn)
1550 1550 del mf1[fn]
1551 1551 elif fn not in deleted:
1552 1552 added.append(fn)
1553 1553 removed = mf1.keys()
1554 1554
1555 1555 if working and modified and not self.dirstate._checklink:
1556 1556 # Symlink placeholders may get non-symlink-like contents
1557 1557 # via user error or dereferencing by NFS or Samba servers,
1558 1558 # so we filter out any placeholders that don't look like a
1559 1559 # symlink
1560 1560 sane = []
1561 1561 for f in modified:
1562 1562 if ctx2.flags(f) == 'l':
1563 1563 d = ctx2[f].data()
1564 1564 if len(d) >= 1024 or '\n' in d or util.binary(d):
1565 1565 self.ui.debug('ignoring suspect symlink placeholder'
1566 1566 ' "%s"\n' % f)
1567 1567 continue
1568 1568 sane.append(f)
1569 1569 modified = sane
1570 1570
1571 1571 r = modified, added, removed, deleted, unknown, ignored, clean
1572 1572
1573 1573 if listsubrepos:
1574 1574 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1575 1575 if working:
1576 1576 rev2 = None
1577 1577 else:
1578 1578 rev2 = ctx2.substate[subpath][1]
1579 1579 try:
1580 1580 submatch = matchmod.narrowmatcher(subpath, match)
1581 1581 s = sub.status(rev2, match=submatch, ignored=listignored,
1582 1582 clean=listclean, unknown=listunknown,
1583 1583 listsubrepos=True)
1584 1584 for rfiles, sfiles in zip(r, s):
1585 1585 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1586 1586 except error.LookupError:
1587 1587 self.ui.status(_("skipping missing subrepository: %s\n")
1588 1588 % subpath)
1589 1589
1590 1590 for l in r:
1591 1591 l.sort()
1592 1592 return r
1593 1593
1594 1594 def heads(self, start=None):
1595 1595 heads = self.changelog.heads(start)
1596 1596 # sort the output in rev descending order
1597 1597 return sorted(heads, key=self.changelog.rev, reverse=True)
1598 1598
1599 1599 def branchheads(self, branch=None, start=None, closed=False):
1600 1600 '''return a (possibly filtered) list of heads for the given branch
1601 1601
1602 1602 Heads are returned in topological order, from newest to oldest.
1603 1603 If branch is None, use the dirstate branch.
1604 1604 If start is not None, return only heads reachable from start.
1605 1605 If closed is True, return heads that are marked as closed as well.
1606 1606 '''
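        # --- Editor's illustrative sketch; not part of the original file. ---
        # Listing the open heads of a named branch, newest first; `repo` and
        # the branch name 'default' are assumptions for illustration:
        #
        #     from mercurial.node import hex
        #     for h in repo.branchheads('default'):
        #         repo.ui.write('%s\n' % hex(h))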
1607 1607 if branch is None:
1608 1608 branch = self[None].branch()
1609 1609 branches = self.branchmap()
1610 1610 if branch not in branches:
1611 1611 return []
1612 1612 # the cache returns heads ordered lowest to highest
1613 1613 bheads = list(reversed(branches[branch]))
1614 1614 if start is not None:
1615 1615 # filter out the heads that cannot be reached from startrev
1616 1616 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1617 1617 bheads = [h for h in bheads if h in fbheads]
1618 1618 if not closed:
1619 1619 bheads = [h for h in bheads if not self[h].closesbranch()]
1620 1620 return bheads
1621 1621
1622 1622 def branches(self, nodes):
1623 1623 if not nodes:
1624 1624 nodes = [self.changelog.tip()]
1625 1625 b = []
1626 1626 for n in nodes:
1627 1627 t = n
1628 1628 while True:
1629 1629 p = self.changelog.parents(n)
1630 1630 if p[1] != nullid or p[0] == nullid:
1631 1631 b.append((t, n, p[0], p[1]))
1632 1632 break
1633 1633 n = p[0]
1634 1634 return b
1635 1635
1636 1636 def between(self, pairs):
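        # Editor's note (added comment): for each (top, bottom) pair this
        # walks first parents from top towards bottom and records the nodes
        # found at exponentially growing distances (1, 2, 4, 8, ...); the
        # legacy wire-protocol discovery uses these samples to narrow down
        # the set of common changesets.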
1637 1637 r = []
1638 1638
1639 1639 for top, bottom in pairs:
1640 1640 n, l, i = top, [], 0
1641 1641 f = 1
1642 1642
1643 1643 while n != bottom and n != nullid:
1644 1644 p = self.changelog.parents(n)[0]
1645 1645 if i == f:
1646 1646 l.append(n)
1647 1647 f = f * 2
1648 1648 n = p
1649 1649 i += 1
1650 1650
1651 1651 r.append(l)
1652 1652
1653 1653 return r
1654 1654
1655 1655 def pull(self, remote, heads=None, force=False):
1656 1656         # don't open a transaction for nothing or you break future useful
1657 1657         # rollback calls
1658 1658 tr = None
1659 1659 trname = 'pull\n' + util.hidepassword(remote.url())
1660 1660 lock = self.lock()
1661 1661 try:
1662 1662 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1663 1663 force=force)
1664 1664 common, fetch, rheads = tmp
1665 1665 if not fetch:
1666 1666 self.ui.status(_("no changes found\n"))
1667 1667 added = []
1668 1668 result = 0
1669 1669 else:
1670 1670 tr = self.transaction(trname)
1671 1671 if heads is None and list(common) == [nullid]:
1672 1672 self.ui.status(_("requesting all changes\n"))
1673 1673 elif heads is None and remote.capable('changegroupsubset'):
1674 1674 # issue1320, avoid a race if remote changed after discovery
1675 1675 heads = rheads
1676 1676
1677 1677 if remote.capable('getbundle'):
1678 1678 # TODO: get bundlecaps from remote
1679 1679 cg = remote.getbundle('pull', common=common,
1680 1680 heads=heads or rheads)
1681 1681 elif heads is None:
1682 1682 cg = remote.changegroup(fetch, 'pull')
1683 1683 elif not remote.capable('changegroupsubset'):
1684 1684 raise util.Abort(_("partial pull cannot be done because "
1685 1685 "other repository doesn't support "
1686 1686 "changegroupsubset."))
1687 1687 else:
1688 1688 cg = remote.changegroupsubset(fetch, heads, 'pull')
1689 1689                 # we use the unfiltered changelog here because hidden revisions
1690 1690                 # must be taken into account for phase synchronization. They may
1691 1691                 # become public and visible again.
1692 1692 cl = self.unfiltered().changelog
1693 1693 clstart = len(cl)
1694 1694 result = self.addchangegroup(cg, 'pull', remote.url())
1695 1695 clend = len(cl)
1696 1696 added = [cl.node(r) for r in xrange(clstart, clend)]
1697 1697
1698 1698 # compute target subset
1699 1699 if heads is None:
1700 1700                 # We pulled everything possible
1701 1701 # sync on everything common
1702 1702 subset = common + added
1703 1703 else:
1704 1704 # We pulled a specific subset
1705 1705 # sync on this subset
1706 1706 subset = heads
1707 1707
1708 1708             # Get phases data from the remote
1709 1709 remotephases = remote.listkeys('phases')
1710 1710 publishing = bool(remotephases.get('publishing', False))
1711 1711 if remotephases and not publishing:
1712 1712                 # remote is new and non-publishing
1713 1713 pheads, _dr = phases.analyzeremotephases(self, subset,
1714 1714 remotephases)
1715 1715 phases.advanceboundary(self, phases.public, pheads)
1716 1716 phases.advanceboundary(self, phases.draft, subset)
1717 1717 else:
1718 1718                 # Remote is old or publishing; all common changesets
1719 1719                 # should be seen as public
1720 1720 phases.advanceboundary(self, phases.public, subset)
1721 1721
1722 1722 def gettransaction():
1723 1723 if tr is None:
1724 1724 return self.transaction(trname)
1725 1725 return tr
1726 1726
1727 1727 obstr = obsolete.syncpull(self, remote, gettransaction)
1728 1728 if obstr is not None:
1729 1729 tr = obstr
1730 1730
1731 1731 if tr is not None:
1732 1732 tr.close()
1733 1733 finally:
1734 1734 if tr is not None:
1735 1735 tr.release()
1736 1736 lock.release()
1737 1737
1738 1738 return result
1739 1739
1740 1740 def checkpush(self, force, revs):
1741 1741 """Extensions can override this function if additional checks have
1742 1742 to be performed before pushing, or call it if they override push
1743 1743 command.
1744 1744 """
1745 1745 pass
1746 1746
1747 1747 def push(self, remote, force=False, revs=None, newbranch=False):
1748 1748 '''Push outgoing changesets (limited by revs) from the current
1749 1749 repository to remote. Return an integer:
1750 1750 - None means nothing to push
1751 1751 - 0 means HTTP error
1752 1752 - 1 means we pushed and remote head count is unchanged *or*
1753 1753 we have outgoing changesets but refused to push
1754 1754 - other values as described by addchangegroup()
1755 1755 '''
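        # --- Editor's illustrative sketch; not part of the original file. ---
        # Interpreting the return value; `repo` and the peer `other` (for
        # example obtained via mercurial.hg.peer) are assumptions here:
        #
        #     ret = repo.push(other, newbranch=True)
        #     if ret is None:
        #         repo.ui.status('nothing to push\n')
        #     elif ret == 0:
        #         repo.ui.warn('push failed\n')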
1756 1756         # there are two ways to push to the remote repo:
1757 1757         #
1758 1758         # addchangegroup assumes the local user can lock the remote
1759 1759         # repo (local filesystem, old ssh servers).
1760 1760         #
1761 1761         # unbundle assumes the local user cannot lock the remote repo (new ssh
1762 1762         # servers, http servers).
1763 1763
1764 1764 if not remote.canpush():
1765 1765 raise util.Abort(_("destination does not support push"))
1766 1766 unfi = self.unfiltered()
1767 1767 def localphasemove(nodes, phase=phases.public):
1768 1768 """move <nodes> to <phase> in the local source repo"""
1769 1769 if locallock is not None:
1770 1770 phases.advanceboundary(self, phase, nodes)
1771 1771 else:
1772 1772 # repo is not locked, do not change any phases!
1773 1773 # Informs the user that phases should have been moved when
1774 1774 # applicable.
1775 1775 actualmoves = [n for n in nodes if phase < self[n].phase()]
1776 1776 phasestr = phases.phasenames[phase]
1777 1777 if actualmoves:
1778 1778 self.ui.status(_('cannot lock source repo, skipping local'
1779 1779 ' %s phase update\n') % phasestr)
1780 1780 # get local lock as we might write phase data
1781 1781 locallock = None
1782 1782 try:
1783 1783 locallock = self.lock()
1784 1784 except IOError, err:
1785 1785 if err.errno != errno.EACCES:
1786 1786 raise
1787 1787 # source repo cannot be locked.
1788 1788 # We do not abort the push, but just disable the local phase
1789 1789 # synchronisation.
1790 1790 msg = 'cannot lock source repository: %s\n' % err
1791 1791 self.ui.debug(msg)
1792 1792 try:
1793 1793 self.checkpush(force, revs)
1794 1794 lock = None
1795 1795 unbundle = remote.capable('unbundle')
1796 1796 if not unbundle:
1797 1797 lock = remote.lock()
1798 1798 try:
1799 1799 # discovery
1800 1800 fci = discovery.findcommonincoming
1801 1801 commoninc = fci(unfi, remote, force=force)
1802 1802 common, inc, remoteheads = commoninc
1803 1803 fco = discovery.findcommonoutgoing
1804 1804 outgoing = fco(unfi, remote, onlyheads=revs,
1805 1805 commoninc=commoninc, force=force)
1806 1806
1807 1807
1808 1808 if not outgoing.missing:
1809 1809 # nothing to push
1810 1810 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1811 1811 ret = None
1812 1812 else:
1813 1813 # something to push
1814 1814 if not force:
1815 1815                         # if self.obsstore is empty --> no obsolete changesets,
1816 1816                         # so we can skip the iteration
1817 1817 if unfi.obsstore:
1818 1818                             # these messages are defined here because of the 80-char limit
1819 1819 mso = _("push includes obsolete changeset: %s!")
1820 1820 mst = "push includes %s changeset: %s!"
1821 1821 # plain versions for i18n tool to detect them
1822 1822 _("push includes unstable changeset: %s!")
1823 1823 _("push includes bumped changeset: %s!")
1824 1824 _("push includes divergent changeset: %s!")
1825 1825                             # If there is at least one obsolete or unstable
1826 1826                             # changeset in missing, at least one of the
1827 1827                             # missingheads will be obsolete or unstable.
1828 1828                             # So checking only the heads is enough.
1829 1829 for node in outgoing.missingheads:
1830 1830 ctx = unfi[node]
1831 1831 if ctx.obsolete():
1832 1832 raise util.Abort(mso % ctx)
1833 1833 elif ctx.troubled():
1834 1834 raise util.Abort(_(mst)
1835 1835 % (ctx.troubles()[0],
1836 1836 ctx))
1837 1837 discovery.checkheads(unfi, remote, outgoing,
1838 1838 remoteheads, newbranch,
1839 1839 bool(inc))
1840 1840
1841 1841 # TODO: get bundlecaps from remote
1842 1842 bundlecaps = None
1843 1843 # create a changegroup from local
1844 1844 if revs is None and not outgoing.excluded:
1845 1845 # push everything,
1846 1846 # use the fast path, no race possible on push
1847 1847 bundler = changegroup.bundle10(self, bundlecaps)
1848 1848 cg = self._changegroupsubset(outgoing,
1849 1849 bundler,
1850 1850 'push',
1851 1851 fastpath=True)
1852 1852 else:
1853 1853 cg = self.getlocalbundle('push', outgoing, bundlecaps)
1854 1854
1855 1855 # apply changegroup to remote
1856 1856 if unbundle:
1857 1857                         # the local repo finds heads on the server and works out
1858 1858                         # which revs it must push. Once the revs are transferred, if
1859 1859                         # the server finds it has different heads (someone else won
1860 1860                         # the commit/push race), the server aborts.
1861 1861 if force:
1862 1862 remoteheads = ['force']
1863 1863 # ssh: return remote's addchangegroup()
1864 1864 # http: return remote's addchangegroup() or 0 for error
1865 1865 ret = remote.unbundle(cg, remoteheads, 'push')
1866 1866 else:
1867 1867 # we return an integer indicating remote head count
1868 1868 # change
1869 1869 ret = remote.addchangegroup(cg, 'push', self.url())
1870 1870
1871 1871 if ret:
1872 1872                     # push succeeded, synchronize the target of the push
1873 1873 cheads = outgoing.missingheads
1874 1874 elif revs is None:
1875 1875                     # The all-out push failed; synchronize on all common heads
1876 1876 cheads = outgoing.commonheads
1877 1877 else:
1878 1878 # I want cheads = heads(::missingheads and ::commonheads)
1879 1879 # (missingheads is revs with secret changeset filtered out)
1880 1880 #
1881 1881 # This can be expressed as:
1882 1882 # cheads = ( (missingheads and ::commonheads)
1883 1883                     #              + (commonheads and ::missingheads)
1884 1884 # )
1885 1885 #
1886 1886 # while trying to push we already computed the following:
1887 1887 # common = (::commonheads)
1888 1888 # missing = ((commonheads::missingheads) - commonheads)
1889 1889 #
1890 1890 # We can pick:
1891 1891 # * missingheads part of common (::commonheads)
1892 1892 common = set(outgoing.common)
1893 1893 cheads = [node for node in revs if node in common]
1894 1894 # and
1895 1895                     # * commonheads that are parents of roots of missing
1896 1896 revset = unfi.set('%ln and parents(roots(%ln))',
1897 1897 outgoing.commonheads,
1898 1898 outgoing.missing)
1899 1899 cheads.extend(c.node() for c in revset)
1900 1900 # even when we don't push, exchanging phase data is useful
1901 1901 remotephases = remote.listkeys('phases')
1902 1902 if (self.ui.configbool('ui', '_usedassubrepo', False)
1903 1903 and remotephases # server supports phases
1904 1904 and ret is None # nothing was pushed
1905 1905 and remotephases.get('publishing', False)):
1906 1906 # When:
1907 1907 # - this is a subrepo push
1908 1908                     # - and the remote supports phases
1909 1909 # - and no changeset was pushed
1910 1910 # - and remote is publishing
1911 1911 # We may be in issue 3871 case!
1912 1912                     # We drop the phase synchronisation usually done as a
1913 1913                     # courtesy, since changesets that are still draft locally
1914 1914                     # may already be published on the remote.
1915 1915 remotephases = {'publishing': 'True'}
1916 1916 if not remotephases: # old server or public only repo
1917 1917 localphasemove(cheads)
1918 1918 # don't push any phase data as there is nothing to push
1919 1919 else:
1920 1920 ana = phases.analyzeremotephases(self, cheads, remotephases)
1921 1921 pheads, droots = ana
1922 1922 ### Apply remote phase on local
1923 1923 if remotephases.get('publishing', False):
1924 1924 localphasemove(cheads)
1925 1925 else: # publish = False
1926 1926 localphasemove(pheads)
1927 1927 localphasemove(cheads, phases.draft)
1928 1928 ### Apply local phase on remote
1929 1929
1930 1930                 # Get the list of all revs that are draft on the remote but public here.
1931 1931                 # XXX Beware that the revset breaks if droots is not strictly made
1932 1932                 # XXX of roots; we may want to ensure it is, but that is costly
1933 1933 outdated = unfi.set('heads((%ln::%ln) and public())',
1934 1934 droots, cheads)
1935 1935 for newremotehead in outdated:
1936 1936 r = remote.pushkey('phases',
1937 1937 newremotehead.hex(),
1938 1938 str(phases.draft),
1939 1939 str(phases.public))
1940 1940 if not r:
1941 1941 self.ui.warn(_('updating %s to public failed!\n')
1942 1942 % newremotehead)
1943 1943 self.ui.debug('try to push obsolete markers to remote\n')
1944 1944 obsolete.syncpush(self, remote)
1945 1945 finally:
1946 1946 if lock is not None:
1947 1947 lock.release()
1948 1948 finally:
1949 1949 if locallock is not None:
1950 1950 locallock.release()
1951 1951
1952 1952 self.ui.debug("checking for updated bookmarks\n")
1953 1953 rb = remote.listkeys('bookmarks')
1954 1954 for k in rb.keys():
1955 1955 if k in unfi._bookmarks:
1956 1956 nr, nl = rb[k], hex(self._bookmarks[k])
1957 1957 if nr in unfi:
1958 1958 cr = unfi[nr]
1959 1959 cl = unfi[nl]
1960 1960 if bookmarks.validdest(unfi, cr, cl):
1961 1961 r = remote.pushkey('bookmarks', k, nr, nl)
1962 1962 if r:
1963 1963 self.ui.status(_("updating bookmark %s\n") % k)
1964 1964 else:
1965 1965 self.ui.warn(_('updating bookmark %s'
1966 1966 ' failed!\n') % k)
1967 1967
1968 1968 return ret
1969 1969
1970 1970 def changegroupinfo(self, nodes, source):
1971 1971 if self.ui.verbose or source == 'bundle':
1972 1972 self.ui.status(_("%d changesets found\n") % len(nodes))
1973 1973 if self.ui.debugflag:
1974 1974 self.ui.debug("list of changesets:\n")
1975 1975 for node in nodes:
1976 1976 self.ui.debug("%s\n" % hex(node))
1977 1977
1978 1978 def changegroupsubset(self, bases, heads, source):
1979 1979 """Compute a changegroup consisting of all the nodes that are
1980 1980 descendants of any of the bases and ancestors of any of the heads.
1981 1981 Return a chunkbuffer object whose read() method will return
1982 1982 successive changegroup chunks.
1983 1983
1984 1984 It is fairly complex as determining which filenodes and which
1985 1985 manifest nodes need to be included for the changeset to be complete
1986 1986 is non-trivial.
1987 1987
1988 1988 Another wrinkle is doing the reverse, figuring out which changeset in
1989 1989 the changegroup a particular filenode or manifestnode belongs to.
1990 1990 """
1991 1991 cl = self.changelog
1992 1992 if not bases:
1993 1993 bases = [nullid]
1994 1994 # TODO: remove call to nodesbetween.
1995 1995 csets, bases, heads = cl.nodesbetween(bases, heads)
1996 1996 bases = [p for n in bases for p in cl.parents(n) if p != nullid]
1997 1997 outgoing = discovery.outgoing(cl, bases, heads)
1998 1998 bundler = changegroup.bundle10(self)
1999 1999 return self._changegroupsubset(outgoing, bundler, source)
2000 2000
2001 2001 def getlocalbundle(self, source, outgoing, bundlecaps=None):
2002 2002 """Like getbundle, but taking a discovery.outgoing as an argument.
2003 2003
2004 2004 This is only implemented for local repos and reuses potentially
2005 2005 precomputed sets in outgoing."""
2006 2006 if not outgoing.missing:
2007 2007 return None
2008 2008 bundler = changegroup.bundle10(self, bundlecaps)
2009 2009 return self._changegroupsubset(outgoing, bundler, source)
2010 2010
2011 2011 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
2012 2012 """Like changegroupsubset, but returns the set difference between the
2013 2013         ancestors of heads and the ancestors of common.
2014 2014
2015 2015 If heads is None, use the local heads. If common is None, use [nullid].
2016 2016
2017 2017 The nodes in common might not all be known locally due to the way the
2018 2018 current discovery protocol works.
2019 2019 """
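        # --- Editor's illustrative sketch; not part of the original file. ---
        # Writing the changesets missing from a set of common nodes into a
        # bundle file; `repo`, `commonnodes` and the 'HG10BZ' type are
        # assumptions, and changegroup.writebundle is assumed to take
        # (cg, filename, bundletype):
        #
        #     cg = repo.getbundle('bundle', common=commonnodes)
        #     if cg is not None:
        #         changegroup.writebundle(cg, 'out.hg', 'HG10BZ')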
2020 2020 cl = self.changelog
2021 2021 if common:
2022 2022 hasnode = cl.hasnode
2023 2023 common = [n for n in common if hasnode(n)]
2024 2024 else:
2025 2025 common = [nullid]
2026 2026 if not heads:
2027 2027 heads = cl.heads()
2028 2028 return self.getlocalbundle(source,
2029 2029 discovery.outgoing(cl, common, heads),
2030 2030 bundlecaps=bundlecaps)
2031 2031
2032 2032 @unfilteredmethod
2033 2033 def _changegroupsubset(self, outgoing, bundler, source,
2034 2034 fastpath=False):
2035 2035 commonrevs = outgoing.common
2036 2036 csets = outgoing.missing
2037 2037 heads = outgoing.missingheads
2038 cl = bundler._changelog
2039 mf = bundler._manifest
2040 mfs = {} # needed manifests
2041 fnodes = {} # needed file nodes
2042 changedfiles = set()
2043 fstate = ['', {}]
2044
2045 2038         # We go through the fast path if we get told to, or if all (unfiltered)
2046 2039         # heads have been requested (since we then know that all linkrevs will
2047 2040         # be pulled by the client).
2048 2041 heads.sort()
2049 2042 fastpathlinkrev = fastpath or (
2050 2043 self.filtername is None and heads == sorted(self.heads()))
2051 2044
2052 2045 self.hook('preoutgoing', throw=True, source=source)
2053 2046 self.changegroupinfo(csets, source)
2054
2055 # filter any nodes that claim to be part of the known set
2056 def prune(revlog, missing):
2057 rr, rl = revlog.rev, revlog.linkrev
2058 return [n for n in missing
2059 if rl(rr(n)) not in commonrevs]
2060
2061 progress = self.ui.progress
2062 _bundling = _('bundling')
2063 _changesets = _('changesets')
2064 _manifests = _('manifests')
2065 _files = _('files')
2066
2067 def lookup(revlog, x):
2068 count = bundler.count
2069 if revlog == cl:
2070 c = cl.read(x)
2071 changedfiles.update(c[3])
2072 mfs.setdefault(c[0], x)
2073 count[0] += 1
2074 progress(_bundling, count[0],
2075 unit=_changesets, total=count[1])
2076 return x
2077 elif revlog == mf:
2078 clnode = mfs[x]
2079 if not fastpathlinkrev:
2080 mdata = mf.readfast(x)
2081 for f, n in mdata.iteritems():
2082 if f in changedfiles:
2083 fnodes[f].setdefault(n, clnode)
2084 count[0] += 1
2085 progress(_bundling, count[0],
2086 unit=_manifests, total=count[1])
2087 return clnode
2088 else:
2089 progress(_bundling, count[0], item=fstate[0],
2090 unit=_files, total=count[1])
2091 return fstate[1][x]
2092
2093 bundler.start(lookup)
2094
2095 def getmfnodes():
2096 for f in changedfiles:
2097 fnodes[f] = {}
2098 bundler.count[:] = [0, len(mfs)]
2099 return prune(mf, mfs)
2100 def getfiles():
2101 mfs.clear()
2102 return changedfiles
2103 def getfilenodes(fname, filerevlog):
2104 if fastpathlinkrev:
2105 ln, llr = filerevlog.node, filerevlog.linkrev
2106 def genfilenodes():
2107 for r in filerevlog:
2108 linkrev = llr(r)
2109 if linkrev not in commonrevs:
2110 yield filerevlog.node(r), cl.node(linkrev)
2111 fnodes[fname] = dict(genfilenodes())
2112 fstate[0] = fname
2113 fstate[1] = fnodes.pop(fname, {})
2114 return prune(filerevlog, fstate[1])
2115
2116 gengroup = bundler.generate(csets, getmfnodes, getfiles, getfilenodes,
2117 source)
2047 gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
2118 2048 return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')
2119 2049
2120 2050 def changegroup(self, basenodes, source):
2121 2051 # to avoid a race we use changegroupsubset() (issue1320)
2122 2052 return self.changegroupsubset(basenodes, self.heads(), source)
2123 2053
2124 2054 @unfilteredmethod
2125 2055 def addchangegroup(self, source, srctype, url, emptyok=False):
2126 2056 """Add the changegroup returned by source.read() to this repo.
2127 2057 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2128 2058 the URL of the repo where this changegroup is coming from.
2129 2059
2130 2060 Return an integer summarizing the change to this repo:
2131 2061 - nothing changed or no source: 0
2132 2062 - more heads than before: 1+added heads (2..n)
2133 2063 - fewer heads than before: -1-removed heads (-2..-n)
2134 2064 - number of heads stays the same: 1
2135 2065 """
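        # --- Editor's illustrative sketch; not part of the original file. ---
        # Applying a changegroup read from a bundle file and interpreting the
        # return value; the file name and the changegroup.readbundle helper
        # are assumptions for illustration:
        #
        #     fh = open('incoming.hg', 'rb')
        #     gen = changegroup.readbundle(fh, 'incoming.hg')
        #     ret = repo.addchangegroup(gen, 'unbundle', 'bundle:incoming.hg')
        #     # ret > 1: new heads added; ret < 0: heads removed; ret == 1:
        #     # head count unchanged; ret == 0: nothing changed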
2136 2066 def csmap(x):
2137 2067 self.ui.debug("add changeset %s\n" % short(x))
2138 2068 return len(cl)
2139 2069
2140 2070 def revmap(x):
2141 2071 return cl.rev(x)
2142 2072
2143 2073 if not source:
2144 2074 return 0
2145 2075
2146 2076 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2147 2077
2148 2078 changesets = files = revisions = 0
2149 2079 efiles = set()
2150 2080
2151 2081 # write changelog data to temp files so concurrent readers will not see
2152 2082         # an inconsistent view
2153 2083 cl = self.changelog
2154 2084 cl.delayupdate()
2155 2085 oldheads = cl.heads()
2156 2086
2157 2087 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2158 2088 try:
2159 2089 trp = weakref.proxy(tr)
2160 2090 # pull off the changeset group
2161 2091 self.ui.status(_("adding changesets\n"))
2162 2092 clstart = len(cl)
2163 2093 class prog(object):
2164 2094 step = _('changesets')
2165 2095 count = 1
2166 2096 ui = self.ui
2167 2097 total = None
2168 2098 def __call__(self):
2169 2099 self.ui.progress(self.step, self.count, unit=_('chunks'),
2170 2100 total=self.total)
2171 2101 self.count += 1
2172 2102 pr = prog()
2173 2103 source.callback = pr
2174 2104
2175 2105 source.changelogheader()
2176 2106 srccontent = cl.addgroup(source, csmap, trp)
2177 2107 if not (srccontent or emptyok):
2178 2108 raise util.Abort(_("received changelog group is empty"))
2179 2109 clend = len(cl)
2180 2110 changesets = clend - clstart
2181 2111 for c in xrange(clstart, clend):
2182 2112 efiles.update(self[c].files())
2183 2113 efiles = len(efiles)
2184 2114 self.ui.progress(_('changesets'), None)
2185 2115
2186 2116 # pull off the manifest group
2187 2117 self.ui.status(_("adding manifests\n"))
2188 2118 pr.step = _('manifests')
2189 2119 pr.count = 1
2190 2120 pr.total = changesets # manifests <= changesets
2191 2121 # no need to check for empty manifest group here:
2192 2122 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2193 2123 # no new manifest will be created and the manifest group will
2194 2124 # be empty during the pull
2195 2125 source.manifestheader()
2196 2126 self.manifest.addgroup(source, revmap, trp)
2197 2127 self.ui.progress(_('manifests'), None)
2198 2128
2199 2129 needfiles = {}
2200 2130 if self.ui.configbool('server', 'validate', default=False):
2201 2131 # validate incoming csets have their manifests
2202 2132 for cset in xrange(clstart, clend):
2203 2133 mfest = self.changelog.read(self.changelog.node(cset))[0]
2204 2134 mfest = self.manifest.readdelta(mfest)
2205 2135 # store file nodes we must see
2206 2136 for f, n in mfest.iteritems():
2207 2137 needfiles.setdefault(f, set()).add(n)
2208 2138
2209 2139 # process the files
2210 2140 self.ui.status(_("adding file changes\n"))
2211 2141 pr.step = _('files')
2212 2142 pr.count = 1
2213 2143 pr.total = efiles
2214 2144 source.callback = None
2215 2145
2216 2146 while True:
2217 2147 chunkdata = source.filelogheader()
2218 2148 if not chunkdata:
2219 2149 break
2220 2150 f = chunkdata["filename"]
2221 2151 self.ui.debug("adding %s revisions\n" % f)
2222 2152 pr()
2223 2153 fl = self.file(f)
2224 2154 o = len(fl)
2225 2155 if not fl.addgroup(source, revmap, trp):
2226 2156 raise util.Abort(_("received file revlog group is empty"))
2227 2157 revisions += len(fl) - o
2228 2158 files += 1
2229 2159 if f in needfiles:
2230 2160 needs = needfiles[f]
2231 2161 for new in xrange(o, len(fl)):
2232 2162 n = fl.node(new)
2233 2163 if n in needs:
2234 2164 needs.remove(n)
2235 2165 else:
2236 2166 raise util.Abort(
2237 2167 _("received spurious file revlog entry"))
2238 2168 if not needs:
2239 2169 del needfiles[f]
2240 2170 self.ui.progress(_('files'), None)
2241 2171
2242 2172 for f, needs in needfiles.iteritems():
2243 2173 fl = self.file(f)
2244 2174 for n in needs:
2245 2175 try:
2246 2176 fl.rev(n)
2247 2177 except error.LookupError:
2248 2178 raise util.Abort(
2249 2179 _('missing file data for %s:%s - run hg verify') %
2250 2180 (f, hex(n)))
2251 2181
2252 2182 dh = 0
2253 2183 if oldheads:
2254 2184 heads = cl.heads()
2255 2185 dh = len(heads) - len(oldheads)
2256 2186 for h in heads:
2257 2187 if h not in oldheads and self[h].closesbranch():
2258 2188 dh -= 1
2259 2189 htext = ""
2260 2190 if dh:
2261 2191 htext = _(" (%+d heads)") % dh
2262 2192
2263 2193 self.ui.status(_("added %d changesets"
2264 2194 " with %d changes to %d files%s\n")
2265 2195 % (changesets, revisions, files, htext))
2266 2196 self.invalidatevolatilesets()
2267 2197
2268 2198 if changesets > 0:
2269 2199 p = lambda: cl.writepending() and self.root or ""
2270 2200 self.hook('pretxnchangegroup', throw=True,
2271 2201 node=hex(cl.node(clstart)), source=srctype,
2272 2202 url=url, pending=p)
2273 2203
2274 2204 added = [cl.node(r) for r in xrange(clstart, clend)]
2275 2205 publishing = self.ui.configbool('phases', 'publish', True)
2276 2206 if srctype == 'push':
2277 2207                     # Old servers can not push the boundary themselves.
2278 2208                     # New servers won't push the boundary if the changeset already
2279 2209                     # existed locally as secret
2280 2210                     #
2281 2211                     # We should not use added here but the list of all changes in
2282 2212                     # the bundle
2283 2213 if publishing:
2284 2214 phases.advanceboundary(self, phases.public, srccontent)
2285 2215 else:
2286 2216 phases.advanceboundary(self, phases.draft, srccontent)
2287 2217 phases.retractboundary(self, phases.draft, added)
2288 2218 elif srctype != 'strip':
2289 2219                 # publishing only alters behavior during push
2290 2220 #
2291 2221 # strip should not touch boundary at all
2292 2222 phases.retractboundary(self, phases.draft, added)
2293 2223
2294 2224 # make changelog see real files again
2295 2225 cl.finalize(trp)
2296 2226
2297 2227 tr.close()
2298 2228
2299 2229 if changesets > 0:
2300 2230 if srctype != 'strip':
2301 2231                     # During strip, the branchcache is invalid but the coming call to
2302 2232                     # `destroyed` will repair it.
2303 2233                     # In other cases we can safely update the cache on disk.
2304 2234 branchmap.updatecache(self.filtered('served'))
2305 2235 def runhooks():
2306 2236 # forcefully update the on-disk branch cache
2307 2237 self.ui.debug("updating the branch cache\n")
2308 2238 self.hook("changegroup", node=hex(cl.node(clstart)),
2309 2239 source=srctype, url=url)
2310 2240
2311 2241 for n in added:
2312 2242 self.hook("incoming", node=hex(n), source=srctype,
2313 2243 url=url)
2314 2244
2315 2245 newheads = [h for h in self.heads() if h not in oldheads]
2316 2246 self.ui.log("incoming",
2317 2247 "%s incoming changes - new heads: %s\n",
2318 2248 len(added),
2319 2249 ', '.join([hex(c[:6]) for c in newheads]))
2320 2250 self._afterlock(runhooks)
2321 2251
2322 2252 finally:
2323 2253 tr.release()
2324 2254 # never return 0 here:
2325 2255 if dh < 0:
2326 2256 return dh - 1
2327 2257 else:
2328 2258 return dh + 1
2329 2259
2330 2260 def stream_in(self, remote, requirements):
2331 2261 lock = self.lock()
2332 2262 try:
2333 2263 # Save remote branchmap. We will use it later
2334 2264 # to speed up branchcache creation
2335 2265 rbranchmap = None
2336 2266 if remote.capable("branchmap"):
2337 2267 rbranchmap = remote.branchmap()
2338 2268
2339 2269 fp = remote.stream_out()
2340 2270 l = fp.readline()
2341 2271 try:
2342 2272 resp = int(l)
2343 2273 except ValueError:
2344 2274 raise error.ResponseError(
2345 2275 _('unexpected response from remote server:'), l)
2346 2276 if resp == 1:
2347 2277 raise util.Abort(_('operation forbidden by server'))
2348 2278 elif resp == 2:
2349 2279 raise util.Abort(_('locking the remote repository failed'))
2350 2280 elif resp != 0:
2351 2281 raise util.Abort(_('the server sent an unknown error code'))
2352 2282 self.ui.status(_('streaming all changes\n'))
2353 2283 l = fp.readline()
2354 2284 try:
2355 2285 total_files, total_bytes = map(int, l.split(' ', 1))
2356 2286 except (ValueError, TypeError):
2357 2287 raise error.ResponseError(
2358 2288 _('unexpected response from remote server:'), l)
2359 2289 self.ui.status(_('%d files to transfer, %s of data\n') %
2360 2290 (total_files, util.bytecount(total_bytes)))
2361 2291 handled_bytes = 0
2362 2292 self.ui.progress(_('clone'), 0, total=total_bytes)
2363 2293 start = time.time()
2364 2294 for i in xrange(total_files):
2365 2295 # XXX doesn't support '\n' or '\r' in filenames
2366 2296 l = fp.readline()
2367 2297 try:
2368 2298 name, size = l.split('\0', 1)
2369 2299 size = int(size)
2370 2300 except (ValueError, TypeError):
2371 2301 raise error.ResponseError(
2372 2302 _('unexpected response from remote server:'), l)
2373 2303 if self.ui.debugflag:
2374 2304 self.ui.debug('adding %s (%s)\n' %
2375 2305 (name, util.bytecount(size)))
2376 2306 # for backwards compat, name was partially encoded
2377 2307 ofp = self.sopener(store.decodedir(name), 'w')
2378 2308 for chunk in util.filechunkiter(fp, limit=size):
2379 2309 handled_bytes += len(chunk)
2380 2310 self.ui.progress(_('clone'), handled_bytes,
2381 2311 total=total_bytes)
2382 2312 ofp.write(chunk)
2383 2313 ofp.close()
2384 2314 elapsed = time.time() - start
2385 2315 if elapsed <= 0:
2386 2316 elapsed = 0.001
2387 2317 self.ui.progress(_('clone'), None)
2388 2318 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2389 2319 (util.bytecount(total_bytes), elapsed,
2390 2320 util.bytecount(total_bytes / elapsed)))
2391 2321
2392 2322 # new requirements = old non-format requirements +
2393 2323 # new format-related
2394 2324 # requirements from the streamed-in repository
2395 2325 requirements.update(set(self.requirements) - self.supportedformats)
2396 2326 self._applyrequirements(requirements)
2397 2327 self._writerequirements()
2398 2328
2399 2329 if rbranchmap:
2400 2330 rbheads = []
2401 2331 for bheads in rbranchmap.itervalues():
2402 2332 rbheads.extend(bheads)
2403 2333
2404 2334 if rbheads:
2405 2335 rtiprev = max((int(self.changelog.rev(node))
2406 2336 for node in rbheads))
2407 2337 cache = branchmap.branchcache(rbranchmap,
2408 2338 self[rtiprev].node(),
2409 2339 rtiprev)
2410 2340 # Try to stick it as low as possible
2411 2341                 # filter levels above 'served' are unlikely to be fetched from by a clone
2412 2342 for candidate in ('base', 'immutable', 'served'):
2413 2343 rview = self.filtered(candidate)
2414 2344 if cache.validfor(rview):
2415 2345 self._branchcaches[candidate] = cache
2416 2346 cache.write(rview)
2417 2347 break
2418 2348 self.invalidate()
2419 2349 return len(self.heads()) + 1
2420 2350 finally:
2421 2351 lock.release()
2422 2352
2423 2353 def clone(self, remote, heads=[], stream=False):
2424 2354 '''clone remote repository.
2425 2355
2426 2356 keyword arguments:
2427 2357 heads: list of revs to clone (forces use of pull)
2428 2358 stream: use streaming clone if possible'''
2429 2359
2430 2360 # now, all clients that can request uncompressed clones can
2431 2361 # read repo formats supported by all servers that can serve
2432 2362 # them.
2433 2363
2434 2364 # if revlog format changes, client will have to check version
2435 2365 # and format flags on "stream" capability, and use
2436 2366 # uncompressed only if compatible.
2437 2367
2438 2368 if not stream:
2439 2369 # if the server explicitly prefers to stream (for fast LANs)
2440 2370 stream = remote.capable('stream-preferred')
2441 2371
2442 2372 if stream and not heads:
2443 2373 # 'stream' means remote revlog format is revlogv1 only
2444 2374 if remote.capable('stream'):
2445 2375 return self.stream_in(remote, set(('revlogv1',)))
2446 2376 # otherwise, 'streamreqs' contains the remote revlog format
2447 2377 streamreqs = remote.capable('streamreqs')
2448 2378 if streamreqs:
2449 2379 streamreqs = set(streamreqs.split(','))
2450 2380 # if we support it, stream in and adjust our requirements
2451 2381 if not streamreqs - self.supportedformats:
2452 2382 return self.stream_in(remote, streamreqs)
2453 2383 return self.pull(remote, heads)
2454 2384
2455 2385 def pushkey(self, namespace, key, old, new):
2456 2386 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2457 2387 old=old, new=new)
2458 2388 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2459 2389 ret = pushkey.push(self, namespace, key, old, new)
2460 2390 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2461 2391 ret=ret)
2462 2392 return ret
2463 2393
2464 2394 def listkeys(self, namespace):
2465 2395 self.hook('prelistkeys', throw=True, namespace=namespace)
2466 2396 self.ui.debug('listing keys for "%s"\n' % namespace)
2467 2397 values = pushkey.list(self, namespace)
2468 2398 self.hook('listkeys', namespace=namespace, values=values)
2469 2399 return values
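    # --- Editor's illustrative sketch; not part of the original file. ---
    # pushkey namespaces such as 'bookmarks' or 'phases' can be inspected
    # through listkeys(); `repo` is an assumed localrepository instance:
    #
    #     for name, hexnode in sorted(repo.listkeys('bookmarks').items()):
    #         repo.ui.write('%s %s\n' % (name, hexnode))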
2470 2400
2471 2401 def debugwireargs(self, one, two, three=None, four=None, five=None):
2472 2402 '''used to test argument passing over the wire'''
2473 2403 return "%s %s %s %s %s" % (one, two, three, four, five)
2474 2404
2475 2405 def savecommitmessage(self, text):
2476 2406 fp = self.opener('last-message.txt', 'wb')
2477 2407 try:
2478 2408 fp.write(text)
2479 2409 finally:
2480 2410 fp.close()
2481 2411 return self.pathto(fp.name[len(self.root) + 1:])
2482 2412
2483 2413 # used to avoid circular references so destructors work
2484 2414 def aftertrans(files):
2485 2415 renamefiles = [tuple(t) for t in files]
2486 2416 def a():
2487 2417 for vfs, src, dest in renamefiles:
2488 2418 try:
2489 2419 vfs.rename(src, dest)
2490 2420 except OSError: # journal file does not yet exist
2491 2421 pass
2492 2422 return a
2493 2423
2494 2424 def undoname(fn):
2495 2425 base, name = os.path.split(fn)
2496 2426 assert name.startswith('journal')
2497 2427 return os.path.join(base, name.replace('journal', 'undo', 1))
2498 2428
2499 2429 def instance(ui, path, create):
2500 2430 return localrepository(ui, util.urllocalpath(path), create)
2501 2431
2502 2432 def islocal(path):
2503 2433 return True