localrepo: use propertycache
Matt Mackall
r8260:54a4b520 default
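This changeset replaces the __getattr__-based lazy attributes on bundlerepository and localrepository with the util.propertycache decorator. For orientation, here is a minimal sketch of the descriptor pattern behind util.propertycache, inferred from how it is used in this diff (the real definition lives in util.py and is not shown here): the decorated function runs once on first attribute access, and its result is stored on the instance under the same name, shadowing the descriptor so every later access is a plain attribute lookup.

    # Minimal sketch, assuming the usual caching-descriptor pattern;
    # not copied from mercurial/util.py.
    class propertycache(object):
        def __init__(self, func):
            self.func = func
            self.name = func.__name__
        def __get__(self, obj, type=None):
            result = self.func(obj)          # compute once
            setattr(obj, self.name, result)  # cache on the instance
            return result                    # later lookups skip __get__

Because the cached value lands in the instance __dict__, localrepository.invalidate() below can test "a in self.__dict__" and delattr() the cached changelog or manifest, forcing a rebuild on the next access.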
--- a/bundlerepo.py
+++ b/bundlerepo.py
@@ -1,299 +1,302 @@
 # bundlerepo.py - repository class for viewing uncompressed bundles
 #
 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2, incorporated herein by reference.
 
 """Repository class for viewing uncompressed bundles.
 
 This provides a read-only repository interface to bundles as if they
 were part of the actual repository.
 """
 
 from node import nullid
 from i18n import _
 import changegroup, util, os, struct, bz2, zlib, tempfile, shutil, mdiff
 import localrepo, changelog, manifest, filelog, revlog, error
 
 class bundlerevlog(revlog.revlog):
     def __init__(self, opener, indexfile, bundlefile,
                  linkmapper=None):
         # How it works:
         # to retrieve a revision, we need to know the offset of
         # the revision in the bundlefile (an opened file).
         #
         # We store this offset in the index (start), to differentiate a
         # rev in the bundle and from a rev in the revlog, we check
         # len(index[r]). If the tuple is bigger than 7, it is a bundle
         # (it is bigger since we store the node to which the delta is)
         #
         revlog.revlog.__init__(self, opener, indexfile)
         self.bundlefile = bundlefile
         self.basemap = {}
         def chunkpositer():
             for chunk in changegroup.chunkiter(bundlefile):
                 pos = bundlefile.tell()
                 yield chunk, pos - len(chunk)
         n = len(self)
         prev = None
         for chunk, start in chunkpositer():
             size = len(chunk)
             if size < 80:
                 raise util.Abort(_("invalid changegroup"))
             start += 80
             size -= 80
             node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
             if node in self.nodemap:
                 prev = node
                 continue
             for p in (p1, p2):
                 if not p in self.nodemap:
                     raise error.LookupError(p1, self.indexfile,
                                             _("unknown parent"))
             if linkmapper is None:
                 link = n
             else:
                 link = linkmapper(cs)
 
             if not prev:
                 prev = p1
             # start, size, full unc. size, base (unused), link, p1, p2, node
             e = (revlog.offset_type(start, 0), size, -1, -1, link,
                  self.rev(p1), self.rev(p2), node)
             self.basemap[n] = prev
             self.index.insert(-1, e)
             self.nodemap[node] = n
             prev = node
             n += 1
 
     def bundle(self, rev):
         """is rev from the bundle"""
         if rev < 0:
             return False
         return rev in self.basemap
     def bundlebase(self, rev): return self.basemap[rev]
     def chunk(self, rev, df=None, cachelen=4096):
         # Warning: in case of bundle, the diff is against bundlebase,
         # not against rev - 1
         # XXX: could use some caching
         if not self.bundle(rev):
             return revlog.revlog.chunk(self, rev, df)
         self.bundlefile.seek(self.start(rev))
         return self.bundlefile.read(self.length(rev))
 
     def revdiff(self, rev1, rev2):
         """return or calculate a delta between two revisions"""
         if self.bundle(rev1) and self.bundle(rev2):
             # hot path for bundle
             revb = self.rev(self.bundlebase(rev2))
             if revb == rev1:
                 return self.chunk(rev2)
         elif not self.bundle(rev1) and not self.bundle(rev2):
             return revlog.revlog.revdiff(self, rev1, rev2)
 
         return mdiff.textdiff(self.revision(self.node(rev1)),
                               self.revision(self.node(rev2)))
 
     def revision(self, node):
         """return an uncompressed revision of a given"""
         if node == nullid: return ""
 
         text = None
         chain = []
         iter_node = node
         rev = self.rev(iter_node)
         # reconstruct the revision if it is from a changegroup
         while self.bundle(rev):
             if self._cache and self._cache[0] == iter_node:
                 text = self._cache[2]
                 break
             chain.append(rev)
             iter_node = self.bundlebase(rev)
             rev = self.rev(iter_node)
         if text is None:
             text = revlog.revlog.revision(self, iter_node)
 
         while chain:
             delta = self.chunk(chain.pop())
             text = mdiff.patches(text, [delta])
 
         p1, p2 = self.parents(node)
         if node != revlog.hash(text, p1, p2):
             raise error.RevlogError(_("integrity check failed on %s:%d")
                                     % (self.datafile, self.rev(node)))
 
         self._cache = (node, self.rev(node), text)
         return text
 
     def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
         raise NotImplementedError
     def addgroup(self, revs, linkmapper, transaction):
         raise NotImplementedError
     def strip(self, rev, minlink):
         raise NotImplementedError
     def checksize(self):
         raise NotImplementedError
 
 class bundlechangelog(bundlerevlog, changelog.changelog):
     def __init__(self, opener, bundlefile):
         changelog.changelog.__init__(self, opener)
         bundlerevlog.__init__(self, opener, self.indexfile, bundlefile)
 
 class bundlemanifest(bundlerevlog, manifest.manifest):
     def __init__(self, opener, bundlefile, linkmapper):
         manifest.manifest.__init__(self, opener)
         bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
                               linkmapper)
 
 class bundlefilelog(bundlerevlog, filelog.filelog):
     def __init__(self, opener, path, bundlefile, linkmapper):
         filelog.filelog.__init__(self, opener, path)
         bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
                               linkmapper)
 
 class bundlerepository(localrepo.localrepository):
     def __init__(self, ui, path, bundlename):
         self._tempparent = None
         try:
             localrepo.localrepository.__init__(self, ui, path)
         except error.RepoError:
             self._tempparent = tempfile.mkdtemp()
             localrepo.instance(ui,self._tempparent,1)
             localrepo.localrepository.__init__(self, ui, self._tempparent)
 
         if path:
             self._url = 'bundle:' + path + '+' + bundlename
         else:
             self._url = 'bundle:' + bundlename
 
         self.tempfile = None
         self.bundlefile = open(bundlename, "rb")
         header = self.bundlefile.read(6)
         if not header.startswith("HG"):
             raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
         elif not header.startswith("HG10"):
             raise util.Abort(_("%s: unknown bundle version") % bundlename)
         elif (header == "HG10BZ") or (header == "HG10GZ"):
             fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
                                             suffix=".hg10un", dir=self.path)
             self.tempfile = temp
             fptemp = os.fdopen(fdtemp, 'wb')
             def generator(f):
                 if header == "HG10BZ":
                     zd = bz2.BZ2Decompressor()
                     zd.decompress("BZ")
                 elif header == "HG10GZ":
                     zd = zlib.decompressobj()
                 for chunk in f:
                     yield zd.decompress(chunk)
             gen = generator(util.filechunkiter(self.bundlefile, 4096))
 
             try:
                 fptemp.write("HG10UN")
                 for chunk in gen:
                     fptemp.write(chunk)
             finally:
                 fptemp.close()
                 self.bundlefile.close()
 
             self.bundlefile = open(self.tempfile, "rb")
             # seek right after the header
             self.bundlefile.seek(6)
         elif header == "HG10UN":
             # nothing to do
             pass
         else:
             raise util.Abort(_("%s: unknown bundle compression type")
                              % bundlename)
         # dict with the mapping 'filename' -> position in the bundle
         self.bundlefilespos = {}
 
-    def __getattr__(self, name):
-        if name == 'changelog':
-            self.changelog = bundlechangelog(self.sopener, self.bundlefile)
-            self.manstart = self.bundlefile.tell()
-            return self.changelog
-        elif name == 'manifest':
-            self.bundlefile.seek(self.manstart)
-            self.manifest = bundlemanifest(self.sopener, self.bundlefile,
-                                           self.changelog.rev)
-            self.filestart = self.bundlefile.tell()
-            return self.manifest
-        elif name == 'manstart':
-            self.changelog
-            return self.manstart
-        elif name == 'filestart':
-            self.manifest
-            return self.filestart
-        else:
-            return localrepo.localrepository.__getattr__(self, name)
+    @util.propertycache
+    def changelog(self):
+        c = bundlechangelog(self.sopener, self.bundlefile)
+        self.manstart = self.bundlefile.tell()
+        return c
+
+    @util.propertycache
+    def manifest(self):
+        self.bundlefile.seek(self.manstart)
+        m = bundlemanifest(self.sopener, self.bundlefile, self.changelog.rev)
+        self.filestart = self.bundlefile.tell()
+        return m
+
+    @util.propertycache
+    def manstart(self):
+        self.changelog
+        return self.manstart
+
+    @util.propertycache
+    def filestart(self):
+        self.manifest
+        return self.filestart
 
     def url(self):
         return self._url
 
     def file(self, f):
         if not self.bundlefilespos:
             self.bundlefile.seek(self.filestart)
             while 1:
                 chunk = changegroup.getchunk(self.bundlefile)
                 if not chunk:
                     break
                 self.bundlefilespos[chunk] = self.bundlefile.tell()
                 for c in changegroup.chunkiter(self.bundlefile):
                     pass
 
         if f[0] == '/':
             f = f[1:]
         if f in self.bundlefilespos:
             self.bundlefile.seek(self.bundlefilespos[f])
             return bundlefilelog(self.sopener, f, self.bundlefile,
                                  self.changelog.rev)
         else:
             return filelog.filelog(self.sopener, f)
 
     def close(self):
         """Close assigned bundle file immediately."""
         self.bundlefile.close()
 
     def __del__(self):
         bundlefile = getattr(self, 'bundlefile', None)
         if bundlefile and not bundlefile.closed:
             bundlefile.close()
         tempfile = getattr(self, 'tempfile', None)
         if tempfile is not None:
             os.unlink(tempfile)
         if self._tempparent:
             shutil.rmtree(self._tempparent, True)
 
     def cancopy(self):
         return False
 
     def getcwd(self):
         return os.getcwd() # always outside the repo
 
 def instance(ui, path, create):
     if create:
         raise util.Abort(_('cannot create new bundle repository'))
     parentpath = ui.config("bundle", "mainreporoot", "")
     if parentpath:
         # Try to make the full path relative so we get a nice, short URL.
         # In particular, we don't want temp dir names in test outputs.
         cwd = os.getcwd()
         if parentpath == cwd:
             parentpath = ''
         else:
             cwd = os.path.join(cwd,'')
             if parentpath.startswith(cwd):
                 parentpath = parentpath[len(cwd):]
     path = util.drop_scheme('file', path)
     if path.startswith('bundle:'):
         path = util.drop_scheme('bundle', path)
         s = path.split("+", 1)
         if len(s) == 1:
             repopath, bundlename = parentpath, s[0]
         else:
             repopath, bundlename = s
     else:
         repopath, bundlename = parentpath, path
     return bundlerepository(ui, repopath, bundlename)
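Note the ordering dependency the new bundlerepository properties preserve: manstart is recorded as a side effect of constructing the changelog, and filestart as a side effect of constructing the manifest. The manstart and filestart properties therefore touch self.changelog or self.manifest first and then return the offset those constructors stored (the explicit assignment shadows the propertycache entry of the same name). A hypothetical usage sketch, with ui, path and the bundle name as placeholders:

    repo = bundlerepository(ui, path, 'incoming.hg')
    repo.filestart           # forces repo.manifest, which forces repo.changelog
    fl = repo.file('a.txt')  # file() seeks to filestart before scanning names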
--- a/localrepo.py
+++ b/localrepo.py
@@ -1,2173 +1,2173 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2, incorporated herein by reference.
 
 from node import bin, hex, nullid, nullrev, short
 from i18n import _
 import repo, changegroup
 import changelog, dirstate, filelog, manifest, context, weakref
 import lock, transaction, stat, errno, ui, store, encoding
 import os, time, util, extensions, hook, inspect, error
 import match as match_
 import merge as merge_
 
 from lock import release
+propertycache = util.propertycache
 
 class localrepository(repo.repository):
     capabilities = set(('lookup', 'changegroupsubset'))
     supported = ('revlogv1', 'store', 'fncache')
 
     def __init__(self, baseui, path=None, create=0):
         repo.repository.__init__(self)
         self.root = os.path.realpath(path)
         self.path = os.path.join(self.root, ".hg")
         self.origroot = path
         self.opener = util.opener(self.path)
         self.wopener = util.opener(self.root)
 
         if not os.path.isdir(self.path):
             if create:
                 if not os.path.exists(path):
                     os.mkdir(path)
                 os.mkdir(self.path)
                 requirements = ["revlogv1"]
                 if baseui.configbool('format', 'usestore', True):
                     os.mkdir(os.path.join(self.path, "store"))
                     requirements.append("store")
                     if baseui.configbool('format', 'usefncache', True):
                         requirements.append("fncache")
                     # create an invalid changelog
                     self.opener("00changelog.i", "a").write(
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
                 reqfile = self.opener("requires", "w")
                 for r in requirements:
                     reqfile.write("%s\n" % r)
                 reqfile.close()
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             # find requirements
             requirements = []
             try:
                 requirements = self.opener("requires").read().splitlines()
                 for r in requirements:
                     if r not in self.supported:
                         raise error.RepoError(_("requirement '%s' not supported") % r)
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
 
         self.store = store.store(requirements, self.path, util.opener)
         self.spath = self.store.path
         self.sopener = self.store.opener
         self.sjoin = self.store.join
         self.opener.createmode = self.store.createmode
 
         self.baseui = baseui
         self.ui = baseui.copy()
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass
 
         self.tagscache = None
         self._tagstypecache = None
         self.branchcache = None
         self._ubranchcache = None # UTF-8 version of branchcache
         self._branchcachetip = None
         self.nodetagscache = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None
 
-    def __getattr__(self, name):
-        if name == 'changelog':
-            self.changelog = changelog.changelog(self.sopener)
-            if 'HG_PENDING' in os.environ:
-                p = os.environ['HG_PENDING']
-                if p.startswith(self.root):
-                    self.changelog.readpending('00changelog.i.a')
-            self.sopener.defversion = self.changelog.version
-            return self.changelog
-        if name == 'manifest':
-            self.changelog
-            self.manifest = manifest.manifest(self.sopener)
-            return self.manifest
-        if name == 'dirstate':
-            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
-            return self.dirstate
-        else:
-            raise AttributeError(name)
+    @propertycache
+    def changelog(self):
+        c = changelog.changelog(self.sopener)
+        if 'HG_PENDING' in os.environ:
+            p = os.environ['HG_PENDING']
+            if p.startswith(self.root):
+                c.readpending('00changelog.i.a')
+        self.sopener.defversion = c.version
+        return c
+
+    @propertycache
+    def manifest(self):
+        return manifest.manifest(self.sopener)
+
+    @propertycache
+    def dirstate(self):
+        return dirstate.dirstate(self.opener, self.ui, self.root)
 
110 def __getitem__(self, changeid):
110 def __getitem__(self, changeid):
111 if changeid == None:
111 if changeid == None:
112 return context.workingctx(self)
112 return context.workingctx(self)
113 return context.changectx(self, changeid)
113 return context.changectx(self, changeid)
114
114
115 def __nonzero__(self):
115 def __nonzero__(self):
116 return True
116 return True
117
117
118 def __len__(self):
118 def __len__(self):
119 return len(self.changelog)
119 return len(self.changelog)
120
120
121 def __iter__(self):
121 def __iter__(self):
122 for i in xrange(len(self)):
122 for i in xrange(len(self)):
123 yield i
123 yield i
124
124
125 def url(self):
125 def url(self):
126 return 'file:' + self.root
126 return 'file:' + self.root
127
127
128 def hook(self, name, throw=False, **args):
128 def hook(self, name, throw=False, **args):
129 return hook.hook(self.ui, self, name, throw, **args)
129 return hook.hook(self.ui, self, name, throw, **args)
130
130
131 tag_disallowed = ':\r\n'
131 tag_disallowed = ':\r\n'
132
132
133 def _tag(self, names, node, message, local, user, date, parent=None,
133 def _tag(self, names, node, message, local, user, date, parent=None,
134 extra={}):
134 extra={}):
135 use_dirstate = parent is None
135 use_dirstate = parent is None
136
136
137 if isinstance(names, str):
137 if isinstance(names, str):
138 allchars = names
138 allchars = names
139 names = (names,)
139 names = (names,)
140 else:
140 else:
141 allchars = ''.join(names)
141 allchars = ''.join(names)
142 for c in self.tag_disallowed:
142 for c in self.tag_disallowed:
143 if c in allchars:
143 if c in allchars:
144 raise util.Abort(_('%r cannot be used in a tag name') % c)
144 raise util.Abort(_('%r cannot be used in a tag name') % c)
145
145
146 for name in names:
146 for name in names:
147 self.hook('pretag', throw=True, node=hex(node), tag=name,
147 self.hook('pretag', throw=True, node=hex(node), tag=name,
148 local=local)
148 local=local)
149
149
150 def writetags(fp, names, munge, prevtags):
150 def writetags(fp, names, munge, prevtags):
151 fp.seek(0, 2)
151 fp.seek(0, 2)
152 if prevtags and prevtags[-1] != '\n':
152 if prevtags and prevtags[-1] != '\n':
153 fp.write('\n')
153 fp.write('\n')
154 for name in names:
154 for name in names:
155 m = munge and munge(name) or name
155 m = munge and munge(name) or name
156 if self._tagstypecache and name in self._tagstypecache:
156 if self._tagstypecache and name in self._tagstypecache:
157 old = self.tagscache.get(name, nullid)
157 old = self.tagscache.get(name, nullid)
158 fp.write('%s %s\n' % (hex(old), m))
158 fp.write('%s %s\n' % (hex(old), m))
159 fp.write('%s %s\n' % (hex(node), m))
159 fp.write('%s %s\n' % (hex(node), m))
160 fp.close()
160 fp.close()
161
161
162 prevtags = ''
162 prevtags = ''
163 if local:
163 if local:
164 try:
164 try:
165 fp = self.opener('localtags', 'r+')
165 fp = self.opener('localtags', 'r+')
166 except IOError:
166 except IOError:
167 fp = self.opener('localtags', 'a')
167 fp = self.opener('localtags', 'a')
168 else:
168 else:
169 prevtags = fp.read()
169 prevtags = fp.read()
170
170
171 # local tags are stored in the current charset
171 # local tags are stored in the current charset
172 writetags(fp, names, None, prevtags)
172 writetags(fp, names, None, prevtags)
173 for name in names:
173 for name in names:
174 self.hook('tag', node=hex(node), tag=name, local=local)
174 self.hook('tag', node=hex(node), tag=name, local=local)
175 return
175 return
176
176
177 if use_dirstate:
177 if use_dirstate:
178 try:
178 try:
179 fp = self.wfile('.hgtags', 'rb+')
179 fp = self.wfile('.hgtags', 'rb+')
180 except IOError:
180 except IOError:
181 fp = self.wfile('.hgtags', 'ab')
181 fp = self.wfile('.hgtags', 'ab')
182 else:
182 else:
183 prevtags = fp.read()
183 prevtags = fp.read()
184 else:
184 else:
185 try:
185 try:
186 prevtags = self.filectx('.hgtags', parent).data()
186 prevtags = self.filectx('.hgtags', parent).data()
187 except error.LookupError:
187 except error.LookupError:
188 pass
188 pass
189 fp = self.wfile('.hgtags', 'wb')
189 fp = self.wfile('.hgtags', 'wb')
190 if prevtags:
190 if prevtags:
191 fp.write(prevtags)
191 fp.write(prevtags)
192
192
193 # committed tags are stored in UTF-8
193 # committed tags are stored in UTF-8
194 writetags(fp, names, encoding.fromlocal, prevtags)
194 writetags(fp, names, encoding.fromlocal, prevtags)
195
195
196 if use_dirstate and '.hgtags' not in self.dirstate:
196 if use_dirstate and '.hgtags' not in self.dirstate:
197 self.add(['.hgtags'])
197 self.add(['.hgtags'])
198
198
199 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
199 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
200 extra=extra)
200 extra=extra)
201
201
202 for name in names:
202 for name in names:
203 self.hook('tag', node=hex(node), tag=name, local=local)
203 self.hook('tag', node=hex(node), tag=name, local=local)
204
204
205 return tagnode
205 return tagnode
206
206
207 def tag(self, names, node, message, local, user, date):
207 def tag(self, names, node, message, local, user, date):
208 '''tag a revision with one or more symbolic names.
208 '''tag a revision with one or more symbolic names.
209
209
210 names is a list of strings or, when adding a single tag, names may be a
210 names is a list of strings or, when adding a single tag, names may be a
211 string.
211 string.
212
212
213 if local is True, the tags are stored in a per-repository file.
213 if local is True, the tags are stored in a per-repository file.
214 otherwise, they are stored in the .hgtags file, and a new
214 otherwise, they are stored in the .hgtags file, and a new
215 changeset is committed with the change.
215 changeset is committed with the change.
216
216
217 keyword arguments:
217 keyword arguments:
218
218
219 local: whether to store tags in non-version-controlled file
219 local: whether to store tags in non-version-controlled file
220 (default False)
220 (default False)
221
221
222 message: commit message to use if committing
222 message: commit message to use if committing
223
223
224 user: name of user to use if committing
224 user: name of user to use if committing
225
225
226 date: date tuple to use if committing'''
226 date: date tuple to use if committing'''
227
227
228 for x in self.status()[:5]:
228 for x in self.status()[:5]:
229 if '.hgtags' in x:
229 if '.hgtags' in x:
230 raise util.Abort(_('working copy of .hgtags is changed '
230 raise util.Abort(_('working copy of .hgtags is changed '
231 '(please commit .hgtags manually)'))
231 '(please commit .hgtags manually)'))
232
232
233 self.tags() # instantiate the cache
233 self.tags() # instantiate the cache
234 self._tag(names, node, message, local, user, date)
234 self._tag(names, node, message, local, user, date)
235
235
236 def tags(self):
236 def tags(self):
237 '''return a mapping of tag to node'''
237 '''return a mapping of tag to node'''
238 if self.tagscache:
238 if self.tagscache:
239 return self.tagscache
239 return self.tagscache
240
240
241 globaltags = {}
241 globaltags = {}
242 tagtypes = {}
242 tagtypes = {}
243
243
244 def readtags(lines, fn, tagtype):
244 def readtags(lines, fn, tagtype):
245 filetags = {}
245 filetags = {}
246 count = 0
246 count = 0
247
247
248 def warn(msg):
248 def warn(msg):
249 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
249 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
250
250
251 for l in lines:
251 for l in lines:
252 count += 1
252 count += 1
253 if not l:
253 if not l:
254 continue
254 continue
255 s = l.split(" ", 1)
255 s = l.split(" ", 1)
256 if len(s) != 2:
256 if len(s) != 2:
257 warn(_("cannot parse entry"))
257 warn(_("cannot parse entry"))
258 continue
258 continue
259 node, key = s
259 node, key = s
260 key = encoding.tolocal(key.strip()) # stored in UTF-8
260 key = encoding.tolocal(key.strip()) # stored in UTF-8
261 try:
261 try:
262 bin_n = bin(node)
262 bin_n = bin(node)
263 except TypeError:
263 except TypeError:
264 warn(_("node '%s' is not well formed") % node)
264 warn(_("node '%s' is not well formed") % node)
265 continue
265 continue
266 if bin_n not in self.changelog.nodemap:
266 if bin_n not in self.changelog.nodemap:
267 warn(_("tag '%s' refers to unknown node") % key)
267 warn(_("tag '%s' refers to unknown node") % key)
268 continue
268 continue
269
269
270 h = []
270 h = []
271 if key in filetags:
271 if key in filetags:
272 n, h = filetags[key]
272 n, h = filetags[key]
273 h.append(n)
273 h.append(n)
274 filetags[key] = (bin_n, h)
274 filetags[key] = (bin_n, h)
275
275
276 for k, nh in filetags.iteritems():
276 for k, nh in filetags.iteritems():
277 if k not in globaltags:
277 if k not in globaltags:
278 globaltags[k] = nh
278 globaltags[k] = nh
279 tagtypes[k] = tagtype
279 tagtypes[k] = tagtype
280 continue
280 continue
281
281
282 # we prefer the global tag if:
282 # we prefer the global tag if:
283 # it supercedes us OR
283 # it supercedes us OR
284 # mutual supercedes and it has a higher rank
284 # mutual supercedes and it has a higher rank
285 # otherwise we win because we're tip-most
285 # otherwise we win because we're tip-most
286 an, ah = nh
286 an, ah = nh
287 bn, bh = globaltags[k]
287 bn, bh = globaltags[k]
288 if (bn != an and an in bh and
288 if (bn != an and an in bh and
289 (bn not in ah or len(bh) > len(ah))):
289 (bn not in ah or len(bh) > len(ah))):
290 an = bn
290 an = bn
291 ah.extend([n for n in bh if n not in ah])
291 ah.extend([n for n in bh if n not in ah])
292 globaltags[k] = an, ah
292 globaltags[k] = an, ah
293 tagtypes[k] = tagtype
293 tagtypes[k] = tagtype
294
294
295 # read the tags file from each head, ending with the tip
295 # read the tags file from each head, ending with the tip
296 f = None
296 f = None
297 for rev, node, fnode in self._hgtagsnodes():
297 for rev, node, fnode in self._hgtagsnodes():
298 f = (f and f.filectx(fnode) or
298 f = (f and f.filectx(fnode) or
299 self.filectx('.hgtags', fileid=fnode))
299 self.filectx('.hgtags', fileid=fnode))
300 readtags(f.data().splitlines(), f, "global")
300 readtags(f.data().splitlines(), f, "global")
301
301
302 try:
302 try:
303 data = encoding.fromlocal(self.opener("localtags").read())
303 data = encoding.fromlocal(self.opener("localtags").read())
304 # localtags are stored in the local character set
304 # localtags are stored in the local character set
305 # while the internal tag table is stored in UTF-8
305 # while the internal tag table is stored in UTF-8
306 readtags(data.splitlines(), "localtags", "local")
306 readtags(data.splitlines(), "localtags", "local")
307 except IOError:
307 except IOError:
308 pass
308 pass
309
309
310 self.tagscache = {}
310 self.tagscache = {}
311 self._tagstypecache = {}
311 self._tagstypecache = {}
312 for k, nh in globaltags.iteritems():
312 for k, nh in globaltags.iteritems():
313 n = nh[0]
313 n = nh[0]
314 if n != nullid:
314 if n != nullid:
315 self.tagscache[k] = n
315 self.tagscache[k] = n
316 self._tagstypecache[k] = tagtypes[k]
316 self._tagstypecache[k] = tagtypes[k]
317 self.tagscache['tip'] = self.changelog.tip()
317 self.tagscache['tip'] = self.changelog.tip()
318 return self.tagscache
318 return self.tagscache
319
319
320 def tagtype(self, tagname):
320 def tagtype(self, tagname):
321 '''
321 '''
322 return the type of the given tag. result can be:
322 return the type of the given tag. result can be:
323
323
324 'local' : a local tag
324 'local' : a local tag
325 'global' : a global tag
325 'global' : a global tag
326 None : tag does not exist
326 None : tag does not exist
327 '''
327 '''
328
328
329 self.tags()
329 self.tags()
330
330
331 return self._tagstypecache.get(tagname)
331 return self._tagstypecache.get(tagname)
332
332
333 def _hgtagsnodes(self):
333 def _hgtagsnodes(self):
334 last = {}
334 last = {}
335 ret = []
335 ret = []
336 for node in reversed(self.heads()):
336 for node in reversed(self.heads()):
337 c = self[node]
337 c = self[node]
338 rev = c.rev()
338 rev = c.rev()
339 try:
339 try:
340 fnode = c.filenode('.hgtags')
340 fnode = c.filenode('.hgtags')
341 except error.LookupError:
341 except error.LookupError:
342 continue
342 continue
343 ret.append((rev, node, fnode))
343 ret.append((rev, node, fnode))
344 if fnode in last:
344 if fnode in last:
345 ret[last[fnode]] = None
345 ret[last[fnode]] = None
346 last[fnode] = len(ret) - 1
346 last[fnode] = len(ret) - 1
347 return [item for item in ret if item]
347 return [item for item in ret if item]
348
348
349 def tagslist(self):
349 def tagslist(self):
350 '''return a list of tags ordered by revision'''
350 '''return a list of tags ordered by revision'''
351 l = []
351 l = []
352 for t, n in self.tags().iteritems():
352 for t, n in self.tags().iteritems():
353 try:
353 try:
354 r = self.changelog.rev(n)
354 r = self.changelog.rev(n)
355 except:
355 except:
356 r = -2 # sort to the beginning of the list if unknown
356 r = -2 # sort to the beginning of the list if unknown
357 l.append((r, t, n))
357 l.append((r, t, n))
358 return [(t, n) for r, t, n in sorted(l)]
358 return [(t, n) for r, t, n in sorted(l)]
359
359
360 def nodetags(self, node):
360 def nodetags(self, node):
361 '''return the tags associated with a node'''
361 '''return the tags associated with a node'''
362 if not self.nodetagscache:
362 if not self.nodetagscache:
363 self.nodetagscache = {}
363 self.nodetagscache = {}
364 for t, n in self.tags().iteritems():
364 for t, n in self.tags().iteritems():
365 self.nodetagscache.setdefault(n, []).append(t)
365 self.nodetagscache.setdefault(n, []).append(t)
366 return self.nodetagscache.get(node, [])
366 return self.nodetagscache.get(node, [])
367
367
368 def _branchtags(self, partial, lrev):
368 def _branchtags(self, partial, lrev):
369 # TODO: rename this function?
369 # TODO: rename this function?
370 tiprev = len(self) - 1
370 tiprev = len(self) - 1
371 if lrev != tiprev:
371 if lrev != tiprev:
372 self._updatebranchcache(partial, lrev+1, tiprev+1)
372 self._updatebranchcache(partial, lrev+1, tiprev+1)
373 self._writebranchcache(partial, self.changelog.tip(), tiprev)
373 self._writebranchcache(partial, self.changelog.tip(), tiprev)
374
374
375 return partial
375 return partial
376
376
377 def _branchheads(self):
377 def _branchheads(self):
378 tip = self.changelog.tip()
378 tip = self.changelog.tip()
379 if self.branchcache is not None and self._branchcachetip == tip:
379 if self.branchcache is not None and self._branchcachetip == tip:
380 return self.branchcache
380 return self.branchcache
381
381
382 oldtip = self._branchcachetip
382 oldtip = self._branchcachetip
383 self._branchcachetip = tip
383 self._branchcachetip = tip
384 if self.branchcache is None:
384 if self.branchcache is None:
385 self.branchcache = {} # avoid recursion in changectx
385 self.branchcache = {} # avoid recursion in changectx
386 else:
386 else:
387 self.branchcache.clear() # keep using the same dict
387 self.branchcache.clear() # keep using the same dict
388 if oldtip is None or oldtip not in self.changelog.nodemap:
388 if oldtip is None or oldtip not in self.changelog.nodemap:
389 partial, last, lrev = self._readbranchcache()
389 partial, last, lrev = self._readbranchcache()
390 else:
390 else:
391 lrev = self.changelog.rev(oldtip)
391 lrev = self.changelog.rev(oldtip)
392 partial = self._ubranchcache
392 partial = self._ubranchcache
393
393
394 self._branchtags(partial, lrev)
394 self._branchtags(partial, lrev)
395 # this private cache holds all heads (not just tips)
395 # this private cache holds all heads (not just tips)
396 self._ubranchcache = partial
396 self._ubranchcache = partial
397
397
398 # the branch cache is stored on disk as UTF-8, but in the local
398 # the branch cache is stored on disk as UTF-8, but in the local
399 # charset internally
399 # charset internally
400 for k, v in partial.iteritems():
400 for k, v in partial.iteritems():
401 self.branchcache[encoding.tolocal(k)] = v
401 self.branchcache[encoding.tolocal(k)] = v
402 return self.branchcache
402 return self.branchcache
403
403
404
404
405 def branchtags(self):
405 def branchtags(self):
406 '''return a dict where branch names map to the tipmost head of
406 '''return a dict where branch names map to the tipmost head of
407 the branch, open heads come before closed'''
407 the branch, open heads come before closed'''
408 bt = {}
408 bt = {}
409 for bn, heads in self._branchheads().iteritems():
409 for bn, heads in self._branchheads().iteritems():
410 head = None
410 head = None
411 for i in range(len(heads)-1, -1, -1):
411 for i in range(len(heads)-1, -1, -1):
412 h = heads[i]
412 h = heads[i]
413 if 'close' not in self.changelog.read(h)[5]:
413 if 'close' not in self.changelog.read(h)[5]:
414 head = h
414 head = h
415 break
415 break
416 # no open heads were found
416 # no open heads were found
417 if head is None:
417 if head is None:
418 head = heads[-1]
418 head = heads[-1]
419 bt[bn] = head
419 bt[bn] = head
420 return bt
420 return bt
421
421
422
422
423 def _readbranchcache(self):
423 def _readbranchcache(self):
424 partial = {}
424 partial = {}
425 try:
425 try:
426 f = self.opener("branchheads.cache")
426 f = self.opener("branchheads.cache")
427 lines = f.read().split('\n')
427 lines = f.read().split('\n')
428 f.close()
428 f.close()
429 except (IOError, OSError):
429 except (IOError, OSError):
430 return {}, nullid, nullrev
430 return {}, nullid, nullrev
431
431
432 try:
432 try:
433 last, lrev = lines.pop(0).split(" ", 1)
433 last, lrev = lines.pop(0).split(" ", 1)
434 last, lrev = bin(last), int(lrev)
434 last, lrev = bin(last), int(lrev)
435 if lrev >= len(self) or self[lrev].node() != last:
435 if lrev >= len(self) or self[lrev].node() != last:
436 # invalidate the cache
436 # invalidate the cache
437 raise ValueError('invalidating branch cache (tip differs)')
437 raise ValueError('invalidating branch cache (tip differs)')
438 for l in lines:
438 for l in lines:
439 if not l: continue
439 if not l: continue
440 node, label = l.split(" ", 1)
440 node, label = l.split(" ", 1)
441 partial.setdefault(label.strip(), []).append(bin(node))
441 partial.setdefault(label.strip(), []).append(bin(node))
442 except KeyboardInterrupt:
442 except KeyboardInterrupt:
443 raise
443 raise
444 except Exception, inst:
444 except Exception, inst:
445 if self.ui.debugflag:
445 if self.ui.debugflag:
446 self.ui.warn(str(inst), '\n')
446 self.ui.warn(str(inst), '\n')
447 partial, last, lrev = {}, nullid, nullrev
447 partial, last, lrev = {}, nullid, nullrev
448 return partial, last, lrev
448 return partial, last, lrev
449
449
450 def _writebranchcache(self, branches, tip, tiprev):
450 def _writebranchcache(self, branches, tip, tiprev):
451 try:
451 try:
452 f = self.opener("branchheads.cache", "w", atomictemp=True)
452 f = self.opener("branchheads.cache", "w", atomictemp=True)
453 f.write("%s %s\n" % (hex(tip), tiprev))
453 f.write("%s %s\n" % (hex(tip), tiprev))
454 for label, nodes in branches.iteritems():
454 for label, nodes in branches.iteritems():
455 for node in nodes:
455 for node in nodes:
456 f.write("%s %s\n" % (hex(node), label))
456 f.write("%s %s\n" % (hex(node), label))
457 f.rename()
457 f.rename()
458 except (IOError, OSError):
458 except (IOError, OSError):
459 pass
459 pass
460
460
461 def _updatebranchcache(self, partial, start, end):
461 def _updatebranchcache(self, partial, start, end):
462 for r in xrange(start, end):
462 for r in xrange(start, end):
463 c = self[r]
463 c = self[r]
464 b = c.branch()
464 b = c.branch()
465 bheads = partial.setdefault(b, [])
465 bheads = partial.setdefault(b, [])
466 bheads.append(c.node())
466 bheads.append(c.node())
467 for p in c.parents():
467 for p in c.parents():
468 pn = p.node()
468 pn = p.node()
469 if pn in bheads:
469 if pn in bheads:
470 bheads.remove(pn)
470 bheads.remove(pn)
471
471
472 def lookup(self, key):
472 def lookup(self, key):
473 if isinstance(key, int):
473 if isinstance(key, int):
474 return self.changelog.node(key)
474 return self.changelog.node(key)
475 elif key == '.':
475 elif key == '.':
476 return self.dirstate.parents()[0]
476 return self.dirstate.parents()[0]
477 elif key == 'null':
477 elif key == 'null':
478 return nullid
478 return nullid
479 elif key == 'tip':
479 elif key == 'tip':
480 return self.changelog.tip()
480 return self.changelog.tip()
481 n = self.changelog._match(key)
481 n = self.changelog._match(key)
482 if n:
482 if n:
483 return n
483 return n
484 if key in self.tags():
484 if key in self.tags():
485 return self.tags()[key]
485 return self.tags()[key]
486 if key in self.branchtags():
486 if key in self.branchtags():
487 return self.branchtags()[key]
487 return self.branchtags()[key]
488 n = self.changelog._partialmatch(key)
488 n = self.changelog._partialmatch(key)
489 if n:
489 if n:
490 return n
490 return n
491 try:
491 try:
492 if len(key) == 20:
492 if len(key) == 20:
493 key = hex(key)
493 key = hex(key)
494 except:
494 except:
495 pass
495 pass
496 raise error.RepoError(_("unknown revision '%s'") % key)
496 raise error.RepoError(_("unknown revision '%s'") % key)
497
497
498 def local(self):
498 def local(self):
499 return True
499 return True
500
500
501 def join(self, f):
501 def join(self, f):
502 return os.path.join(self.path, f)
502 return os.path.join(self.path, f)
503
503
504 def wjoin(self, f):
504 def wjoin(self, f):
505 return os.path.join(self.root, f)
505 return os.path.join(self.root, f)
506
506
507 def rjoin(self, f):
507 def rjoin(self, f):
508 return os.path.join(self.root, util.pconvert(f))
508 return os.path.join(self.root, util.pconvert(f))
509
509
510 def file(self, f):
510 def file(self, f):
511 if f[0] == '/':
511 if f[0] == '/':
512 f = f[1:]
512 f = f[1:]
513 return filelog.filelog(self.sopener, f)
513 return filelog.filelog(self.sopener, f)
514
514
515 def changectx(self, changeid):
515 def changectx(self, changeid):
516 return self[changeid]
516 return self[changeid]
517
517
518 def parents(self, changeid=None):
518 def parents(self, changeid=None):
519 '''get list of changectxs for parents of changeid'''
519 '''get list of changectxs for parents of changeid'''
520 return self[changeid].parents()
520 return self[changeid].parents()
521
521
522 def filectx(self, path, changeid=None, fileid=None):
522 def filectx(self, path, changeid=None, fileid=None):
523 """changeid can be a changeset revision, node, or tag.
523 """changeid can be a changeset revision, node, or tag.
524 fileid can be a file revision or node."""
524 fileid can be a file revision or node."""
525 return context.filectx(self, path, changeid, fileid)
525 return context.filectx(self, path, changeid, fileid)
526
526
527 def getcwd(self):
527 def getcwd(self):
528 return self.dirstate.getcwd()
528 return self.dirstate.getcwd()
529
529
530 def pathto(self, f, cwd=None):
530 def pathto(self, f, cwd=None):
531 return self.dirstate.pathto(f, cwd)
531 return self.dirstate.pathto(f, cwd)
532
532
533 def wfile(self, f, mode='r'):
533 def wfile(self, f, mode='r'):
534 return self.wopener(f, mode)
534 return self.wopener(f, mode)
535
535
536 def _link(self, f):
536 def _link(self, f):
537 return os.path.islink(self.wjoin(f))
537 return os.path.islink(self.wjoin(f))
538
538
539 def _filter(self, filter, filename, data):
539 def _filter(self, filter, filename, data):
540 if filter not in self.filterpats:
540 if filter not in self.filterpats:
541 l = []
541 l = []
542 for pat, cmd in self.ui.configitems(filter):
542 for pat, cmd in self.ui.configitems(filter):
543 if cmd == '!':
543 if cmd == '!':
544 continue
544 continue
545 mf = util.matcher(self.root, "", [pat], [], [])[1]
545 mf = util.matcher(self.root, "", [pat], [], [])[1]
546 fn = None
546 fn = None
547 params = cmd
547 params = cmd
548 for name, filterfn in self._datafilters.iteritems():
548 for name, filterfn in self._datafilters.iteritems():
549 if cmd.startswith(name):
549 if cmd.startswith(name):
550 fn = filterfn
550 fn = filterfn
551 params = cmd[len(name):].lstrip()
551 params = cmd[len(name):].lstrip()
552 break
552 break
553 if not fn:
553 if not fn:
554 fn = lambda s, c, **kwargs: util.filter(s, c)
554 fn = lambda s, c, **kwargs: util.filter(s, c)
555 # Wrap old filters not supporting keyword arguments
555 # Wrap old filters not supporting keyword arguments
556 if not inspect.getargspec(fn)[2]:
556 if not inspect.getargspec(fn)[2]:
557 oldfn = fn
557 oldfn = fn
558 fn = lambda s, c, **kwargs: oldfn(s, c)
558 fn = lambda s, c, **kwargs: oldfn(s, c)
559 l.append((mf, fn, params))
559 l.append((mf, fn, params))
560 self.filterpats[filter] = l
560 self.filterpats[filter] = l
561
561
562 for mf, fn, cmd in self.filterpats[filter]:
562 for mf, fn, cmd in self.filterpats[filter]:
563 if mf(filename):
563 if mf(filename):
564 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
564 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
565 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
565 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
566 break
566 break
567
567
568 return data
568 return data
569
569
570 def adddatafilter(self, name, filter):
570 def adddatafilter(self, name, filter):
571 self._datafilters[name] = filter
571 self._datafilters[name] = filter
572
572
573 def wread(self, filename):
573 def wread(self, filename):
574 if self._link(filename):
574 if self._link(filename):
575 data = os.readlink(self.wjoin(filename))
575 data = os.readlink(self.wjoin(filename))
576 else:
576 else:
577 data = self.wopener(filename, 'r').read()
577 data = self.wopener(filename, 'r').read()
578 return self._filter("encode", filename, data)
578 return self._filter("encode", filename, data)
579
579
580 def wwrite(self, filename, data, flags):
580 def wwrite(self, filename, data, flags):
581 data = self._filter("decode", filename, data)
581 data = self._filter("decode", filename, data)
582 try:
582 try:
583 os.unlink(self.wjoin(filename))
583 os.unlink(self.wjoin(filename))
584 except OSError:
584 except OSError:
585 pass
585 pass
586 if 'l' in flags:
586 if 'l' in flags:
587 self.wopener.symlink(data, filename)
587 self.wopener.symlink(data, filename)
588 else:
588 else:
589 self.wopener(filename, 'w').write(data)
589 self.wopener(filename, 'w').write(data)
590 if 'x' in flags:
590 if 'x' in flags:
591 util.set_flags(self.wjoin(filename), False, True)
591 util.set_flags(self.wjoin(filename), False, True)
592
592
593 def wwritedata(self, filename, data):
593 def wwritedata(self, filename, data):
594 return self._filter("decode", filename, data)
594 return self._filter("decode", filename, data)
595
595
596 def transaction(self):
596 def transaction(self):
597 tr = self._transref and self._transref() or None
597 tr = self._transref and self._transref() or None
598 if tr and tr.running():
598 if tr and tr.running():
599 return tr.nest()
599 return tr.nest()
600
600
601 # abort here if the journal already exists
601 # abort here if the journal already exists
602 if os.path.exists(self.sjoin("journal")):
602 if os.path.exists(self.sjoin("journal")):
603 raise error.RepoError(_("journal already exists - run hg recover"))
603 raise error.RepoError(_("journal already exists - run hg recover"))
604
604
605 # save dirstate for rollback
605 # save dirstate for rollback
606 try:
606 try:
607 ds = self.opener("dirstate").read()
607 ds = self.opener("dirstate").read()
608 except IOError:
608 except IOError:
609 ds = ""
609 ds = ""
610 self.opener("journal.dirstate", "w").write(ds)
610 self.opener("journal.dirstate", "w").write(ds)
611 self.opener("journal.branch", "w").write(self.dirstate.branch())
611 self.opener("journal.branch", "w").write(self.dirstate.branch())
612
612
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)

    def invalidate(self):
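        # changelog and manifest are propertycache-backed attributes;
        # dropping them from __dict__ forces a fresh read on next access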
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
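        # try a non-blocking acquire first; if the lock is held and wait
        # is set, retry with the configured ui.timeout (600s by default)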
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
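        # reuse a still-held lock cached through a weak reference;
        # wlock() below uses the same pattern for the working directory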
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fparent2 == nullid and not flog.cmp(fparent1, text) and not meta:
            return fparent1

        changelist.append(fname)
        return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

    def commit(self, files=None, text="", user=None, date=None,
               match=None, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = None
        if extra.get("close"):
            force = True
        if files:
            files = list(set(files))
        try:
            wlock = self.wlock()
            lock = self.lock()
            use_dirstate = (p1 is None) # not rawcommit

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                if (not force and p2 != nullid and
                    (match and (match.files() or match.anypats()))):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))

                if files:
                    modified, removed = [], []
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            modified.append(f)
                        elif s == 'r':
                            removed.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                    changes = [modified, [], removed, [], []]
                else:
                    changes = self.status(match=match)
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)
                changes = [files, [], [], [], []]

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))
            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            r = self._commitctx(wctx, force, force_editor, empty_ok,
                                use_dirstate, update_dirstate)
            ms.reset()
            return r

        finally:
            release(lock, wlock)

    def commitctx(self, ctx):
        """Add a new revision to current repository.

        Revision information is passed in the context.memctx argument.
        commitctx() does not touch the working directory.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            return self._commitctx(ctx, force=True, force_editor=False,
                                   empty_ok=True, use_dirstate=False,
                                   update_dirstate=False)
        finally:
            release(lock, wlock)

    def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
                   use_dirstate=True, update_dirstate=True):
        tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            commit = sorted(wctx.modified() + wctx.added())
            remove = wctx.removed()
            extra = wctx.extra().copy()
            branchname = extra['branch']
            user = wctx.user()
            text = wctx.description()

            p1, p2 = [p.node() for p in wctx.parents()]
            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid:
                xp2 = ''
            else:
                xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
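            # the new changeset becomes revision len(self), so file and
            # manifest revisions added below point back to it via linkrev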
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    fctx = wctx.filectx(f)
                    newflags = fctx.flags()
                    new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        if m1.flags(f) != newflags:
                            changed.append(f)
                    m1.set(f, newflags)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            updated, added = [], []
            for f in sorted(changed):
                if f in m1 or f in m2:
                    updated.append(f)
                else:
                    added.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(remove) if f in m1 or f in m2]
            removed1 = []

            for f in removed:
                if f in m1:
                    del m1[f]
                    removed1.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed1))

            # add changeset
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                    edittext.append("")
                edittext.append("") # Empty line between message and comments.
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'"
                                    % encoding.tolocal(branchname))
                edittext.extend(["HG: added %s" % f for f in added])
                edittext.extend(["HG: changed %s" % f for f in updated])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not added and not updated and not removed:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, wctx.date(), extra)
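            # write pending changelog data so pretxncommit hooks can see
            # the new node; the lambda returns the repo root only if
            # pending data was actually written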
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
            if use_dirstate:
                for f in removed:
                    self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
                return False
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
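            # cmp holds files the dirstate could not verify cheaply;
            # they get a full content comparison below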
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in cmp:
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            # updating the dirstate is optional
                            # so we don't wait on the lock
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except error.LockError:
                            pass
                    finally:
                        release(wlock)

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        for l in r:
            l.sort()
        return r

    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            release(wlock)

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
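                    # pick whichever parent manifest still knows this file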
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None, closed=True):
        heads = self.changelog.heads(start)
        def display(head):
            if closed:
                return True
            extras = self.changelog.read(head)[5]
            return ('close' not in extras)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=True):
        if branch is None:
            branch = self[None].branch()
        branches = self._branchheads()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
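        # for each node, walk first parents until a merge or root is hit,
        # yielding (tip-of-branch, root, parent1, parent2) tuples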
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
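        # for each (top, bottom) pair, sample the first-parent chain at
        # exponentially growing distances (1, 2, 4, ...); discovery uses
        # these samples to binary-search partially known branches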
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but have no children in either self or remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but have no children in either self or remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
1305 m = self.changelog.nodemap
1305 m = self.changelog.nodemap
1306 search = []
1306 search = []
1307 fetch = set()
1307 fetch = set()
1308 seen = set()
1308 seen = set()
1309 seenbranch = set()
1309 seenbranch = set()
1310 if base == None:
1310 if base == None:
1311 base = {}
1311 base = {}
1312
1312
1313 if not heads:
1313 if not heads:
1314 heads = remote.heads()
1314 heads = remote.heads()
1315
1315
1316 if self.changelog.tip() == nullid:
1316 if self.changelog.tip() == nullid:
1317 base[nullid] = 1
1317 base[nullid] = 1
1318 if heads != [nullid]:
1318 if heads != [nullid]:
1319 return [nullid], [nullid], list(heads)
1319 return [nullid], [nullid], list(heads)
1320 return [nullid], [], []
1320 return [nullid], [], []
1321
1321
1322 # assume we're closer to the tip than the root
1322 # assume we're closer to the tip than the root
1323 # and start by examining the heads
1323 # and start by examining the heads
1324 self.ui.status(_("searching for changes\n"))
1324 self.ui.status(_("searching for changes\n"))
1325
1325
1326 unknown = []
1326 unknown = []
1327 for h in heads:
1327 for h in heads:
1328 if h not in m:
1328 if h not in m:
1329 unknown.append(h)
1329 unknown.append(h)
1330 else:
1330 else:
1331 base[h] = 1
1331 base[h] = 1
1332
1332
1333 heads = unknown
1333 heads = unknown
1334 if not unknown:
1334 if not unknown:
1335 return base.keys(), [], []
1335 return base.keys(), [], []
1336
1336
1337 req = set(unknown)
1337 req = set(unknown)
1338 reqcnt = 0
1338 reqcnt = 0
1339
1339
1340 # search through remote branches
1340 # search through remote branches
1341 # a 'branch' here is a linear segment of history, with four parts:
1341 # a 'branch' here is a linear segment of history, with four parts:
1342 # head, root, first parent, second parent
1342 # head, root, first parent, second parent
1343 # (a branch always has two parents (or none) by definition)
1343 # (a branch always has two parents (or none) by definition)
1344 unknown = remote.branches(unknown)
1344 unknown = remote.branches(unknown)
1345 while unknown:
1345 while unknown:
1346 r = []
1346 r = []
1347 while unknown:
1347 while unknown:
1348 n = unknown.pop(0)
1348 n = unknown.pop(0)
1349 if n[0] in seen:
1349 if n[0] in seen:
1350 continue
1350 continue
1351
1351
1352 self.ui.debug(_("examining %s:%s\n")
1352 self.ui.debug(_("examining %s:%s\n")
1353 % (short(n[0]), short(n[1])))
1353 % (short(n[0]), short(n[1])))
1354 if n[0] == nullid: # found the end of the branch
1354 if n[0] == nullid: # found the end of the branch
1355 pass
1355 pass
1356 elif n in seenbranch:
1356 elif n in seenbranch:
1357 self.ui.debug(_("branch already found\n"))
1357 self.ui.debug(_("branch already found\n"))
1358 continue
1358 continue
1359 elif n[1] and n[1] in m: # do we know the base?
1359 elif n[1] and n[1] in m: # do we know the base?
1360 self.ui.debug(_("found incomplete branch %s:%s\n")
1360 self.ui.debug(_("found incomplete branch %s:%s\n")
1361 % (short(n[0]), short(n[1])))
1361 % (short(n[0]), short(n[1])))
1362 search.append(n[0:2]) # schedule branch range for scanning
1362 search.append(n[0:2]) # schedule branch range for scanning
1363 seenbranch.add(n)
1363 seenbranch.add(n)
1364 else:
1364 else:
1365 if n[1] not in seen and n[1] not in fetch:
1365 if n[1] not in seen and n[1] not in fetch:
1366 if n[2] in m and n[3] in m:
1366 if n[2] in m and n[3] in m:
1367 self.ui.debug(_("found new changeset %s\n") %
1367 self.ui.debug(_("found new changeset %s\n") %
1368 short(n[1]))
1368 short(n[1]))
1369 fetch.add(n[1]) # earliest unknown
1369 fetch.add(n[1]) # earliest unknown
1370 for p in n[2:4]:
1370 for p in n[2:4]:
1371 if p in m:
1371 if p in m:
1372 base[p] = 1 # latest known
1372 base[p] = 1 # latest known
1373
1373
1374 for p in n[2:4]:
1374 for p in n[2:4]:
1375 if p not in req and p not in m:
1375 if p not in req and p not in m:
1376 r.append(p)
1376 r.append(p)
1377 req.add(p)
1377 req.add(p)
1378 seen.add(n[0])
1378 seen.add(n[0])
1379
1379
1380 if r:
1380 if r:
1381 reqcnt += 1
1381 reqcnt += 1
1382 self.ui.debug(_("request %d: %s\n") %
1382 self.ui.debug(_("request %d: %s\n") %
1383 (reqcnt, " ".join(map(short, r))))
1383 (reqcnt, " ".join(map(short, r))))
1384 for p in xrange(0, len(r), 10):
1384 for p in xrange(0, len(r), 10):
1385 for b in remote.branches(r[p:p+10]):
1385 for b in remote.branches(r[p:p+10]):
1386 self.ui.debug(_("received %s:%s\n") %
1386 self.ui.debug(_("received %s:%s\n") %
1387 (short(b[0]), short(b[1])))
1387 (short(b[0]), short(b[1])))
1388 unknown.append(b)
1388 unknown.append(b)
1389
1389
1390 # do binary search on the branches we found
1390 # do binary search on the branches we found
1391 while search:
1391 while search:
1392 newsearch = []
1392 newsearch = []
1393 reqcnt += 1
1393 reqcnt += 1
1394 for n, l in zip(search, remote.between(search)):
1394 for n, l in zip(search, remote.between(search)):
1395 l.append(n[1])
1395 l.append(n[1])
1396 p = n[0]
1396 p = n[0]
1397 f = 1
1397 f = 1
1398 for i in l:
1398 for i in l:
1399 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1399 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1400 if i in m:
1400 if i in m:
1401 if f <= 2:
1401 if f <= 2:
1402 self.ui.debug(_("found new branch changeset %s\n") %
1402 self.ui.debug(_("found new branch changeset %s\n") %
1403 short(p))
1403 short(p))
1404 fetch.add(p)
1404 fetch.add(p)
1405 base[i] = 1
1405 base[i] = 1
1406 else:
1406 else:
1407 self.ui.debug(_("narrowed branch search to %s:%s\n")
1407 self.ui.debug(_("narrowed branch search to %s:%s\n")
1408 % (short(p), short(i)))
1408 % (short(p), short(i)))
1409 newsearch.append((p, i))
1409 newsearch.append((p, i))
1410 break
1410 break
1411 p, f = i, f * 2
1411 p, f = i, f * 2
1412 search = newsearch
1412 search = newsearch
1413
1413
1414 # sanity check our fetch list
1414 # sanity check our fetch list
1415 for f in fetch:
1415 for f in fetch:
1416 if f in m:
1416 if f in m:
1417 raise error.RepoError(_("already have changeset ")
1417 raise error.RepoError(_("already have changeset ")
1418 + short(f[:4]))
1418 + short(f[:4]))
1419
1419
1420 if base.keys() == [nullid]:
1420 if base.keys() == [nullid]:
1421 if force:
1421 if force:
1422 self.ui.warn(_("warning: repository is unrelated\n"))
1422 self.ui.warn(_("warning: repository is unrelated\n"))
1423 else:
1423 else:
1424 raise util.Abort(_("repository is unrelated"))
1424 raise util.Abort(_("repository is unrelated"))
1425
1425
1426 self.ui.debug(_("found new changesets starting at ") +
1426 self.ui.debug(_("found new changesets starting at ") +
1427 " ".join([short(f) for f in fetch]) + "\n")
1427 " ".join([short(f) for f in fetch]) + "\n")
1428
1428
1429 self.ui.debug(_("%d total queries\n") % reqcnt)
1429 self.ui.debug(_("%d total queries\n") % reqcnt)
1430
1430
1431 return base.keys(), list(fetch), heads
1431 return base.keys(), list(fetch), heads
1432
1432
1433 def findoutgoing(self, remote, base=None, heads=None, force=False):
1433 def findoutgoing(self, remote, base=None, heads=None, force=False):
1434 """Return list of nodes that are roots of subsets not in remote
1434 """Return list of nodes that are roots of subsets not in remote
1435
1435
1436 If base dict is specified, assume that these nodes and their parents
1436 If base dict is specified, assume that these nodes and their parents
1437 exist on the remote side.
1437 exist on the remote side.
1438 If a list of heads is specified, return only nodes which are heads
1438 If a list of heads is specified, return only nodes which are heads
1439 or ancestors of these heads, and return a second element which
1439 or ancestors of these heads, and return a second element which
1440 contains all remote heads which get new children.
1440 contains all remote heads which get new children.
1441 """
1441 """
1442 if base == None:
1442 if base == None:
1443 base = {}
1443 base = {}
1444 self.findincoming(remote, base, heads, force=force)
1444 self.findincoming(remote, base, heads, force=force)
1445
1445
1446 self.ui.debug(_("common changesets up to ")
1446 self.ui.debug(_("common changesets up to ")
1447 + " ".join(map(short, base.keys())) + "\n")
1447 + " ".join(map(short, base.keys())) + "\n")
1448
1448
1449 remain = set(self.changelog.nodemap)
1449 remain = set(self.changelog.nodemap)
1450
1450
1451 # prune everything remote has from the tree
1451 # prune everything remote has from the tree
1452 remain.remove(nullid)
1452 remain.remove(nullid)
1453 remove = base.keys()
1453 remove = base.keys()
1454 while remove:
1454 while remove:
1455 n = remove.pop(0)
1455 n = remove.pop(0)
1456 if n in remain:
1456 if n in remain:
1457 remain.remove(n)
1457 remain.remove(n)
1458 for p in self.changelog.parents(n):
1458 for p in self.changelog.parents(n):
1459 remove.append(p)
1459 remove.append(p)
1460
1460
1461 # find every node whose parents have been pruned
1461 # find every node whose parents have been pruned
1462 subset = []
1462 subset = []
1463 # find every remote head that will get new children
1463 # find every remote head that will get new children
1464 updated_heads = {}
1464 updated_heads = {}
1465 for n in remain:
1465 for n in remain:
1466 p1, p2 = self.changelog.parents(n)
1466 p1, p2 = self.changelog.parents(n)
1467 if p1 not in remain and p2 not in remain:
1467 if p1 not in remain and p2 not in remain:
1468 subset.append(n)
1468 subset.append(n)
1469 if heads:
1469 if heads:
1470 if p1 in heads:
1470 if p1 in heads:
1471 updated_heads[p1] = True
1471 updated_heads[p1] = True
1472 if p2 in heads:
1472 if p2 in heads:
1473 updated_heads[p2] = True
1473 updated_heads[p2] = True
1474
1474
1475 # this is the set of all roots we have to push
1475 # this is the set of all roots we have to push
1476 if heads:
1476 if heads:
1477 return subset, updated_heads.keys()
1477 return subset, updated_heads.keys()
1478 else:
1478 else:
1479 return subset
1479 return subset
1480
1480
1481 def pull(self, remote, heads=None, force=False):
1481 def pull(self, remote, heads=None, force=False):
1482 lock = self.lock()
1482 lock = self.lock()
1483 try:
1483 try:
1484 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1484 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1485 force=force)
1485 force=force)
1486 if fetch == [nullid]:
1486 if fetch == [nullid]:
1487 self.ui.status(_("requesting all changes\n"))
1487 self.ui.status(_("requesting all changes\n"))
1488
1488
1489 if not fetch:
1489 if not fetch:
1490 self.ui.status(_("no changes found\n"))
1490 self.ui.status(_("no changes found\n"))
1491 return 0
1491 return 0
1492
1492
1493 if heads is None and remote.capable('changegroupsubset'):
1493 if heads is None and remote.capable('changegroupsubset'):
1494 heads = rheads
1494 heads = rheads
1495
1495
1496 if heads is None:
1496 if heads is None:
1497 cg = remote.changegroup(fetch, 'pull')
1497 cg = remote.changegroup(fetch, 'pull')
1498 else:
1498 else:
1499 if not remote.capable('changegroupsubset'):
1499 if not remote.capable('changegroupsubset'):
1500 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1500 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1501 cg = remote.changegroupsubset(fetch, heads, 'pull')
1501 cg = remote.changegroupsubset(fetch, heads, 'pull')
1502 return self.addchangegroup(cg, 'pull', remote.url())
1502 return self.addchangegroup(cg, 'pull', remote.url())
1503 finally:
1503 finally:
1504 lock.release()
1504 lock.release()
1505
1505
1506 def push(self, remote, force=False, revs=None):
1506 def push(self, remote, force=False, revs=None):
1507 # there are two ways to push to remote repo:
1507 # there are two ways to push to remote repo:
1508 #
1508 #
1509 # addchangegroup assumes local user can lock remote
1509 # addchangegroup assumes local user can lock remote
1510 # repo (local filesystem, old ssh servers).
1510 # repo (local filesystem, old ssh servers).
1511 #
1511 #
1512 # unbundle assumes local user cannot lock remote repo (new ssh
1512 # unbundle assumes local user cannot lock remote repo (new ssh
1513 # servers, http servers).
1513 # servers, http servers).
1514
1514
1515 if remote.capable('unbundle'):
1515 if remote.capable('unbundle'):
1516 return self.push_unbundle(remote, force, revs)
1516 return self.push_unbundle(remote, force, revs)
1517 return self.push_addchangegroup(remote, force, revs)
1517 return self.push_addchangegroup(remote, force, revs)
1518
1518
1519 def prepush(self, remote, force, revs):
1519 def prepush(self, remote, force, revs):
1520 common = {}
1520 common = {}
1521 remote_heads = remote.heads()
1521 remote_heads = remote.heads()
1522 inc = self.findincoming(remote, common, remote_heads, force=force)
1522 inc = self.findincoming(remote, common, remote_heads, force=force)
1523
1523
1524 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1524 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1525 if revs is not None:
1525 if revs is not None:
1526 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1526 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1527 else:
1527 else:
1528 bases, heads = update, self.changelog.heads()
1528 bases, heads = update, self.changelog.heads()
1529
1529
1530 if not bases:
1530 if not bases:
1531 self.ui.status(_("no changes found\n"))
1531 self.ui.status(_("no changes found\n"))
1532 return None, 1
1532 return None, 1
1533 elif not force:
1533 elif not force:
1534 # check if we're creating new remote heads
1534 # check if we're creating new remote heads
1535 # to be a remote head after push, node must be either
1535 # to be a remote head after push, node must be either
1536 # - unknown locally
1536 # - unknown locally
1537 # - a local outgoing head descended from update
1537 # - a local outgoing head descended from update
1538 # - a remote head that's known locally and not
1538 # - a remote head that's known locally and not
1539 # ancestral to an outgoing head
1539 # ancestral to an outgoing head
1540
1540
1541 warn = 0
1541 warn = 0
1542
1542
1543 if remote_heads == [nullid]:
1543 if remote_heads == [nullid]:
1544 warn = 0
1544 warn = 0
1545 elif not revs and len(heads) > len(remote_heads):
1545 elif not revs and len(heads) > len(remote_heads):
1546 warn = 1
1546 warn = 1
1547 else:
1547 else:
1548 newheads = list(heads)
1548 newheads = list(heads)
1549 for r in remote_heads:
1549 for r in remote_heads:
1550 if r in self.changelog.nodemap:
1550 if r in self.changelog.nodemap:
1551 desc = self.changelog.heads(r, heads)
1551 desc = self.changelog.heads(r, heads)
1552 l = [h for h in heads if h in desc]
1552 l = [h for h in heads if h in desc]
1553 if not l:
1553 if not l:
1554 newheads.append(r)
1554 newheads.append(r)
1555 else:
1555 else:
1556 newheads.append(r)
1556 newheads.append(r)
1557 if len(newheads) > len(remote_heads):
1557 if len(newheads) > len(remote_heads):
1558 warn = 1
1558 warn = 1
1559
1559
1560 if warn:
1560 if warn:
1561 self.ui.warn(_("abort: push creates new remote heads!\n"))
1561 self.ui.warn(_("abort: push creates new remote heads!\n"))
1562 self.ui.status(_("(did you forget to merge?"
1562 self.ui.status(_("(did you forget to merge?"
1563 " use push -f to force)\n"))
1563 " use push -f to force)\n"))
1564 return None, 0
1564 return None, 0
1565 elif inc:
1565 elif inc:
1566 self.ui.warn(_("note: unsynced remote changes!\n"))
1566 self.ui.warn(_("note: unsynced remote changes!\n"))
1567
1567
1568
1568
1569 if revs is None:
1569 if revs is None:
1570 # use the fast path, no race possible on push
1570 # use the fast path, no race possible on push
1571 cg = self._changegroup(common.keys(), 'push')
1571 cg = self._changegroup(common.keys(), 'push')
1572 else:
1572 else:
1573 cg = self.changegroupsubset(update, revs, 'push')
1573 cg = self.changegroupsubset(update, revs, 'push')
1574 return cg, remote_heads
1574 return cg, remote_heads
1575
1575
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

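        # Illustrative shape of the extranodes mapping described in the
        # docstring (the node names here are hypothetical):
        #   extranodes = {1: [(mnode, linknode)],            # manifest nodes
        #                 'some/file': [(fnode, linknode)]}  # filelog nodes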
        if extranodes is None:
            # can we go through the fast path?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

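        # For example, gengroup() below sorts the pending manifest nodes
        # topologically with:
        #   msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))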
        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file; let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

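        # Per findmissing's contract, cl.findmissing(common) below yields
        # every changelog node we have that is not an ancestor of a common
        # node, i.e. exactly the changesets the recipient lacks.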
        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
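        # Worked examples of the return value (computed at the end of this
        # method as newheads - oldheads +/- 1): a group that adds one new
        # head returns 2, one that leaves the head count unchanged returns
        # 1, and one that merges two heads into one returns -2.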
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = len(cl) - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: self.changelog.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
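        # Wire format as parsed above and below: a status-code line, a
        # "<total_files> <total_bytes>" line, then for each file a
        # "<name>\0<size>" line followed by exactly <size> bytes of data.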
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

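# A sketch of how aftertrans is typically wired up (argument names here are
# hypothetical): the callable it returns is passed to the transaction as its
# post-close callback, e.g.
#   tr = transaction.transaction(ui.warn, opener, journalpath,
#                                aftertrans(renamefiles))
# so the queued renames run only once the transaction commits.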
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True