repo classes: remove unused dev() method

Matt Mackall
changeset r6312:08800489 (branch: default)
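For orientation (not part of the diff itself): a condensed view of the two dev() implementations this commit deletes, assembled from the hunks below. localrepository reported the device number of the repository directory, while the read-only bundlerepository had no meaningful device and returned a placeholder -1.

    # Condensed from the hunks below; class skeletons added for context.
    class localrepository(repo.repository):
        def dev(self):
            return os.lstat(self.path).st_dev

    class bundlerepository(localrepo.localrepository):
        def dev(self):
            return -1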
diff --git a/mercurial/bundlerepo.py b/mercurial/bundlerepo.py
--- a/mercurial/bundlerepo.py
+++ b/mercurial/bundlerepo.py
@@ -1,283 +1,280 @@
1 """
1 """
2 bundlerepo.py - repository class for viewing uncompressed bundles
2 bundlerepo.py - repository class for viewing uncompressed bundles
3
3
4 This provides a read-only repository interface to bundles as if
4 This provides a read-only repository interface to bundles as if
5 they were part of the actual repository.
5 they were part of the actual repository.
6
6
7 Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
7 Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import hex, nullid, short
13 from node import hex, nullid, short
14 from i18n import _
14 from i18n import _
15 import changegroup, util, os, struct, bz2, tempfile, mdiff
15 import changegroup, util, os, struct, bz2, tempfile, mdiff
16 import localrepo, changelog, manifest, filelog, revlog
16 import localrepo, changelog, manifest, filelog, revlog
17
17
18 class bundlerevlog(revlog.revlog):
18 class bundlerevlog(revlog.revlog):
19 def __init__(self, opener, indexfile, bundlefile,
19 def __init__(self, opener, indexfile, bundlefile,
20 linkmapper=None):
20 linkmapper=None):
21 # How it works:
21 # How it works:
22 # to retrieve a revision, we need to know the offset of
22 # to retrieve a revision, we need to know the offset of
23 # the revision in the bundlefile (an opened file).
23 # the revision in the bundlefile (an opened file).
24 #
24 #
25 # We store this offset in the index (start), to differentiate a
25 # We store this offset in the index (start), to differentiate a
26 # rev in the bundle and from a rev in the revlog, we check
26 # rev in the bundle and from a rev in the revlog, we check
27 # len(index[r]). If the tuple is bigger than 7, it is a bundle
27 # len(index[r]). If the tuple is bigger than 7, it is a bundle
28 # (it is bigger since we store the node to which the delta is)
28 # (it is bigger since we store the node to which the delta is)
29 #
29 #
30 revlog.revlog.__init__(self, opener, indexfile)
30 revlog.revlog.__init__(self, opener, indexfile)
31 self.bundlefile = bundlefile
31 self.bundlefile = bundlefile
32 self.basemap = {}
32 self.basemap = {}
33 def chunkpositer():
33 def chunkpositer():
34 for chunk in changegroup.chunkiter(bundlefile):
34 for chunk in changegroup.chunkiter(bundlefile):
35 pos = bundlefile.tell()
35 pos = bundlefile.tell()
36 yield chunk, pos - len(chunk)
36 yield chunk, pos - len(chunk)
37 n = self.count()
37 n = self.count()
38 prev = None
38 prev = None
39 for chunk, start in chunkpositer():
39 for chunk, start in chunkpositer():
40 size = len(chunk)
40 size = len(chunk)
41 if size < 80:
41 if size < 80:
42 raise util.Abort("invalid changegroup")
42 raise util.Abort("invalid changegroup")
43 start += 80
43 start += 80
44 size -= 80
44 size -= 80
45 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
45 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
46 if node in self.nodemap:
46 if node in self.nodemap:
47 prev = node
47 prev = node
48 continue
48 continue
49 for p in (p1, p2):
49 for p in (p1, p2):
50 if not p in self.nodemap:
50 if not p in self.nodemap:
51 raise revlog.LookupError(p1, self.indexfile,
51 raise revlog.LookupError(p1, self.indexfile,
52 _("unknown parent"))
52 _("unknown parent"))
53 if linkmapper is None:
53 if linkmapper is None:
54 link = n
54 link = n
55 else:
55 else:
56 link = linkmapper(cs)
56 link = linkmapper(cs)
57
57
58 if not prev:
58 if not prev:
59 prev = p1
59 prev = p1
60 # start, size, full unc. size, base (unused), link, p1, p2, node
60 # start, size, full unc. size, base (unused), link, p1, p2, node
61 e = (revlog.offset_type(start, 0), size, -1, -1, link,
61 e = (revlog.offset_type(start, 0), size, -1, -1, link,
62 self.rev(p1), self.rev(p2), node)
62 self.rev(p1), self.rev(p2), node)
63 self.basemap[n] = prev
63 self.basemap[n] = prev
64 self.index.insert(-1, e)
64 self.index.insert(-1, e)
65 self.nodemap[node] = n
65 self.nodemap[node] = n
66 prev = node
66 prev = node
67 n += 1
67 n += 1
 
     def bundle(self, rev):
         """is rev from the bundle"""
         if rev < 0:
             return False
         return rev in self.basemap
     def bundlebase(self, rev): return self.basemap[rev]
     def chunk(self, rev, df=None, cachelen=4096):
         # Warning: in case of bundle, the diff is against bundlebase,
         # not against rev - 1
         # XXX: could use some caching
         if not self.bundle(rev):
             return revlog.revlog.chunk(self, rev, df)
         self.bundlefile.seek(self.start(rev))
         return self.bundlefile.read(self.length(rev))
 
     def revdiff(self, rev1, rev2):
         """return or calculate a delta between two revisions"""
         if self.bundle(rev1) and self.bundle(rev2):
             # hot path for bundle
             revb = self.rev(self.bundlebase(rev2))
             if revb == rev1:
                 return self.chunk(rev2)
         elif not self.bundle(rev1) and not self.bundle(rev2):
             return revlog.revlog.revdiff(self, rev1, rev2)
 
         return mdiff.textdiff(self.revision(self.node(rev1)),
                               self.revision(self.node(rev2)))
 
     def revision(self, node):
         """return an uncompressed revision of a given"""
         if node == nullid: return ""
 
         text = None
         chain = []
         iter_node = node
         rev = self.rev(iter_node)
         # reconstruct the revision if it is from a changegroup
         while self.bundle(rev):
             if self._cache and self._cache[0] == iter_node:
                 text = self._cache[2]
                 break
             chain.append(rev)
             iter_node = self.bundlebase(rev)
             rev = self.rev(iter_node)
         if text is None:
             text = revlog.revlog.revision(self, iter_node)
 
         while chain:
             delta = self.chunk(chain.pop())
             text = mdiff.patches(text, [delta])
 
         p1, p2 = self.parents(node)
         if node != revlog.hash(text, p1, p2):
             raise revlog.RevlogError(_("integrity check failed on %s:%d")
                                      % (self.datafile, self.rev(node)))
 
         self._cache = (node, self.rev(node), text)
         return text
 
     def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
         raise NotImplementedError
     def addgroup(self, revs, linkmapper, transaction, unique=0):
         raise NotImplementedError
     def strip(self, rev, minlink):
         raise NotImplementedError
     def checksize(self):
         raise NotImplementedError
 
 class bundlechangelog(bundlerevlog, changelog.changelog):
     def __init__(self, opener, bundlefile):
         changelog.changelog.__init__(self, opener)
         bundlerevlog.__init__(self, opener, self.indexfile, bundlefile)
 
 class bundlemanifest(bundlerevlog, manifest.manifest):
     def __init__(self, opener, bundlefile, linkmapper):
         manifest.manifest.__init__(self, opener)
         bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
                               linkmapper)
 
 class bundlefilelog(bundlerevlog, filelog.filelog):
     def __init__(self, opener, path, bundlefile, linkmapper):
         filelog.filelog.__init__(self, opener, path)
         bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
                               linkmapper)
 
 class bundlerepository(localrepo.localrepository):
     def __init__(self, ui, path, bundlename):
         localrepo.localrepository.__init__(self, ui, path)
 
         if path:
             self._url = 'bundle:' + path + '+' + bundlename
         else:
             self._url = 'bundle:' + bundlename
 
         self.tempfile = None
         self.bundlefile = open(bundlename, "rb")
         header = self.bundlefile.read(6)
         if not header.startswith("HG"):
             raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
         elif not header.startswith("HG10"):
             raise util.Abort(_("%s: unknown bundle version") % bundlename)
         elif header == "HG10BZ":
             fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
                                             suffix=".hg10un", dir=self.path)
             self.tempfile = temp
             fptemp = os.fdopen(fdtemp, 'wb')
             def generator(f):
                 zd = bz2.BZ2Decompressor()
                 zd.decompress("BZ")
                 for chunk in f:
                     yield zd.decompress(chunk)
             gen = generator(util.filechunkiter(self.bundlefile, 4096))
 
             try:
                 fptemp.write("HG10UN")
                 for chunk in gen:
                     fptemp.write(chunk)
             finally:
                 fptemp.close()
                 self.bundlefile.close()
 
             self.bundlefile = open(self.tempfile, "rb")
             # seek right after the header
             self.bundlefile.seek(6)
         elif header == "HG10UN":
             # nothing to do
             pass
         else:
             raise util.Abort(_("%s: unknown bundle compression type")
                              % bundlename)
         # dict with the mapping 'filename' -> position in the bundle
         self.bundlefilespos = {}
 
     def __getattr__(self, name):
         if name == 'changelog':
             self.changelog = bundlechangelog(self.sopener, self.bundlefile)
             self.manstart = self.bundlefile.tell()
             return self.changelog
         if name == 'manifest':
             self.bundlefile.seek(self.manstart)
             self.manifest = bundlemanifest(self.sopener, self.bundlefile,
                                            self.changelog.rev)
             self.filestart = self.bundlefile.tell()
             return self.manifest
         if name == 'manstart':
             self.changelog
             return self.manstart
         if name == 'filestart':
             self.manifest
             return self.filestart
         return localrepo.localrepository.__getattr__(self, name)
 
     def url(self):
         return self._url
 
-    def dev(self):
-        return -1
-
     def file(self, f):
         if not self.bundlefilespos:
             self.bundlefile.seek(self.filestart)
             while 1:
                 chunk = changegroup.getchunk(self.bundlefile)
                 if not chunk:
                     break
                 self.bundlefilespos[chunk] = self.bundlefile.tell()
                 for c in changegroup.chunkiter(self.bundlefile):
                     pass
 
         if f[0] == '/':
             f = f[1:]
         if f in self.bundlefilespos:
             self.bundlefile.seek(self.bundlefilespos[f])
             return bundlefilelog(self.sopener, f, self.bundlefile,
                                  self.changelog.rev)
         else:
             return filelog.filelog(self.sopener, f)
 
     def close(self):
         """Close assigned bundle file immediately."""
         self.bundlefile.close()
 
     def __del__(self):
         bundlefile = getattr(self, 'bundlefile', None)
         if bundlefile and not bundlefile.closed:
             bundlefile.close()
         tempfile = getattr(self, 'tempfile', None)
         if tempfile is not None:
             os.unlink(tempfile)
 
 def instance(ui, path, create):
     if create:
         raise util.Abort(_('cannot create new bundle repository'))
     parentpath = ui.config("bundle", "mainreporoot", "")
     if parentpath:
         # Try to make the full path relative so we get a nice, short URL.
         # In particular, we don't want temp dir names in test outputs.
         cwd = os.getcwd()
         if parentpath == cwd:
             parentpath = ''
         else:
             cwd = os.path.join(cwd,'')
             if parentpath.startswith(cwd):
                 parentpath = parentpath[len(cwd):]
     path = util.drop_scheme('file', path)
     if path.startswith('bundle:'):
         path = util.drop_scheme('bundle', path)
         s = path.split("+", 1)
         if len(s) == 1:
             repopath, bundlename = parentpath, s[0]
         else:
             repopath, bundlename = s
     else:
         repopath, bundlename = parentpath, path
     return bundlerepository(ui, repopath, bundlename)
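As a usage sketch (not part of the commit): instance() above is the entry point used to open a bundle as a read-only repository. A hypothetical direct call, with illustrative paths, might look like the following; the "bundle:<repo>+<bundle>" form pairs a parent repository with a bundle file, and a bare bundle path falls back to the bundle.mainreporoot configuration read above.

    # Illustrative only; paths and the ui construction are assumptions.
    from mercurial import ui as uimod
    from mercurial import bundlerepo

    u = uimod.ui()
    repo = bundlerepo.instance(u, "bundle:/path/to/repo+/path/to/changes.hg",
                               False)
    print repo.url()   # -> bundle:/path/to/repo+/path/to/changes.hg
    repo.close()       # close the assigned bundle file immediately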
diff --git a/mercurial/localrepo.py b/mercurial/localrepo.py
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,2123 +1,2120 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.
 
 from node import bin, hex, nullid, nullrev, short
 from i18n import _
 import repo, changegroup
 import changelog, dirstate, filelog, manifest, context, weakref
 import lock, transaction, stat, errno, ui
 import os, revlog, time, util, extensions, hook, inspect
 
 class localrepository(repo.repository):
     capabilities = util.set(('lookup', 'changegroupsubset'))
     supported = ('revlogv1', 'store')
 
     def __init__(self, parentui, path=None, create=0):
         repo.repository.__init__(self)
         self.root = os.path.realpath(path)
         self.path = os.path.join(self.root, ".hg")
         self.origroot = path
         self.opener = util.opener(self.path)
         self.wopener = util.opener(self.root)
 
         if not os.path.isdir(self.path):
             if create:
                 if not os.path.exists(path):
                     os.mkdir(path)
                 os.mkdir(self.path)
                 requirements = ["revlogv1"]
                 if parentui.configbool('format', 'usestore', True):
                     os.mkdir(os.path.join(self.path, "store"))
                     requirements.append("store")
                 # create an invalid changelog
                 self.opener("00changelog.i", "a").write(
                     '\0\0\0\2' # represents revlogv2
                     ' dummy changelog to prevent using the old repo layout'
                 )
                 reqfile = self.opener("requires", "w")
                 for r in requirements:
                     reqfile.write("%s\n" % r)
                 reqfile.close()
             else:
                 raise repo.RepoError(_("repository %s not found") % path)
         elif create:
             raise repo.RepoError(_("repository %s already exists") % path)
         else:
             # find requirements
             try:
                 requirements = self.opener("requires").read().splitlines()
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
                 requirements = []
             # check them
             for r in requirements:
                 if r not in self.supported:
                     raise repo.RepoError(_("requirement '%s' not supported") % r)
 
         # setup store
         if "store" in requirements:
             self.encodefn = util.encodefilename
             self.decodefn = util.decodefilename
             self.spath = os.path.join(self.path, "store")
         else:
             self.encodefn = lambda x: x
             self.decodefn = lambda x: x
             self.spath = self.path
 
         try:
             # files in .hg/ will be created using this mode
             mode = os.stat(self.spath).st_mode
             # avoid some useless chmods
             if (0777 & ~util._umask) == (0777 & mode):
                 mode = None
         except OSError:
             mode = None
 
         self._createmode = mode
         self.opener.createmode = mode
         sopener = util.opener(self.spath)
         sopener.createmode = mode
         self.sopener = util.encodedopener(sopener, self.encodefn)
 
         self.ui = ui.ui(parentui=parentui)
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass
 
         self.tagscache = None
         self._tagstypecache = None
         self.branchcache = None
         self._ubranchcache = None # UTF-8 version of branchcache
         self._branchcachetip = None
         self.nodetagscache = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None
 
     def __getattr__(self, name):
         if name == 'changelog':
             self.changelog = changelog.changelog(self.sopener)
             self.sopener.defversion = self.changelog.version
             return self.changelog
         if name == 'manifest':
             self.changelog
             self.manifest = manifest.manifest(self.sopener)
             return self.manifest
         if name == 'dirstate':
             self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
             return self.dirstate
         else:
             raise AttributeError, name
 
     def url(self):
         return 'file:' + self.root
 
     def hook(self, name, throw=False, **args):
         return hook.hook(self.ui, self, name, throw, **args)
 
     tag_disallowed = ':\r\n'
 
     def _tag(self, name, node, message, local, user, date, parent=None,
              extra={}):
         use_dirstate = parent is None
 
         for c in self.tag_disallowed:
             if c in name:
                 raise util.Abort(_('%r cannot be used in a tag name') % c)
 
         self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
 
         def writetag(fp, name, munge, prevtags):
             fp.seek(0, 2)
             if prevtags and prevtags[-1] != '\n':
                 fp.write('\n')
             fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
             fp.close()
 
         prevtags = ''
         if local:
             try:
                 fp = self.opener('localtags', 'r+')
             except IOError, err:
                 fp = self.opener('localtags', 'a')
             else:
                 prevtags = fp.read()
 
             # local tags are stored in the current charset
             writetag(fp, name, None, prevtags)
             self.hook('tag', node=hex(node), tag=name, local=local)
             return
 
         if use_dirstate:
             try:
                 fp = self.wfile('.hgtags', 'rb+')
             except IOError, err:
                 fp = self.wfile('.hgtags', 'ab')
             else:
                 prevtags = fp.read()
         else:
             try:
                 prevtags = self.filectx('.hgtags', parent).data()
             except revlog.LookupError:
                 pass
             fp = self.wfile('.hgtags', 'wb')
             if prevtags:
                 fp.write(prevtags)
 
         # committed tags are stored in UTF-8
         writetag(fp, name, util.fromlocal, prevtags)
 
         if use_dirstate and '.hgtags' not in self.dirstate:
             self.add(['.hgtags'])
 
         tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                               extra=extra)
 
         self.hook('tag', node=hex(node), tag=name, local=local)
 
         return tagnode
 
     def tag(self, name, node, message, local, user, date):
         '''tag a revision with a symbolic name.
 
         if local is True, the tag is stored in a per-repository file.
         otherwise, it is stored in the .hgtags file, and a new
         changeset is committed with the change.
 
         keyword arguments:
 
         local: whether to store tag in non-version-controlled file
         (default False)
 
         message: commit message to use if committing
 
         user: name of user to use if committing
 
         date: date tuple to use if committing'''
 
         for x in self.status()[:5]:
             if '.hgtags' in x:
                 raise util.Abort(_('working copy of .hgtags is changed '
                                    '(please commit .hgtags manually)'))
 
         self._tag(name, node, message, local, user, date)
 
     def tags(self):
         '''return a mapping of tag to node'''
         if self.tagscache:
             return self.tagscache
 
         globaltags = {}
         tagtypes = {}
 
         def readtags(lines, fn, tagtype):
             filetags = {}
             count = 0
 
             def warn(msg):
                 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
 
             for l in lines:
                 count += 1
                 if not l:
                     continue
                 s = l.split(" ", 1)
                 if len(s) != 2:
                     warn(_("cannot parse entry"))
                     continue
                 node, key = s
                 key = util.tolocal(key.strip()) # stored in UTF-8
                 try:
                     bin_n = bin(node)
                 except TypeError:
                     warn(_("node '%s' is not well formed") % node)
                     continue
                 if bin_n not in self.changelog.nodemap:
                     warn(_("tag '%s' refers to unknown node") % key)
                     continue
 
                 h = []
                 if key in filetags:
                     n, h = filetags[key]
                     h.append(n)
                 filetags[key] = (bin_n, h)
 
             for k, nh in filetags.items():
                 if k not in globaltags:
                     globaltags[k] = nh
                     tagtypes[k] = tagtype
                     continue
 
                 # we prefer the global tag if:
                 #  it supercedes us OR
                 #  mutual supercedes and it has a higher rank
                 # otherwise we win because we're tip-most
                 an, ah = nh
                 bn, bh = globaltags[k]
                 if (bn != an and an in bh and
                     (bn not in ah or len(bh) > len(ah))):
                     an = bn
                 ah.extend([n for n in bh if n not in ah])
                 globaltags[k] = an, ah
                 tagtypes[k] = tagtype
 
         # read the tags file from each head, ending with the tip
         f = None
         for rev, node, fnode in self._hgtagsnodes():
             f = (f and f.filectx(fnode) or
                  self.filectx('.hgtags', fileid=fnode))
             readtags(f.data().splitlines(), f, "global")
 
         try:
             data = util.fromlocal(self.opener("localtags").read())
             # localtags are stored in the local character set
             # while the internal tag table is stored in UTF-8
             readtags(data.splitlines(), "localtags", "local")
         except IOError:
             pass
 
         self.tagscache = {}
         self._tagstypecache = {}
         for k,nh in globaltags.items():
             n = nh[0]
             if n != nullid:
                 self.tagscache[k] = n
             self._tagstypecache[k] = tagtypes[k]
         self.tagscache['tip'] = self.changelog.tip()
 
         return self.tagscache
 
     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:
 
         'local'  : a local tag
         'global' : a global tag
         None     : tag does not exist
         '''
 
         self.tags()
 
         return self._tagstypecache.get(tagname)
 
     def _hgtagsnodes(self):
         heads = self.heads()
         heads.reverse()
         last = {}
         ret = []
         for node in heads:
             c = self.changectx(node)
             rev = c.rev()
             try:
                 fnode = c.filenode('.hgtags')
             except revlog.LookupError:
                 continue
             ret.append((rev, node, fnode))
             if fnode in last:
                 ret[last[fnode]] = None
             last[fnode] = len(ret) - 1
         return [item for item in ret if item]
 
     def tagslist(self):
         '''return a list of tags ordered by revision'''
         l = []
         for t, n in self.tags().items():
             try:
                 r = self.changelog.rev(n)
             except:
                 r = -2 # sort to the beginning of the list if unknown
             l.append((r, t, n))
         l.sort()
         return [(t, n) for r, t, n in l]
 
     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self.nodetagscache:
             self.nodetagscache = {}
             for t, n in self.tags().items():
                 self.nodetagscache.setdefault(n, []).append(t)
         return self.nodetagscache.get(node, [])
 
     def _branchtags(self, partial, lrev):
         tiprev = self.changelog.count() - 1
         if lrev != tiprev:
             self._updatebranchcache(partial, lrev+1, tiprev+1)
             self._writebranchcache(partial, self.changelog.tip(), tiprev)
 
         return partial
 
     def branchtags(self):
         tip = self.changelog.tip()
         if self.branchcache is not None and self._branchcachetip == tip:
             return self.branchcache
 
         oldtip = self._branchcachetip
         self._branchcachetip = tip
         if self.branchcache is None:
             self.branchcache = {} # avoid recursion in changectx
         else:
             self.branchcache.clear() # keep using the same dict
         if oldtip is None or oldtip not in self.changelog.nodemap:
             partial, last, lrev = self._readbranchcache()
         else:
             lrev = self.changelog.rev(oldtip)
             partial = self._ubranchcache
 
         self._branchtags(partial, lrev)
 
         # the branch cache is stored on disk as UTF-8, but in the local
         # charset internally
         for k, v in partial.items():
             self.branchcache[util.tolocal(k)] = v
         self._ubranchcache = partial
         return self.branchcache
 
     def _readbranchcache(self):
         partial = {}
         try:
             f = self.opener("branch.cache")
             lines = f.read().split('\n')
             f.close()
         except (IOError, OSError):
             return {}, nullid, nullrev
 
         try:
             last, lrev = lines.pop(0).split(" ", 1)
             last, lrev = bin(last), int(lrev)
             if not (lrev < self.changelog.count() and
                     self.changelog.node(lrev) == last): # sanity check
                 # invalidate the cache
                 raise ValueError('invalidating branch cache (tip differs)')
             for l in lines:
                 if not l: continue
                 node, label = l.split(" ", 1)
                 partial[label.strip()] = bin(node)
         except (KeyboardInterrupt, util.SignalInterrupt):
             raise
         except Exception, inst:
             if self.ui.debugflag:
                 self.ui.warn(str(inst), '\n')
             partial, last, lrev = {}, nullid, nullrev
         return partial, last, lrev
 
     def _writebranchcache(self, branches, tip, tiprev):
         try:
             f = self.opener("branch.cache", "w", atomictemp=True)
             f.write("%s %s\n" % (hex(tip), tiprev))
             for label, node in branches.iteritems():
                 f.write("%s %s\n" % (hex(node), label))
             f.rename()
         except (IOError, OSError):
             pass
 
     def _updatebranchcache(self, partial, start, end):
         for r in xrange(start, end):
             c = self.changectx(r)
             b = c.branch()
             partial[b] = c.node()
 
     def lookup(self, key):
         if key == '.':
             key, second = self.dirstate.parents()
             if key == nullid:
                 raise repo.RepoError(_("no revision checked out"))
             if second != nullid:
                 self.ui.warn(_("warning: working directory has two parents, "
                                "tag '.' uses the first\n"))
         elif key == 'null':
             return nullid
         n = self.changelog._match(key)
         if n:
             return n
         if key in self.tags():
             return self.tags()[key]
         if key in self.branchtags():
             return self.branchtags()[key]
         n = self.changelog._partialmatch(key)
         if n:
             return n
         try:
             if len(key) == 20:
                 key = hex(key)
         except:
             pass
         raise repo.RepoError(_("unknown revision '%s'") % key)
 
-    def dev(self):
-        return os.lstat(self.path).st_dev
-
     def local(self):
         return True
 
     def join(self, f):
         return os.path.join(self.path, f)
 
     def sjoin(self, f):
         f = self.encodefn(f)
         return os.path.join(self.spath, f)
 
     def wjoin(self, f):
         return os.path.join(self.root, f)
 
     def file(self, f):
         if f[0] == '/':
             f = f[1:]
         return filelog.filelog(self.sopener, f)
 
     def changectx(self, changeid=None):
         return context.changectx(self, changeid)
 
     def workingctx(self):
         return context.workingctx(self)
 
     def parents(self, changeid=None):
         '''
         get list of changectxs for parents of changeid or working directory
         '''
         if changeid is None:
             pl = self.dirstate.parents()
         else:
             n = self.changelog.lookup(changeid)
             pl = self.changelog.parents(n)
         if pl[1] == nullid:
             return [self.changectx(pl[0])]
         return [self.changectx(pl[0]), self.changectx(pl[1])]
 
     def filectx(self, path, changeid=None, fileid=None):
         """changeid can be a changeset revision, node, or tag.
         fileid can be a file revision or node."""
         return context.filectx(self, path, changeid, fileid)
 
     def getcwd(self):
         return self.dirstate.getcwd()
 
     def pathto(self, f, cwd=None):
         return self.dirstate.pathto(f, cwd)
 
     def wfile(self, f, mode='r'):
         return self.wopener(f, mode)
 
     def _link(self, f):
         return os.path.islink(self.wjoin(f))
 
     def _filter(self, filter, filename, data):
         if filter not in self.filterpats:
             l = []
             for pat, cmd in self.ui.configitems(filter):
                 mf = util.matcher(self.root, "", [pat], [], [])[1]
                 fn = None
                 params = cmd
                 for name, filterfn in self._datafilters.iteritems():
                     if cmd.startswith(name):
                         fn = filterfn
                         params = cmd[len(name):].lstrip()
                         break
                 if not fn:
                     fn = lambda s, c, **kwargs: util.filter(s, c)
                 # Wrap old filters not supporting keyword arguments
                 if not inspect.getargspec(fn)[2]:
                     oldfn = fn
                     fn = lambda s, c, **kwargs: oldfn(s, c)
                 l.append((mf, fn, params))
             self.filterpats[filter] = l
 
         for mf, fn, cmd in self.filterpats[filter]:
             if mf(filename):
                 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                 break
 
         return data
 
     def adddatafilter(self, name, filter):
         self._datafilters[name] = filter
 
     def wread(self, filename):
         if self._link(filename):
             data = os.readlink(self.wjoin(filename))
         else:
             data = self.wopener(filename, 'r').read()
         return self._filter("encode", filename, data)
 
     def wwrite(self, filename, data, flags):
         data = self._filter("decode", filename, data)
         try:
             os.unlink(self.wjoin(filename))
         except OSError:
             pass
         self.wopener(filename, 'w').write(data)
         util.set_flags(self.wjoin(filename), flags)
 
     def wwritedata(self, filename, data):
         return self._filter("decode", filename, data)
 
     def transaction(self):
         if self._transref and self._transref():
             return self._transref().nest()
 
         # abort here if the journal already exists
         if os.path.exists(self.sjoin("journal")):
             raise repo.RepoError(_("journal already exists - run hg recover"))
 
         # save dirstate for rollback
         try:
             ds = self.opener("dirstate").read()
         except IOError:
             ds = ""
         self.opener("journal.dirstate", "w").write(ds)
         self.opener("journal.branch", "w").write(self.dirstate.branch())
 
         renames = [(self.sjoin("journal"), self.sjoin("undo")),
                    (self.join("journal.dirstate"), self.join("undo.dirstate")),
                    (self.join("journal.branch"), self.join("undo.branch"))]
         tr = transaction.transaction(self.ui.warn, self.sopener,
                                      self.sjoin("journal"),
                                      aftertrans(renames),
                                      self._createmode)
         self._transref = weakref.ref(tr)
         return tr
 
     def recover(self):
         l = self.lock()
         try:
             if os.path.exists(self.sjoin("journal")):
                 self.ui.status(_("rolling back interrupted transaction\n"))
                 transaction.rollback(self.sopener, self.sjoin("journal"))
                 self.invalidate()
                 return True
             else:
                 self.ui.warn(_("no interrupted transaction available\n"))
                 return False
         finally:
             del l
 
     def rollback(self):
         wlock = lock = None
         try:
             wlock = self.wlock()
             lock = self.lock()
             if os.path.exists(self.sjoin("undo")):
                 self.ui.status(_("rolling back last transaction\n"))
                 transaction.rollback(self.sopener, self.sjoin("undo"))
                 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                 try:
                     branch = self.opener("undo.branch").read()
                     self.dirstate.setbranch(branch)
                 except IOError:
                     self.ui.warn(_("Named branch could not be reset, "
                                    "current branch still is: %s\n")
                                  % util.tolocal(self.dirstate.branch()))
                 self.invalidate()
                 self.dirstate.invalidate()
             else:
                 self.ui.warn(_("no rollback information available\n"))
         finally:
             del lock, wlock
 
     def invalidate(self):
         for a in "changelog manifest".split():
             if hasattr(self, a):
                 self.__delattr__(a)
         self.tagscache = None
         self._tagstypecache = None
         self.nodetagscache = None
         self.branchcache = None
         self._ubranchcache = None
         self._branchcachetip = None
 
     def _lock(self, lockname, wait, releasefn, acquirefn, desc):
         try:
             l = lock.lock(lockname, 0, releasefn, desc=desc)
         except lock.LockHeld, inst:
             if not wait:
                 raise
             self.ui.warn(_("waiting for lock on %s held by %r\n") %
                          (desc, inst.locker))
             # default to 600 seconds timeout
             l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                           releasefn, desc=desc)
         if acquirefn:
             acquirefn()
         return l
 
     def lock(self, wait=True):
         if self._lockref and self._lockref():
             return self._lockref()
 
         l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                        _('repository %s') % self.origroot)
         self._lockref = weakref.ref(l)
         return l
 
     def wlock(self, wait=True):
         if self._wlockref and self._wlockref():
             return self._wlockref()
 
         l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                        self.dirstate.invalidate, _('working directory of %s') %
                        self.origroot)
         self._wlockref = weakref.ref(l)
         return l
668
665
    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

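    # Example (illustrative only): after 'hg copy foo bar', a plain
    # non-merge commit of bar records copy metadata roughly like
    #
    #   meta = {"copy": "foo",
    #           "copyrev": "<hex of foo's filelog node in manifest1>"}
    #
    # with fp2 forced to nullid, so the copy source rather than the
    # filelog parent carries the history link.
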
    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = tr = None
        valid = 0 # don't save the dirstate if this isn't set
        if files:
            files = util.unique(files)
        try:
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            wlock = self.wlock()
            lock = self.lock()
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr, lock, wlock

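    # Example: an illustrative sketch, assuming `repo` is a localrepository
    # with pending working-directory changes:
    #
    #   node = repo.commit(text="fix parser bug",
    #                      user="Jane Doe <jane@example.com>")
    #   if node is None:
    #       pass  # "nothing changed": no new changeset was created
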
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

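    # Example: an illustrative sketch of walking the working directory and
    # dispatching on the source tags documented above:
    #
    #   for src, fn in repo.walk():
    #       if src == 'f':
    #           print 'in tree:', fn
    #       elif src == 'b':
    #           print 'bad match:', fn
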
    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False, list_unknown=True):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, getnode):
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean,
                                                    list_unknown)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    mexec = lambda f: 'x' in ctx.fileflags(f)
                    mlink = lambda f: 'l' in ctx.fileflags(f)
                    is_exec = util.execfunc(self.root, mexec)
                    is_link = util.linkfunc(self.root, mlink)
                    def flags(f):
                        return is_link(f) and 'l' or is_exec(f) and 'x' or ''
                    for f in lookup:
                        if (f not in ctx or flags(f) != ctx.fileflags(f)
                            or ctx[f].cmp(self.wread(f))):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

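    # Example: an illustrative sketch of unpacking the seven result lists
    # in the order returned above:
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(list_clean=True)
    #   for f in modified:
    #       print 'M', f
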
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

1119
1116
1120 def remove(self, list, unlink=False):
1117 def remove(self, list, unlink=False):
1121 wlock = None
1118 wlock = None
1122 try:
1119 try:
1123 if unlink:
1120 if unlink:
1124 for f in list:
1121 for f in list:
1125 try:
1122 try:
1126 util.unlink(self.wjoin(f))
1123 util.unlink(self.wjoin(f))
1127 except OSError, inst:
1124 except OSError, inst:
1128 if inst.errno != errno.ENOENT:
1125 if inst.errno != errno.ENOENT:
1129 raise
1126 raise
1130 wlock = self.wlock()
1127 wlock = self.wlock()
1131 for f in list:
1128 for f in list:
1132 if unlink and os.path.exists(self.wjoin(f)):
1129 if unlink and os.path.exists(self.wjoin(f)):
1133 self.ui.warn(_("%s still exists!\n") % f)
1130 self.ui.warn(_("%s still exists!\n") % f)
1134 elif self.dirstate[f] == 'a':
1131 elif self.dirstate[f] == 'a':
1135 self.dirstate.forget(f)
1132 self.dirstate.forget(f)
1136 elif f not in self.dirstate:
1133 elif f not in self.dirstate:
1137 self.ui.warn(_("%s not tracked!\n") % f)
1134 self.ui.warn(_("%s not tracked!\n") % f)
1138 else:
1135 else:
1139 self.dirstate.remove(f)
1136 self.dirstate.remove(f)
1140 finally:
1137 finally:
1141 del wlock
1138 del wlock
1142
1139
    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn("%s not removed!\n" % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

    def branchheads(self, branch, start=None):
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch, ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

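    # Example (illustrative): the heads of a named branch come back
    # newest-first, because the walk above visits revisions in descending
    # revision order:
    #
    #   for node in repo.branchheads('default'):
    #       print short(node)
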
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

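    # Example (illustrative): walking first parents from a node `top` down
    # to an ancestor `bottom` nine steps away, the loop above samples the
    # ancestors at distances 1, 2, 4 and 8 from top, so
    # between([(top, bottom)]) returns [[n1, n2, n4, n8]], where nk is the
    # k-th first-parent ancestor of top. The exponential spacing keeps the
    # lists short while still supporting the binary search over branch
    # ranges done in findincoming() below.
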
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

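    # Example: an illustrative sketch mirroring the call in prepush()
    # below (`other` is a hypothetical peer repository):
    #
    #   base = {}
    #   fetch = repo.findincoming(other, base=base)
    #   # fetch: roots of the changesets we are missing locally
    #   # base:  common nodes discovered during the search
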
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

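    # Example: an illustrative sketch of computing what a push would send,
    # following the same sequence prepush() uses below:
    #
    #   base = {}
    #   repo.findincoming(other, base, other.heads())
    #   update, updated_heads = repo.findoutgoing(other, base, other.heads())
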
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

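    # Example: an illustrative sketch (`other` is a hypothetical peer
    # repository, `somenode` a changelog node known on that peer):
    #
    #   repo.pull(other)                    # pull all missing changesets
    #   repo.pull(other, heads=[somenode])  # pull only up to given heads
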
    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """
1593
1590
        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and the total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # that the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file; let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

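The extranodes contract described in the docstring above is easiest to see as data. A minimal sketch, assuming repo is a localrepository and that manifest_node, file_node, and linknode already hold 20-byte binary node ids (the filename is a placeholder):

    extranodes = {
        1: [(manifest_node, linknode)],          # key 1 means "the manifest"
        'foo/bar.txt': [(file_node, linknode)],  # per-file wanted nodes
    }
    cg = repo.changegroupsubset(bases, heads, 'push', extranodes=extranodes)
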
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

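Both changegroup generators return a util.chunkbuffer, which reads like a file. A minimal consumption sketch, assuming out is any writable file object:

    cg = repo.changegroup([node], 'bundle')
    while True:
        chunk = cg.read(4096)
        if not chunk:
            break
        out.write(chunk)
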
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


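A sketch of how a caller might decode the return convention documented above, assuming fp is an open changegroup stream:

    ret = repo.addchangegroup(fp, 'pull', url)
    if ret == 0:
        print 'nothing changed'
    elif ret > 1:
        print '%d new heads' % (ret - 1)
    elif ret < 0:
        print '%d heads removed' % (-ret - 1)
    else:  # ret == 1
        print 'head count unchanged'
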
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

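For reference, the wire format consumed above, as implied by the parsing code (the layout is reconstructed from stream_in itself; any concrete file names would be illustrative):

    <status>\n                    "0" ok, "1" forbidden, "2" remote lock failed
    <total_files> <total_bytes>\n
    then, per file:
    <name>\0<size>\n              followed by exactly <size> bytes of store data
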
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

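A minimal usage sketch; the journal/undo pairs echo the names localrepository uses for its transaction files, though this particular call site is hypothetical:

    undo = aftertrans([('journal', 'undo'),
                       ('journal.dirstate', 'undo.dirstate')])
    undo()  # performs the queued renames without holding a repo reference
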
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
@@ -1,25 +1,21 @@
# remoterepo - remote repository proxy classes for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

import repo

class remoterepository(repo.repository):
-    def dev(self):
-        return -1
-
-    def local(self):
-        return False
+    pass

class remotelock(object):
    def __init__(self, repo):
        self.repo = repo
    def release(self):
        self.repo.unlock()
        self.repo = None
    def __del__(self):
        if self.repo:
            self.release()
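A minimal usage sketch for remotelock, assuming remote is an already-locked remote repository proxy and do_locked_work is a hypothetical helper:

    lock = remotelock(remote)
    try:
        do_locked_work(remote)
    finally:
        lock.release()  # __del__ would also unlock if release() were missed
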
@@ -1,43 +1,42 @@
# repo.py - repository base classes for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from i18n import _

class RepoError(Exception):
    pass

class NoCapability(RepoError):
    pass

class repository(object):
    def capable(self, name):
        '''tell whether repo supports named capability.
        return False if not supported.
        if boolean capability, return True.
        if string capability, return string.'''
        if name in self.capabilities:
            return True
        name_eq = name + '='
        for cap in self.capabilities:
            if cap.startswith(name_eq):
                return cap[len(name_eq):]
        return False

    def requirecap(self, name, purpose):
        '''raise an exception if the given capability is not present'''
        if not self.capable(name):
            raise NoCapability(_('cannot %s; remote repository does not '
                                 'support the %r capability') %
                               (purpose, name))

    def local(self):
        return False

    def cancopy(self):
        return self.local()
-
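capable()'s three-way contract is easiest to see against a concrete capability list. A sketch using a hypothetical subclass; the 'unbundle=...' value mirrors the comma-separated format Mercurial servers advertise:

    class demorepo(repository):
        capabilities = ['lookup', 'unbundle=HG10GZ,HG10BZ,HG10UN']

    r = demorepo()
    r.capable('lookup')    # True  (boolean capability)
    r.capable('unbundle')  # 'HG10GZ,HG10BZ,HG10UN'  (string capability)
    r.capable('stream')    # False (not supported)
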
@@ -1,86 +1,83 @@
# statichttprepo.py - simple http repository class for mercurial
#
# This provides read-only repo access to repositories exported via static http
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from i18n import _
import changelog, httprangereader
import repo, localrepo, manifest, util
import urllib, urllib2, errno

class rangereader(httprangereader.httprangereader):
    def read(self, size=None):
        try:
            return httprangereader.httprangereader.read(self, size)
        except urllib2.HTTPError, inst:
            num = inst.code == 404 and errno.ENOENT or None
            raise IOError(num, inst)
        except urllib2.URLError, inst:
            raise IOError(None, inst.reason[1])

def opener(base):
    """return a function that opens files over http"""
    p = base
    def o(path, mode="r"):
        f = "/".join((p, urllib.quote(path)))
        return rangereader(f)
    return o

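A sketch of what opener() hands back, with a placeholder URL:

    o = opener('http://example.com/repo/.hg')
    f = o('00changelog.i')   # a rangereader over that file's URL
    data = f.read(4096)      # served via HTTP range requests
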
class statichttprepository(localrepo.localrepository):
    def __init__(self, ui, path):
        self._url = path
        self.ui = ui

        self.path = path.rstrip('/') + "/.hg"
        self.opener = opener(self.path)

        # find requirements
        try:
            requirements = self.opener("requires").read().splitlines()
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                msg = _("'%s' does not appear to be an hg repository") % path
                raise repo.RepoError(msg)
            else:
                requirements = []

        # check them
        for r in requirements:
            if r not in self.supported:
                raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = self.path + "/store"
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(opener(self.spath), self.encodefn)

        self.manifest = manifest.manifest(self.sopener)
        self.changelog = changelog.changelog(self.sopener)
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None

    def url(self):
        return 'static-' + self._url

-    def dev(self):
-        return -1
-
    def local(self):
        return False

def instance(ui, path, create):
    if create:
        raise util.Abort(_('cannot create new static-http repository'))
    return statichttprepository(ui, path[7:])
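A usage sketch with a placeholder URL; 'static-http://...' paths reach instance(), which drops the seven-character 'static-' prefix (path[7:]) before constructing the repository:

    r = instance(ui, 'static-http://example.com/repo', create=False)
    r.local()   # False -- read-only, no working copy
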