repo classes: remove unused dev() method
Matt Mackall
r6312:08800489 default
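The change is mechanical: the diff below drops localrepository.dev(), which returned os.lstat(self.path).st_dev, and the bundlerepository.dev() override, which returned -1. Neither has any remaining caller. If device identity were ever needed again, the OS can be asked directly; a minimal sketch, assuming repo is a localrepository instance:

    import os
    dev = os.lstat(repo.path).st_dev   # device number of the .hg directory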
@@ -1,283 +1,280 @@
1 1 """
2 2 bundlerepo.py - repository class for viewing uncompressed bundles
3 3
4 4 This provides a read-only repository interface to bundles as if
5 5 they were part of the actual repository.
6 6
7 7 Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 from node import hex, nullid, short
14 14 from i18n import _
15 15 import changegroup, util, os, struct, bz2, tempfile, mdiff
16 16 import localrepo, changelog, manifest, filelog, revlog
17 17
18 18 class bundlerevlog(revlog.revlog):
19 19 def __init__(self, opener, indexfile, bundlefile,
20 20 linkmapper=None):
21 21 # How it works:
22 22 # to retrieve a revision, we need to know the offset of
23 23 # the revision in the bundlefile (an opened file).
24 24 #
25 25 # We store this offset in the index (start). To differentiate a
26 26 # rev in the bundle from a rev in the revlog, we check
27 27 # len(index[r]). If the tuple is bigger than 7, it is a bundle
28 28 # entry (bigger because we also store the node the delta is against)
29 29 #
30 30 revlog.revlog.__init__(self, opener, indexfile)
31 31 self.bundlefile = bundlefile
32 32 self.basemap = {}
33 33 def chunkpositer():
34 34 for chunk in changegroup.chunkiter(bundlefile):
35 35 pos = bundlefile.tell()
36 36 yield chunk, pos - len(chunk)
37 37 n = self.count()
38 38 prev = None
39 39 for chunk, start in chunkpositer():
40 40 size = len(chunk)
41 41 if size < 80:
42 42 raise util.Abort("invalid changegroup")
43 43 start += 80
44 44 size -= 80
45 45 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
46 46 if node in self.nodemap:
47 47 prev = node
48 48 continue
49 49 for p in (p1, p2):
50 50 if not p in self.nodemap:
51 51 raise revlog.LookupError(p, self.indexfile,
52 52 _("unknown parent"))
53 53 if linkmapper is None:
54 54 link = n
55 55 else:
56 56 link = linkmapper(cs)
57 57
58 58 if not prev:
59 59 prev = p1
60 60 # start, size, full unc. size, base (unused), link, p1, p2, node
61 61 e = (revlog.offset_type(start, 0), size, -1, -1, link,
62 62 self.rev(p1), self.rev(p2), node)
63 63 self.basemap[n] = prev
64 64 self.index.insert(-1, e)
65 65 self.nodemap[node] = n
66 66 prev = node
67 67 n += 1
68 68
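Once __init__ has run, telling bundle-backed revisions apart is pure bookkeeping. A minimal usage sketch, assuming blog is a constructed bundlerevlog and node is a node it knows about:

    rev = blog.rev(node)
    if blog.bundle(rev):               # rev's data lives in the bundle file
        base = blog.bundlebase(rev)    # node the stored delta applies against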
69 69 def bundle(self, rev):
70 70 """is rev from the bundle"""
71 71 if rev < 0:
72 72 return False
73 73 return rev in self.basemap
74 74 def bundlebase(self, rev): return self.basemap[rev]
75 75 def chunk(self, rev, df=None, cachelen=4096):
76 76 # Warning: for a rev that comes from the bundle, the diff is
77 77 # against bundlebase, not against rev - 1
78 78 # XXX: could use some caching
79 79 if not self.bundle(rev):
80 80 return revlog.revlog.chunk(self, rev, df)
81 81 self.bundlefile.seek(self.start(rev))
82 82 return self.bundlefile.read(self.length(rev))
83 83
84 84 def revdiff(self, rev1, rev2):
85 85 """return or calculate a delta between two revisions"""
86 86 if self.bundle(rev1) and self.bundle(rev2):
87 87 # hot path for bundle
88 88 revb = self.rev(self.bundlebase(rev2))
89 89 if revb == rev1:
90 90 return self.chunk(rev2)
91 91 elif not self.bundle(rev1) and not self.bundle(rev2):
92 92 return revlog.revlog.revdiff(self, rev1, rev2)
93 93
94 94 return mdiff.textdiff(self.revision(self.node(rev1)),
95 95 self.revision(self.node(rev2)))
96 96
97 97 def revision(self, node):
98 98 """return an uncompressed revision of a given"""
99 99 if node == nullid: return ""
100 100
101 101 text = None
102 102 chain = []
103 103 iter_node = node
104 104 rev = self.rev(iter_node)
105 105 # reconstruct the revision if it is from a changegroup
106 106 while self.bundle(rev):
107 107 if self._cache and self._cache[0] == iter_node:
108 108 text = self._cache[2]
109 109 break
110 110 chain.append(rev)
111 111 iter_node = self.bundlebase(rev)
112 112 rev = self.rev(iter_node)
113 113 if text is None:
114 114 text = revlog.revlog.revision(self, iter_node)
115 115
116 116 while chain:
117 117 delta = self.chunk(chain.pop())
118 118 text = mdiff.patches(text, [delta])
119 119
120 120 p1, p2 = self.parents(node)
121 121 if node != revlog.hash(text, p1, p2):
122 122 raise revlog.RevlogError(_("integrity check failed on %s:%d")
123 123 % (self.datafile, self.rev(node)))
124 124
125 125 self._cache = (node, self.rev(node), text)
126 126 return text
127 127
128 128 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
129 129 raise NotImplementedError
130 130 def addgroup(self, revs, linkmapper, transaction, unique=0):
131 131 raise NotImplementedError
132 132 def strip(self, rev, minlink):
133 133 raise NotImplementedError
134 134 def checksize(self):
135 135 raise NotImplementedError
136 136
137 137 class bundlechangelog(bundlerevlog, changelog.changelog):
138 138 def __init__(self, opener, bundlefile):
139 139 changelog.changelog.__init__(self, opener)
140 140 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile)
141 141
142 142 class bundlemanifest(bundlerevlog, manifest.manifest):
143 143 def __init__(self, opener, bundlefile, linkmapper):
144 144 manifest.manifest.__init__(self, opener)
145 145 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
146 146 linkmapper)
147 147
148 148 class bundlefilelog(bundlerevlog, filelog.filelog):
149 149 def __init__(self, opener, path, bundlefile, linkmapper):
150 150 filelog.filelog.__init__(self, opener, path)
151 151 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
152 152 linkmapper)
153 153
154 154 class bundlerepository(localrepo.localrepository):
155 155 def __init__(self, ui, path, bundlename):
156 156 localrepo.localrepository.__init__(self, ui, path)
157 157
158 158 if path:
159 159 self._url = 'bundle:' + path + '+' + bundlename
160 160 else:
161 161 self._url = 'bundle:' + bundlename
162 162
163 163 self.tempfile = None
164 164 self.bundlefile = open(bundlename, "rb")
165 165 header = self.bundlefile.read(6)
166 166 if not header.startswith("HG"):
167 167 raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
168 168 elif not header.startswith("HG10"):
169 169 raise util.Abort(_("%s: unknown bundle version") % bundlename)
170 170 elif header == "HG10BZ":
171 171 fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
172 172 suffix=".hg10un", dir=self.path)
173 173 self.tempfile = temp
174 174 fptemp = os.fdopen(fdtemp, 'wb')
175 175 def generator(f):
176 176 zd = bz2.BZ2Decompressor()
177 177 zd.decompress("BZ")
178 178 for chunk in f:
179 179 yield zd.decompress(chunk)
180 180 gen = generator(util.filechunkiter(self.bundlefile, 4096))
181 181
182 182 try:
183 183 fptemp.write("HG10UN")
184 184 for chunk in gen:
185 185 fptemp.write(chunk)
186 186 finally:
187 187 fptemp.close()
188 188 self.bundlefile.close()
189 189
190 190 self.bundlefile = open(self.tempfile, "rb")
191 191 # seek right after the header
192 192 self.bundlefile.seek(6)
193 193 elif header == "HG10UN":
194 194 # nothing to do
195 195 pass
196 196 else:
197 197 raise util.Abort(_("%s: unknown bundle compression type")
198 198 % bundlename)
199 199 # dict with the mapping 'filename' -> position in the bundle
200 200 self.bundlefilespos = {}
201 201
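The zd.decompress("BZ") call above is deliberate: an HG10BZ bundle is the four bytes "HG10" followed by a raw bzip2 stream, so the six-byte header read consumes the stream's leading "BZ" magic, which has to be handed back to the decompressor before the rest of the file. The same trick in isolation, assuming data holds everything after the header:

    import bz2
    zd = bz2.BZ2Decompressor()
    zd.decompress("BZ")                # re-supply the magic the header read ate
    uncompressed = zd.decompress(data)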
202 202 def __getattr__(self, name):
203 203 if name == 'changelog':
204 204 self.changelog = bundlechangelog(self.sopener, self.bundlefile)
205 205 self.manstart = self.bundlefile.tell()
206 206 return self.changelog
207 207 if name == 'manifest':
208 208 self.bundlefile.seek(self.manstart)
209 209 self.manifest = bundlemanifest(self.sopener, self.bundlefile,
210 210 self.changelog.rev)
211 211 self.filestart = self.bundlefile.tell()
212 212 return self.manifest
213 213 if name == 'manstart':
214 214 self.changelog
215 215 return self.manstart
216 216 if name == 'filestart':
217 217 self.manifest
218 218 return self.filestart
219 219 return localrepo.localrepository.__getattr__(self, name)
220 220
221 221 def url(self):
222 222 return self._url
223 223
224 def dev(self):
225 return -1
226
227 224 def file(self, f):
228 225 if not self.bundlefilespos:
229 226 self.bundlefile.seek(self.filestart)
230 227 while 1:
231 228 chunk = changegroup.getchunk(self.bundlefile)
232 229 if not chunk:
233 230 break
234 231 self.bundlefilespos[chunk] = self.bundlefile.tell()
235 232 for c in changegroup.chunkiter(self.bundlefile):
236 233 pass
237 234
238 235 if f[0] == '/':
239 236 f = f[1:]
240 237 if f in self.bundlefilespos:
241 238 self.bundlefile.seek(self.bundlefilespos[f])
242 239 return bundlefilelog(self.sopener, f, self.bundlefile,
243 240 self.changelog.rev)
244 241 else:
245 242 return filelog.filelog(self.sopener, f)
246 243
247 244 def close(self):
248 245 """Close assigned bundle file immediately."""
249 246 self.bundlefile.close()
250 247
251 248 def __del__(self):
252 249 bundlefile = getattr(self, 'bundlefile', None)
253 250 if bundlefile and not bundlefile.closed:
254 251 bundlefile.close()
255 252 tempfile = getattr(self, 'tempfile', None)
256 253 if tempfile is not None:
257 254 os.unlink(tempfile)
258 255
259 256 def instance(ui, path, create):
260 257 if create:
261 258 raise util.Abort(_('cannot create new bundle repository'))
262 259 parentpath = ui.config("bundle", "mainreporoot", "")
263 260 if parentpath:
264 261 # Try to make the full path relative so we get a nice, short URL.
265 262 # In particular, we don't want temp dir names in test outputs.
266 263 cwd = os.getcwd()
267 264 if parentpath == cwd:
268 265 parentpath = ''
269 266 else:
270 267 cwd = os.path.join(cwd,'')
271 268 if parentpath.startswith(cwd):
272 269 parentpath = parentpath[len(cwd):]
273 270 path = util.drop_scheme('file', path)
274 271 if path.startswith('bundle:'):
275 272 path = util.drop_scheme('bundle', path)
276 273 s = path.split("+", 1)
277 274 if len(s) == 1:
278 275 repopath, bundlename = parentpath, s[0]
279 276 else:
280 277 repopath, bundlename = s
281 278 else:
282 279 repopath, bundlename = parentpath, path
283 280 return bundlerepository(ui, repopath, bundlename)
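instance() is what makes "bundle:" URLs work end to end. A minimal usage sketch with hypothetical paths, assuming a configured ui object and a real repository at the first path:

    # view /tmp/incoming.hg as a read-only overlay on /home/user/repo
    repo = instance(ui, "bundle:/home/user/repo+/tmp/incoming.hg", create=False)
    assert repo.url() == "bundle:/home/user/repo+/tmp/incoming.hg"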
@@ -1,2123 +1,2120 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 17 supported = ('revlogv1', 'store')
18 18
19 19 def __init__(self, parentui, path=None, create=0):
20 20 repo.repository.__init__(self)
21 21 self.root = os.path.realpath(path)
22 22 self.path = os.path.join(self.root, ".hg")
23 23 self.origroot = path
24 24 self.opener = util.opener(self.path)
25 25 self.wopener = util.opener(self.root)
26 26
27 27 if not os.path.isdir(self.path):
28 28 if create:
29 29 if not os.path.exists(path):
30 30 os.mkdir(path)
31 31 os.mkdir(self.path)
32 32 requirements = ["revlogv1"]
33 33 if parentui.configbool('format', 'usestore', True):
34 34 os.mkdir(os.path.join(self.path, "store"))
35 35 requirements.append("store")
36 36 # create an invalid changelog
37 37 self.opener("00changelog.i", "a").write(
38 38 '\0\0\0\2' # represents revlogv2
39 39 ' dummy changelog to prevent using the old repo layout'
40 40 )
41 41 reqfile = self.opener("requires", "w")
42 42 for r in requirements:
43 43 reqfile.write("%s\n" % r)
44 44 reqfile.close()
45 45 else:
46 46 raise repo.RepoError(_("repository %s not found") % path)
47 47 elif create:
48 48 raise repo.RepoError(_("repository %s already exists") % path)
49 49 else:
50 50 # find requirements
51 51 try:
52 52 requirements = self.opener("requires").read().splitlines()
53 53 except IOError, inst:
54 54 if inst.errno != errno.ENOENT:
55 55 raise
56 56 requirements = []
57 57 # check them
58 58 for r in requirements:
59 59 if r not in self.supported:
60 60 raise repo.RepoError(_("requirement '%s' not supported") % r)
61 61
62 62 # setup store
63 63 if "store" in requirements:
64 64 self.encodefn = util.encodefilename
65 65 self.decodefn = util.decodefilename
66 66 self.spath = os.path.join(self.path, "store")
67 67 else:
68 68 self.encodefn = lambda x: x
69 69 self.decodefn = lambda x: x
70 70 self.spath = self.path
71 71
72 72 try:
73 73 # files in .hg/ will be created using this mode
74 74 mode = os.stat(self.spath).st_mode
75 75 # avoid some useless chmods
76 76 if (0777 & ~util._umask) == (0777 & mode):
77 77 mode = None
78 78 except OSError:
79 79 mode = None
80 80
81 81 self._createmode = mode
82 82 self.opener.createmode = mode
83 83 sopener = util.opener(self.spath)
84 84 sopener.createmode = mode
85 85 self.sopener = util.encodedopener(sopener, self.encodefn)
86 86
87 87 self.ui = ui.ui(parentui=parentui)
88 88 try:
89 89 self.ui.readconfig(self.join("hgrc"), self.root)
90 90 extensions.loadall(self.ui)
91 91 except IOError:
92 92 pass
93 93
94 94 self.tagscache = None
95 95 self._tagstypecache = None
96 96 self.branchcache = None
97 97 self._ubranchcache = None # UTF-8 version of branchcache
98 98 self._branchcachetip = None
99 99 self.nodetagscache = None
100 100 self.filterpats = {}
101 101 self._datafilters = {}
102 102 self._transref = self._lockref = self._wlockref = None
103 103
104 104 def __getattr__(self, name):
105 105 if name == 'changelog':
106 106 self.changelog = changelog.changelog(self.sopener)
107 107 self.sopener.defversion = self.changelog.version
108 108 return self.changelog
109 109 if name == 'manifest':
110 110 self.changelog
111 111 self.manifest = manifest.manifest(self.sopener)
112 112 return self.manifest
113 113 if name == 'dirstate':
114 114 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
115 115 return self.dirstate
116 116 else:
117 117 raise AttributeError, name
118 118
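This __getattr__ gives localrepository lazy, compute-once attributes: the first access builds the object and stores it as an ordinary instance attribute, which shadows __getattr__ on every later lookup. The idiom in isolation, with a hypothetical compute():

    class Lazy(object):
        def __getattr__(self, name):
            if name == 'expensive':
                self.expensive = compute()   # instance attribute now wins
                return self.expensive
            raise AttributeError, name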
119 119 def url(self):
120 120 return 'file:' + self.root
121 121
122 122 def hook(self, name, throw=False, **args):
123 123 return hook.hook(self.ui, self, name, throw, **args)
124 124
125 125 tag_disallowed = ':\r\n'
126 126
127 127 def _tag(self, name, node, message, local, user, date, parent=None,
128 128 extra={}):
129 129 use_dirstate = parent is None
130 130
131 131 for c in self.tag_disallowed:
132 132 if c in name:
133 133 raise util.Abort(_('%r cannot be used in a tag name') % c)
134 134
135 135 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
136 136
137 137 def writetag(fp, name, munge, prevtags):
138 138 fp.seek(0, 2)
139 139 if prevtags and prevtags[-1] != '\n':
140 140 fp.write('\n')
141 141 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
142 142 fp.close()
143 143
144 144 prevtags = ''
145 145 if local:
146 146 try:
147 147 fp = self.opener('localtags', 'r+')
148 148 except IOError, err:
149 149 fp = self.opener('localtags', 'a')
150 150 else:
151 151 prevtags = fp.read()
152 152
153 153 # local tags are stored in the current charset
154 154 writetag(fp, name, None, prevtags)
155 155 self.hook('tag', node=hex(node), tag=name, local=local)
156 156 return
157 157
158 158 if use_dirstate:
159 159 try:
160 160 fp = self.wfile('.hgtags', 'rb+')
161 161 except IOError, err:
162 162 fp = self.wfile('.hgtags', 'ab')
163 163 else:
164 164 prevtags = fp.read()
165 165 else:
166 166 try:
167 167 prevtags = self.filectx('.hgtags', parent).data()
168 168 except revlog.LookupError:
169 169 pass
170 170 fp = self.wfile('.hgtags', 'wb')
171 171 if prevtags:
172 172 fp.write(prevtags)
173 173
174 174 # committed tags are stored in UTF-8
175 175 writetag(fp, name, util.fromlocal, prevtags)
176 176
177 177 if use_dirstate and '.hgtags' not in self.dirstate:
178 178 self.add(['.hgtags'])
179 179
180 180 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
181 181 extra=extra)
182 182
183 183 self.hook('tag', node=hex(node), tag=name, local=local)
184 184
185 185 return tagnode
186 186
187 187 def tag(self, name, node, message, local, user, date):
188 188 '''tag a revision with a symbolic name.
189 189
190 190 if local is True, the tag is stored in a per-repository file.
191 191 otherwise, it is stored in the .hgtags file, and a new
192 192 changeset is committed with the change.
193 193
194 194 keyword arguments:
195 195
196 196 local: whether to store tag in non-version-controlled file
197 197 (default False)
198 198
199 199 message: commit message to use if committing
200 200
201 201 user: name of user to use if committing
202 202
203 203 date: date tuple to use if committing'''
204 204
205 205 for x in self.status()[:5]:
206 206 if '.hgtags' in x:
207 207 raise util.Abort(_('working copy of .hgtags is changed '
208 208 '(please commit .hgtags manually)'))
209 209
210 210 self._tag(name, node, message, local, user, date)
211 211
212 212 def tags(self):
213 213 '''return a mapping of tag to node'''
214 214 if self.tagscache:
215 215 return self.tagscache
216 216
217 217 globaltags = {}
218 218 tagtypes = {}
219 219
220 220 def readtags(lines, fn, tagtype):
221 221 filetags = {}
222 222 count = 0
223 223
224 224 def warn(msg):
225 225 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
226 226
227 227 for l in lines:
228 228 count += 1
229 229 if not l:
230 230 continue
231 231 s = l.split(" ", 1)
232 232 if len(s) != 2:
233 233 warn(_("cannot parse entry"))
234 234 continue
235 235 node, key = s
236 236 key = util.tolocal(key.strip()) # stored in UTF-8
237 237 try:
238 238 bin_n = bin(node)
239 239 except TypeError:
240 240 warn(_("node '%s' is not well formed") % node)
241 241 continue
242 242 if bin_n not in self.changelog.nodemap:
243 243 warn(_("tag '%s' refers to unknown node") % key)
244 244 continue
245 245
246 246 h = []
247 247 if key in filetags:
248 248 n, h = filetags[key]
249 249 h.append(n)
250 250 filetags[key] = (bin_n, h)
251 251
252 252 for k, nh in filetags.items():
253 253 if k not in globaltags:
254 254 globaltags[k] = nh
255 255 tagtypes[k] = tagtype
256 256 continue
257 257
258 258 # we prefer the global tag if:
259 259 # it supersedes us OR
260 260 # we supersede each other and it has a higher rank
261 261 # otherwise we win because we're tip-most
262 262 an, ah = nh
263 263 bn, bh = globaltags[k]
264 264 if (bn != an and an in bh and
265 265 (bn not in ah or len(bh) > len(ah))):
266 266 an = bn
267 267 ah.extend([n for n in bh if n not in ah])
268 268 globaltags[k] = an, ah
269 269 tagtypes[k] = tagtype
270 270
271 271 # read the tags file from each head, ending with the tip
272 272 f = None
273 273 for rev, node, fnode in self._hgtagsnodes():
274 274 f = (f and f.filectx(fnode) or
275 275 self.filectx('.hgtags', fileid=fnode))
276 276 readtags(f.data().splitlines(), f, "global")
277 277
278 278 try:
279 279 data = util.fromlocal(self.opener("localtags").read())
280 280 # localtags are stored in the local character set
281 281 # while the internal tag table is stored in UTF-8
282 282 readtags(data.splitlines(), "localtags", "local")
283 283 except IOError:
284 284 pass
285 285
286 286 self.tagscache = {}
287 287 self._tagstypecache = {}
288 288 for k,nh in globaltags.items():
289 289 n = nh[0]
290 290 if n != nullid:
291 291 self.tagscache[k] = n
292 292 self._tagstypecache[k] = tagtypes[k]
293 293 self.tagscache['tip'] = self.changelog.tip()
294 294
295 295 return self.tagscache
296 296
297 297 def tagtype(self, tagname):
298 298 '''
299 299 return the type of the given tag. result can be:
300 300
301 301 'local' : a local tag
302 302 'global' : a global tag
303 303 None : tag does not exist
304 304 '''
305 305
306 306 self.tags()
307 307
308 308 return self._tagstypecache.get(tagname)
309 309
310 310 def _hgtagsnodes(self):
311 311 heads = self.heads()
312 312 heads.reverse()
313 313 last = {}
314 314 ret = []
315 315 for node in heads:
316 316 c = self.changectx(node)
317 317 rev = c.rev()
318 318 try:
319 319 fnode = c.filenode('.hgtags')
320 320 except revlog.LookupError:
321 321 continue
322 322 ret.append((rev, node, fnode))
323 323 if fnode in last:
324 324 ret[last[fnode]] = None
325 325 last[fnode] = len(ret) - 1
326 326 return [item for item in ret if item]
327 327
328 328 def tagslist(self):
329 329 '''return a list of tags ordered by revision'''
330 330 l = []
331 331 for t, n in self.tags().items():
332 332 try:
333 333 r = self.changelog.rev(n)
334 334 except:
335 335 r = -2 # sort to the beginning of the list if unknown
336 336 l.append((r, t, n))
337 337 l.sort()
338 338 return [(t, n) for r, t, n in l]
339 339
340 340 def nodetags(self, node):
341 341 '''return the tags associated with a node'''
342 342 if not self.nodetagscache:
343 343 self.nodetagscache = {}
344 344 for t, n in self.tags().items():
345 345 self.nodetagscache.setdefault(n, []).append(t)
346 346 return self.nodetagscache.get(node, [])
347 347
348 348 def _branchtags(self, partial, lrev):
349 349 tiprev = self.changelog.count() - 1
350 350 if lrev != tiprev:
351 351 self._updatebranchcache(partial, lrev+1, tiprev+1)
352 352 self._writebranchcache(partial, self.changelog.tip(), tiprev)
353 353
354 354 return partial
355 355
356 356 def branchtags(self):
357 357 tip = self.changelog.tip()
358 358 if self.branchcache is not None and self._branchcachetip == tip:
359 359 return self.branchcache
360 360
361 361 oldtip = self._branchcachetip
362 362 self._branchcachetip = tip
363 363 if self.branchcache is None:
364 364 self.branchcache = {} # avoid recursion in changectx
365 365 else:
366 366 self.branchcache.clear() # keep using the same dict
367 367 if oldtip is None or oldtip not in self.changelog.nodemap:
368 368 partial, last, lrev = self._readbranchcache()
369 369 else:
370 370 lrev = self.changelog.rev(oldtip)
371 371 partial = self._ubranchcache
372 372
373 373 self._branchtags(partial, lrev)
374 374
375 375 # the branch cache is stored on disk as UTF-8, but in the local
376 376 # charset internally
377 377 for k, v in partial.items():
378 378 self.branchcache[util.tolocal(k)] = v
379 379 self._ubranchcache = partial
380 380 return self.branchcache
381 381
382 382 def _readbranchcache(self):
383 383 partial = {}
384 384 try:
385 385 f = self.opener("branch.cache")
386 386 lines = f.read().split('\n')
387 387 f.close()
388 388 except (IOError, OSError):
389 389 return {}, nullid, nullrev
390 390
391 391 try:
392 392 last, lrev = lines.pop(0).split(" ", 1)
393 393 last, lrev = bin(last), int(lrev)
394 394 if not (lrev < self.changelog.count() and
395 395 self.changelog.node(lrev) == last): # sanity check
396 396 # invalidate the cache
397 397 raise ValueError('invalidating branch cache (tip differs)')
398 398 for l in lines:
399 399 if not l: continue
400 400 node, label = l.split(" ", 1)
401 401 partial[label.strip()] = bin(node)
402 402 except (KeyboardInterrupt, util.SignalInterrupt):
403 403 raise
404 404 except Exception, inst:
405 405 if self.ui.debugflag:
406 406 self.ui.warn(str(inst), '\n')
407 407 partial, last, lrev = {}, nullid, nullrev
408 408 return partial, last, lrev
409 409
410 410 def _writebranchcache(self, branches, tip, tiprev):
411 411 try:
412 412 f = self.opener("branch.cache", "w", atomictemp=True)
413 413 f.write("%s %s\n" % (hex(tip), tiprev))
414 414 for label, node in branches.iteritems():
415 415 f.write("%s %s\n" % (hex(node), label))
416 416 f.rename()
417 417 except (IOError, OSError):
418 418 pass
419 419
420 420 def _updatebranchcache(self, partial, start, end):
421 421 for r in xrange(start, end):
422 422 c = self.changectx(r)
423 423 b = c.branch()
424 424 partial[b] = c.node()
425 425
426 426 def lookup(self, key):
427 427 if key == '.':
428 428 key, second = self.dirstate.parents()
429 429 if key == nullid:
430 430 raise repo.RepoError(_("no revision checked out"))
431 431 if second != nullid:
432 432 self.ui.warn(_("warning: working directory has two parents, "
433 433 "tag '.' uses the first\n"))
434 434 elif key == 'null':
435 435 return nullid
436 436 n = self.changelog._match(key)
437 437 if n:
438 438 return n
439 439 if key in self.tags():
440 440 return self.tags()[key]
441 441 if key in self.branchtags():
442 442 return self.branchtags()[key]
443 443 n = self.changelog._partialmatch(key)
444 444 if n:
445 445 return n
446 446 try:
447 447 if len(key) == 20:
448 448 key = hex(key)
449 449 except:
450 450 pass
451 451 raise repo.RepoError(_("unknown revision '%s'") % key)
452 452
453 def dev(self):
454 return os.lstat(self.path).st_dev
455
456 453 def local(self):
457 454 return True
458 455
459 456 def join(self, f):
460 457 return os.path.join(self.path, f)
461 458
462 459 def sjoin(self, f):
463 460 f = self.encodefn(f)
464 461 return os.path.join(self.spath, f)
465 462
466 463 def wjoin(self, f):
467 464 return os.path.join(self.root, f)
468 465
469 466 def file(self, f):
470 467 if f[0] == '/':
471 468 f = f[1:]
472 469 return filelog.filelog(self.sopener, f)
473 470
474 471 def changectx(self, changeid=None):
475 472 return context.changectx(self, changeid)
476 473
477 474 def workingctx(self):
478 475 return context.workingctx(self)
479 476
480 477 def parents(self, changeid=None):
481 478 '''
482 479 get list of changectxs for parents of changeid or working directory
483 480 '''
484 481 if changeid is None:
485 482 pl = self.dirstate.parents()
486 483 else:
487 484 n = self.changelog.lookup(changeid)
488 485 pl = self.changelog.parents(n)
489 486 if pl[1] == nullid:
490 487 return [self.changectx(pl[0])]
491 488 return [self.changectx(pl[0]), self.changectx(pl[1])]
492 489
493 490 def filectx(self, path, changeid=None, fileid=None):
494 491 """changeid can be a changeset revision, node, or tag.
495 492 fileid can be a file revision or node."""
496 493 return context.filectx(self, path, changeid, fileid)
497 494
498 495 def getcwd(self):
499 496 return self.dirstate.getcwd()
500 497
501 498 def pathto(self, f, cwd=None):
502 499 return self.dirstate.pathto(f, cwd)
503 500
504 501 def wfile(self, f, mode='r'):
505 502 return self.wopener(f, mode)
506 503
507 504 def _link(self, f):
508 505 return os.path.islink(self.wjoin(f))
509 506
510 507 def _filter(self, filter, filename, data):
511 508 if filter not in self.filterpats:
512 509 l = []
513 510 for pat, cmd in self.ui.configitems(filter):
514 511 mf = util.matcher(self.root, "", [pat], [], [])[1]
515 512 fn = None
516 513 params = cmd
517 514 for name, filterfn in self._datafilters.iteritems():
518 515 if cmd.startswith(name):
519 516 fn = filterfn
520 517 params = cmd[len(name):].lstrip()
521 518 break
522 519 if not fn:
523 520 fn = lambda s, c, **kwargs: util.filter(s, c)
524 521 # Wrap old filters not supporting keyword arguments
525 522 if not inspect.getargspec(fn)[2]:
526 523 oldfn = fn
527 524 fn = lambda s, c, **kwargs: oldfn(s, c)
528 525 l.append((mf, fn, params))
529 526 self.filterpats[filter] = l
530 527
531 528 for mf, fn, cmd in self.filterpats[filter]:
532 529 if mf(filename):
533 530 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
534 531 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
535 532 break
536 533
537 534 return data
538 535
539 536 def adddatafilter(self, name, filter):
540 537 self._datafilters[name] = filter
541 538
542 539 def wread(self, filename):
543 540 if self._link(filename):
544 541 data = os.readlink(self.wjoin(filename))
545 542 else:
546 543 data = self.wopener(filename, 'r').read()
547 544 return self._filter("encode", filename, data)
548 545
549 546 def wwrite(self, filename, data, flags):
550 547 data = self._filter("decode", filename, data)
551 548 try:
552 549 os.unlink(self.wjoin(filename))
553 550 except OSError:
554 551 pass
555 552 self.wopener(filename, 'w').write(data)
556 553 util.set_flags(self.wjoin(filename), flags)
557 554
558 555 def wwritedata(self, filename, data):
559 556 return self._filter("decode", filename, data)
560 557
561 558 def transaction(self):
562 559 if self._transref and self._transref():
563 560 return self._transref().nest()
564 561
565 562 # abort here if the journal already exists
566 563 if os.path.exists(self.sjoin("journal")):
567 564 raise repo.RepoError(_("journal already exists - run hg recover"))
568 565
569 566 # save dirstate for rollback
570 567 try:
571 568 ds = self.opener("dirstate").read()
572 569 except IOError:
573 570 ds = ""
574 571 self.opener("journal.dirstate", "w").write(ds)
575 572 self.opener("journal.branch", "w").write(self.dirstate.branch())
576 573
577 574 renames = [(self.sjoin("journal"), self.sjoin("undo")),
578 575 (self.join("journal.dirstate"), self.join("undo.dirstate")),
579 576 (self.join("journal.branch"), self.join("undo.branch"))]
580 577 tr = transaction.transaction(self.ui.warn, self.sopener,
581 578 self.sjoin("journal"),
582 579 aftertrans(renames),
583 580 self._createmode)
584 581 self._transref = weakref.ref(tr)
585 582 return tr
586 583
587 584 def recover(self):
588 585 l = self.lock()
589 586 try:
590 587 if os.path.exists(self.sjoin("journal")):
591 588 self.ui.status(_("rolling back interrupted transaction\n"))
592 589 transaction.rollback(self.sopener, self.sjoin("journal"))
593 590 self.invalidate()
594 591 return True
595 592 else:
596 593 self.ui.warn(_("no interrupted transaction available\n"))
597 594 return False
598 595 finally:
599 596 del l
600 597
601 598 def rollback(self):
602 599 wlock = lock = None
603 600 try:
604 601 wlock = self.wlock()
605 602 lock = self.lock()
606 603 if os.path.exists(self.sjoin("undo")):
607 604 self.ui.status(_("rolling back last transaction\n"))
608 605 transaction.rollback(self.sopener, self.sjoin("undo"))
609 606 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
610 607 try:
611 608 branch = self.opener("undo.branch").read()
612 609 self.dirstate.setbranch(branch)
613 610 except IOError:
614 611 self.ui.warn(_("Named branch could not be reset, "
615 612 "current branch still is: %s\n")
616 613 % util.tolocal(self.dirstate.branch()))
617 614 self.invalidate()
618 615 self.dirstate.invalidate()
619 616 else:
620 617 self.ui.warn(_("no rollback information available\n"))
621 618 finally:
622 619 del lock, wlock
623 620
624 621 def invalidate(self):
625 622 for a in "changelog manifest".split():
626 623 if hasattr(self, a):
627 624 self.__delattr__(a)
628 625 self.tagscache = None
629 626 self._tagstypecache = None
630 627 self.nodetagscache = None
631 628 self.branchcache = None
632 629 self._ubranchcache = None
633 630 self._branchcachetip = None
634 631
635 632 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
636 633 try:
637 634 l = lock.lock(lockname, 0, releasefn, desc=desc)
638 635 except lock.LockHeld, inst:
639 636 if not wait:
640 637 raise
641 638 self.ui.warn(_("waiting for lock on %s held by %r\n") %
642 639 (desc, inst.locker))
643 640 # default to 600 seconds timeout
644 641 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
645 642 releasefn, desc=desc)
646 643 if acquirefn:
647 644 acquirefn()
648 645 return l
649 646
650 647 def lock(self, wait=True):
651 648 if self._lockref and self._lockref():
652 649 return self._lockref()
653 650
654 651 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
655 652 _('repository %s') % self.origroot)
656 653 self._lockref = weakref.ref(l)
657 654 return l
658 655
659 656 def wlock(self, wait=True):
660 657 if self._wlockref and self._wlockref():
661 658 return self._wlockref()
662 659
663 660 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
664 661 self.dirstate.invalidate, _('working directory of %s') %
665 662 self.origroot)
666 663 self._wlockref = weakref.ref(l)
667 664 return l
668 665
669 666 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
670 667 """
671 668 commit an individual file as part of a larger transaction
672 669 """
673 670
674 671 t = self.wread(fn)
675 672 fl = self.file(fn)
676 673 fp1 = manifest1.get(fn, nullid)
677 674 fp2 = manifest2.get(fn, nullid)
678 675
679 676 meta = {}
680 677 cp = self.dirstate.copied(fn)
681 678 if cp:
682 679 # Mark the new revision of this file as a copy of another
683 680 # file. This copy data will effectively act as a parent
684 681 # of this new revision. If this is a merge, the first
685 682 # parent will be the nullid (meaning "look up the copy data")
686 683 # and the second one will be the other parent. For example:
687 684 #
688 685 # 0 --- 1 --- 3 rev1 changes file foo
689 686 # \ / rev2 renames foo to bar and changes it
690 687 # \- 2 -/ rev3 should have bar with all changes and
691 688 # should record that bar descends from
692 689 # bar in rev2 and foo in rev1
693 690 #
694 691 # this allows this merge to succeed:
695 692 #
696 693 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
697 694 # \ / merging rev3 and rev4 should use bar@rev2
698 695 # \- 2 --- 4 as the merge base
699 696 #
700 697 meta["copy"] = cp
701 698 if not manifest2: # not a branch merge
702 699 meta["copyrev"] = hex(manifest1.get(cp, nullid))
703 700 fp2 = nullid
704 701 elif fp2 != nullid: # copied on remote side
705 702 meta["copyrev"] = hex(manifest1.get(cp, nullid))
706 703 elif fp1 != nullid: # copied on local side, reversed
707 704 meta["copyrev"] = hex(manifest2.get(cp))
708 705 fp2 = fp1
709 706 elif cp in manifest2: # directory rename on local side
710 707 meta["copyrev"] = hex(manifest2[cp])
711 708 else: # directory rename on remote side
712 709 meta["copyrev"] = hex(manifest1.get(cp, nullid))
713 710 self.ui.debug(_(" %s: copy %s:%s\n") %
714 711 (fn, cp, meta["copyrev"]))
715 712 fp1 = nullid
716 713 elif fp2 != nullid:
717 714 # is one parent an ancestor of the other?
718 715 fpa = fl.ancestor(fp1, fp2)
719 716 if fpa == fp1:
720 717 fp1, fp2 = fp2, nullid
721 718 elif fpa == fp2:
722 719 fp2 = nullid
723 720
724 721 # is the file unmodified from the parent? report existing entry
725 722 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
726 723 return fp1
727 724
728 725 changelist.append(fn)
729 726 return fl.add(t, meta, tr, linkrev, fp1, fp2)
730 727
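The copy branch above records rename provenance as filelog metadata instead of an extra graph edge. For a plain (non-merge) commit that renames foo to bar, the entry would look roughly like this, with hypothetical manifest contents; both filelog parents end up nullid, which is the readers' cue to consult the metadata:

    meta = {"copy": "foo",                                 # path the content came from
            "copyrev": hex(manifest1.get("foo", nullid))}  # foo's filelog node in p1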
731 728 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
732 729 if p1 is None:
733 730 p1, p2 = self.dirstate.parents()
734 731 return self.commit(files=files, text=text, user=user, date=date,
735 732 p1=p1, p2=p2, extra=extra, empty_ok=True)
736 733
737 734 def commit(self, files=None, text="", user=None, date=None,
738 735 match=util.always, force=False, force_editor=False,
739 736 p1=None, p2=None, extra={}, empty_ok=False):
740 737 wlock = lock = tr = None
741 738 valid = 0 # don't save the dirstate if this isn't set
742 739 if files:
743 740 files = util.unique(files)
744 741 try:
745 742 commit = []
746 743 remove = []
747 744 changed = []
748 745 use_dirstate = (p1 is None) # not rawcommit
749 746 extra = extra.copy()
750 747
751 748 if use_dirstate:
752 749 if files:
753 750 for f in files:
754 751 s = self.dirstate[f]
755 752 if s in 'nma':
756 753 commit.append(f)
757 754 elif s == 'r':
758 755 remove.append(f)
759 756 else:
760 757 self.ui.warn(_("%s not tracked!\n") % f)
761 758 else:
762 759 changes = self.status(match=match)[:5]
763 760 modified, added, removed, deleted, unknown = changes
764 761 commit = modified + added
765 762 remove = removed
766 763 else:
767 764 commit = files
768 765
769 766 if use_dirstate:
770 767 p1, p2 = self.dirstate.parents()
771 768 update_dirstate = True
772 769 else:
773 770 p1, p2 = p1, p2 or nullid
774 771 update_dirstate = (self.dirstate.parents()[0] == p1)
775 772
776 773 c1 = self.changelog.read(p1)
777 774 c2 = self.changelog.read(p2)
778 775 m1 = self.manifest.read(c1[0]).copy()
779 776 m2 = self.manifest.read(c2[0])
780 777
781 778 if use_dirstate:
782 779 branchname = self.workingctx().branch()
783 780 try:
784 781 branchname = branchname.decode('UTF-8').encode('UTF-8')
785 782 except UnicodeDecodeError:
786 783 raise util.Abort(_('branch name not in UTF-8!'))
787 784 else:
788 785 branchname = ""
789 786
790 787 if use_dirstate:
791 788 oldname = c1[5].get("branch") # stored in UTF-8
792 789 if (not commit and not remove and not force and p2 == nullid
793 790 and branchname == oldname):
794 791 self.ui.status(_("nothing changed\n"))
795 792 return None
796 793
797 794 xp1 = hex(p1)
798 795 if p2 == nullid: xp2 = ''
799 796 else: xp2 = hex(p2)
800 797
801 798 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
802 799
803 800 wlock = self.wlock()
804 801 lock = self.lock()
805 802 tr = self.transaction()
806 803 trp = weakref.proxy(tr)
807 804
808 805 # check in files
809 806 new = {}
810 807 linkrev = self.changelog.count()
811 808 commit.sort()
812 809 is_exec = util.execfunc(self.root, m1.execf)
813 810 is_link = util.linkfunc(self.root, m1.linkf)
814 811 for f in commit:
815 812 self.ui.note(f + "\n")
816 813 try:
817 814 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
818 815 new_exec = is_exec(f)
819 816 new_link = is_link(f)
820 817 if ((not changed or changed[-1] != f) and
821 818 m2.get(f) != new[f]):
822 819 # mention the file in the changelog if some
823 820 # flag changed, even if there was no content
824 821 # change.
825 822 old_exec = m1.execf(f)
826 823 old_link = m1.linkf(f)
827 824 if old_exec != new_exec or old_link != new_link:
828 825 changed.append(f)
829 826 m1.set(f, new_exec, new_link)
830 827 if use_dirstate:
831 828 self.dirstate.normal(f)
832 829
833 830 except (OSError, IOError):
834 831 if use_dirstate:
835 832 self.ui.warn(_("trouble committing %s!\n") % f)
836 833 raise
837 834 else:
838 835 remove.append(f)
839 836
840 837 # update manifest
841 838 m1.update(new)
842 839 remove.sort()
843 840 removed = []
844 841
845 842 for f in remove:
846 843 if f in m1:
847 844 del m1[f]
848 845 removed.append(f)
849 846 elif f in m2:
850 847 removed.append(f)
851 848 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
852 849 (new, removed))
853 850
854 851 # add changeset
855 852 new = new.keys()
856 853 new.sort()
857 854
858 855 user = user or self.ui.username()
859 856 if (not empty_ok and not text) or force_editor:
860 857 edittext = []
861 858 if text:
862 859 edittext.append(text)
863 860 edittext.append("")
864 861 edittext.append(_("HG: Enter commit message."
865 862 " Lines beginning with 'HG:' are removed."))
866 863 edittext.append("HG: --")
867 864 edittext.append("HG: user: %s" % user)
868 865 if p2 != nullid:
869 866 edittext.append("HG: branch merge")
870 867 if branchname:
871 868 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
872 869 edittext.extend(["HG: changed %s" % f for f in changed])
873 870 edittext.extend(["HG: removed %s" % f for f in removed])
874 871 if not changed and not remove:
875 872 edittext.append("HG: no files changed")
876 873 edittext.append("")
877 874 # run editor in the repository root
878 875 olddir = os.getcwd()
879 876 os.chdir(self.root)
880 877 text = self.ui.edit("\n".join(edittext), user)
881 878 os.chdir(olddir)
882 879
883 880 if branchname:
884 881 extra["branch"] = branchname
885 882
886 883 lines = [line.rstrip() for line in text.rstrip().splitlines()]
887 884 while lines and not lines[0]:
888 885 del lines[0]
889 886 if not lines and use_dirstate:
890 887 raise util.Abort(_("empty commit message"))
891 888 text = '\n'.join(lines)
892 889
893 890 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
894 891 user, date, extra)
895 892 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
896 893 parent2=xp2)
897 894 tr.close()
898 895
899 896 if self.branchcache:
900 897 self.branchtags()
901 898
902 899 if use_dirstate or update_dirstate:
903 900 self.dirstate.setparents(n)
904 901 if use_dirstate:
905 902 for f in removed:
906 903 self.dirstate.forget(f)
907 904 valid = 1 # our dirstate updates are complete
908 905
909 906 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
910 907 return n
911 908 finally:
912 909 if not valid: # don't save our updated dirstate
913 910 self.dirstate.invalidate()
914 911 del tr, lock, wlock
915 912
916 913 def walk(self, node=None, files=[], match=util.always, badmatch=None):
917 914 '''
918 915 walk recursively through the directory tree or a given
919 916 changeset, finding all files matched by the match
920 917 function
921 918
922 919 results are yielded in a tuple (src, filename), where src
923 920 is one of:
924 921 'f' the file was found in the directory tree
925 922 'm' the file was only in the dirstate and not in the tree
926 923 'b' file was not found and matched badmatch
927 924 '''
928 925
929 926 if node:
930 927 fdict = dict.fromkeys(files)
931 928 # for dirstate.walk, files=['.'] means "walk the whole tree".
932 929 # follow that here, too
933 930 fdict.pop('.', None)
934 931 mdict = self.manifest.read(self.changelog.read(node)[0])
935 932 mfiles = mdict.keys()
936 933 mfiles.sort()
937 934 for fn in mfiles:
938 935 for ffn in fdict:
939 936 # match if the file is the exact name or a directory
940 937 if ffn == fn or fn.startswith("%s/" % ffn):
941 938 del fdict[ffn]
942 939 break
943 940 if match(fn):
944 941 yield 'm', fn
945 942 ffiles = fdict.keys()
946 943 ffiles.sort()
947 944 for fn in ffiles:
948 945 if badmatch and badmatch(fn):
949 946 if match(fn):
950 947 yield 'b', fn
951 948 else:
952 949 self.ui.warn(_('%s: No such file in rev %s\n')
953 950 % (self.pathto(fn), short(node)))
954 951 else:
955 952 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
956 953 yield src, fn
957 954
958 955 def status(self, node1=None, node2=None, files=[], match=util.always,
959 956 list_ignored=False, list_clean=False, list_unknown=True):
960 957 """return status of files between two nodes or node and working directory
961 958
962 959 If node1 is None, use the first dirstate parent instead.
963 960 If node2 is None, compare node1 with working directory.
964 961 """
965 962
966 963 def fcmp(fn, getnode):
967 964 t1 = self.wread(fn)
968 965 return self.file(fn).cmp(getnode(fn), t1)
969 966
970 967 def mfmatches(node):
971 968 change = self.changelog.read(node)
972 969 mf = self.manifest.read(change[0]).copy()
973 970 for fn in mf.keys():
974 971 if not match(fn):
975 972 del mf[fn]
976 973 return mf
977 974
978 975 modified, added, removed, deleted, unknown = [], [], [], [], []
979 976 ignored, clean = [], []
980 977
981 978 compareworking = False
982 979 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
983 980 compareworking = True
984 981
985 982 if not compareworking:
986 983 # read the manifest from node1 before the manifest from node2,
987 984 # so that we'll hit the manifest cache if we're going through
988 985 # all the revisions in parent->child order.
989 986 mf1 = mfmatches(node1)
990 987
991 988 # are we comparing the working directory?
992 989 if not node2:
993 990 (lookup, modified, added, removed, deleted, unknown,
994 991 ignored, clean) = self.dirstate.status(files, match,
995 992 list_ignored, list_clean,
996 993 list_unknown)
997 994
998 995 # are we comparing working dir against its parent?
999 996 if compareworking:
1000 997 if lookup:
1001 998 fixup = []
1002 999 # do a full compare of any files that might have changed
1003 1000 ctx = self.changectx()
1004 1001 mexec = lambda f: 'x' in ctx.fileflags(f)
1005 1002 mlink = lambda f: 'l' in ctx.fileflags(f)
1006 1003 is_exec = util.execfunc(self.root, mexec)
1007 1004 is_link = util.linkfunc(self.root, mlink)
1008 1005 def flags(f):
1009 1006 return is_link(f) and 'l' or is_exec(f) and 'x' or ''
1010 1007 for f in lookup:
1011 1008 if (f not in ctx or flags(f) != ctx.fileflags(f)
1012 1009 or ctx[f].cmp(self.wread(f))):
1013 1010 modified.append(f)
1014 1011 else:
1015 1012 fixup.append(f)
1016 1013 if list_clean:
1017 1014 clean.append(f)
1018 1015
1019 1016 # update dirstate for files that are actually clean
1020 1017 if fixup:
1021 1018 wlock = None
1022 1019 try:
1023 1020 try:
1024 1021 wlock = self.wlock(False)
1025 1022 except lock.LockException:
1026 1023 pass
1027 1024 if wlock:
1028 1025 for f in fixup:
1029 1026 self.dirstate.normal(f)
1030 1027 finally:
1031 1028 del wlock
1032 1029 else:
1033 1030 # we are comparing working dir against non-parent
1034 1031 # generate a pseudo-manifest for the working dir
1035 1032 # XXX: create it in dirstate.py ?
1036 1033 mf2 = mfmatches(self.dirstate.parents()[0])
1037 1034 is_exec = util.execfunc(self.root, mf2.execf)
1038 1035 is_link = util.linkfunc(self.root, mf2.linkf)
1039 1036 for f in lookup + modified + added:
1040 1037 mf2[f] = ""
1041 1038 mf2.set(f, is_exec(f), is_link(f))
1042 1039 for f in removed:
1043 1040 if f in mf2:
1044 1041 del mf2[f]
1045 1042
1046 1043 else:
1047 1044 # we are comparing two revisions
1048 1045 mf2 = mfmatches(node2)
1049 1046
1050 1047 if not compareworking:
1051 1048 # flush lists from dirstate before comparing manifests
1052 1049 modified, added, clean = [], [], []
1053 1050
1054 1051 # make sure to sort the files so we talk to the disk in a
1055 1052 # reasonable order
1056 1053 mf2keys = mf2.keys()
1057 1054 mf2keys.sort()
1058 1055 getnode = lambda fn: mf1.get(fn, nullid)
1059 1056 for fn in mf2keys:
1060 1057 if fn in mf1:
1061 1058 if (mf1.flags(fn) != mf2.flags(fn) or
1062 1059 (mf1[fn] != mf2[fn] and
1063 1060 (mf2[fn] != "" or fcmp(fn, getnode)))):
1064 1061 modified.append(fn)
1065 1062 elif list_clean:
1066 1063 clean.append(fn)
1067 1064 del mf1[fn]
1068 1065 else:
1069 1066 added.append(fn)
1070 1067
1071 1068 removed = mf1.keys()
1072 1069
1073 1070 # sort and return results:
1074 1071 for l in modified, added, removed, deleted, unknown, ignored, clean:
1075 1072 l.sort()
1076 1073 return (modified, added, removed, deleted, unknown, ignored, clean)
1077 1074
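status() hands back seven sorted lists that callers unpack positionally; ignored and clean stay empty unless list_ignored/list_clean are set. A minimal sketch, assuming repo is a localrepository:

    modified, added, removed, deleted, unknown, ignored, clean = repo.status()
    for f in modified:
        print "M", f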
1078 1075 def add(self, list):
1079 1076 wlock = self.wlock()
1080 1077 try:
1081 1078 rejected = []
1082 1079 for f in list:
1083 1080 p = self.wjoin(f)
1084 1081 try:
1085 1082 st = os.lstat(p)
1086 1083 except:
1087 1084 self.ui.warn(_("%s does not exist!\n") % f)
1088 1085 rejected.append(f)
1089 1086 continue
1090 1087 if st.st_size > 10000000:
1091 1088 self.ui.warn(_("%s: files over 10MB may cause memory and"
1092 1089 " performance problems\n"
1093 1090 "(use 'hg revert %s' to unadd the file)\n")
1094 1091 % (f, f))
1095 1092 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1096 1093 self.ui.warn(_("%s not added: only files and symlinks "
1097 1094 "supported currently\n") % f)
1098 1095 rejected.append(p)
1099 1096 elif self.dirstate[f] in 'amn':
1100 1097 self.ui.warn(_("%s already tracked!\n") % f)
1101 1098 elif self.dirstate[f] == 'r':
1102 1099 self.dirstate.normallookup(f)
1103 1100 else:
1104 1101 self.dirstate.add(f)
1105 1102 return rejected
1106 1103 finally:
1107 1104 del wlock
1108 1105
1109 1106 def forget(self, list):
1110 1107 wlock = self.wlock()
1111 1108 try:
1112 1109 for f in list:
1113 1110 if self.dirstate[f] != 'a':
1114 1111 self.ui.warn(_("%s not added!\n") % f)
1115 1112 else:
1116 1113 self.dirstate.forget(f)
1117 1114 finally:
1118 1115 del wlock
1119 1116
1120 1117 def remove(self, list, unlink=False):
1121 1118 wlock = None
1122 1119 try:
1123 1120 if unlink:
1124 1121 for f in list:
1125 1122 try:
1126 1123 util.unlink(self.wjoin(f))
1127 1124 except OSError, inst:
1128 1125 if inst.errno != errno.ENOENT:
1129 1126 raise
1130 1127 wlock = self.wlock()
1131 1128 for f in list:
1132 1129 if unlink and os.path.exists(self.wjoin(f)):
1133 1130 self.ui.warn(_("%s still exists!\n") % f)
1134 1131 elif self.dirstate[f] == 'a':
1135 1132 self.dirstate.forget(f)
1136 1133 elif f not in self.dirstate:
1137 1134 self.ui.warn(_("%s not tracked!\n") % f)
1138 1135 else:
1139 1136 self.dirstate.remove(f)
1140 1137 finally:
1141 1138 del wlock
1142 1139
1143 1140 def undelete(self, list):
1144 1141 wlock = None
1145 1142 try:
1146 1143 manifests = [self.manifest.read(self.changelog.read(p)[0])
1147 1144 for p in self.dirstate.parents() if p != nullid]
1148 1145 wlock = self.wlock()
1149 1146 for f in list:
1150 1147 if self.dirstate[f] != 'r':
1151 1148 self.ui.warn("%s not removed!\n" % f)
1152 1149 else:
1153 1150 m = f in manifests[0] and manifests[0] or manifests[1]
1154 1151 t = self.file(f).read(m[f])
1155 1152 self.wwrite(f, t, m.flags(f))
1156 1153 self.dirstate.normal(f)
1157 1154 finally:
1158 1155 del wlock
1159 1156
1160 1157 def copy(self, source, dest):
1161 1158 wlock = None
1162 1159 try:
1163 1160 p = self.wjoin(dest)
1164 1161 if not (os.path.exists(p) or os.path.islink(p)):
1165 1162 self.ui.warn(_("%s does not exist!\n") % dest)
1166 1163 elif not (os.path.isfile(p) or os.path.islink(p)):
1167 1164 self.ui.warn(_("copy failed: %s is not a file or a "
1168 1165 "symbolic link\n") % dest)
1169 1166 else:
1170 1167 wlock = self.wlock()
1171 1168 if dest not in self.dirstate:
1172 1169 self.dirstate.add(dest)
1173 1170 self.dirstate.copy(source, dest)
1174 1171 finally:
1175 1172 del wlock
1176 1173
1177 1174 def heads(self, start=None):
1178 1175 heads = self.changelog.heads(start)
1179 1176 # sort the output in rev descending order
1180 1177 heads = [(-self.changelog.rev(h), h) for h in heads]
1181 1178 heads.sort()
1182 1179 return [n for (r, n) in heads]
1183 1180
1184 1181 def branchheads(self, branch, start=None):
1185 1182 branches = self.branchtags()
1186 1183 if branch not in branches:
1187 1184 return []
1188 1185 # The basic algorithm is this:
1189 1186 #
1190 1187 # Start from the branch tip since there are no later revisions that can
1191 1188 # possibly be in this branch, and the tip is a guaranteed head.
1192 1189 #
1193 1190 # Remember the tip's parents as the first ancestors, since these by
1194 1191 # definition are not heads.
1195 1192 #
1196 1193 # Step backwards from the branch tip through all the revisions. We are
1197 1194 # guaranteed by the rules of Mercurial that we will now be visiting the
1198 1195 # nodes in reverse topological order (children before parents).
1199 1196 #
1200 1197 # If a revision is one of the ancestors of a head then we can toss it
1201 1198 # out of the ancestors set (we've already found it and won't be
1202 1199 # visiting it again) and put its parents in the ancestors set.
1203 1200 #
1204 1201 # Otherwise, if a revision is in the branch it's another head, since it
1205 1202 # wasn't in the ancestor list of an existing head. So add it to the
1206 1203 # head list, and add its parents to the ancestor list.
1207 1204 #
1208 1205 # If it is not in the branch ignore it.
1209 1206 #
1210 1207 # Once we have a list of heads, use nodesbetween to filter out all the
1211 1208 # heads that cannot be reached from startrev. There may be a more
1212 1209 # efficient way to do this as part of the previous algorithm.
1213 1210
1214 1211 set = util.set
1215 1212 heads = [self.changelog.rev(branches[branch])]
1216 1213 # Don't care if ancestors contains nullrev or not.
1217 1214 ancestors = set(self.changelog.parentrevs(heads[0]))
1218 1215 for rev in xrange(heads[0] - 1, nullrev, -1):
1219 1216 if rev in ancestors:
1220 1217 ancestors.update(self.changelog.parentrevs(rev))
1221 1218 ancestors.remove(rev)
1222 1219 elif self.changectx(rev).branch() == branch:
1223 1220 heads.append(rev)
1224 1221 ancestors.update(self.changelog.parentrevs(rev))
1225 1222 heads = [self.changelog.node(rev) for rev in heads]
1226 1223 if start is not None:
1227 1224 heads = self.changelog.nodesbetween([start], heads)[2]
1228 1225 return heads
1229 1226
1230 1227 def branches(self, nodes):
1231 1228 if not nodes:
1232 1229 nodes = [self.changelog.tip()]
1233 1230 b = []
1234 1231 for n in nodes:
1235 1232 t = n
1236 1233 while 1:
1237 1234 p = self.changelog.parents(n)
1238 1235 if p[1] != nullid or p[0] == nullid:
1239 1236 b.append((t, n, p[0], p[1]))
1240 1237 break
1241 1238 n = p[0]
1242 1239 return b
1243 1240
1244 1241 def between(self, pairs):
1245 1242 r = []
1246 1243
1247 1244 for top, bottom in pairs:
1248 1245 n, l, i = top, [], 0
1249 1246 f = 1
1250 1247
1251 1248 while n != bottom:
1252 1249 p = self.changelog.parents(n)[0]
1253 1250 if i == f:
1254 1251 l.append(n)
1255 1252 f = f * 2
1256 1253 n = p
1257 1254 i += 1
1258 1255
1259 1256 r.append(l)
1260 1257
1261 1258 return r
1262 1259
1263 1260 def findincoming(self, remote, base=None, heads=None, force=False):
1264 1261 """Return list of roots of the subsets of missing nodes from remote
1265 1262
1266 1263 If base dict is specified, assume that these nodes and their parents
1267 1264 exist on the remote side and that no child of a node of base exists
1268 1265 in both remote and self.
1269 1266 Furthermore, base will be updated to include the nodes that exist
1270 1267 in both self and remote but none of whose children do.
1271 1268 If a list of heads is specified, return only nodes which are heads
1272 1269 or ancestors of these heads.
1273 1270
1274 1271 All the ancestors of base are in self and in remote.
1275 1272 All the descendants of the list returned are missing in self.
1276 1273 (and so we know that the rest of the nodes are missing in remote, see
1277 1274 outgoing)
1278 1275 """
1279 1276 m = self.changelog.nodemap
1280 1277 search = []
1281 1278 fetch = {}
1282 1279 seen = {}
1283 1280 seenbranch = {}
1284 1281 if base == None:
1285 1282 base = {}
1286 1283
1287 1284 if not heads:
1288 1285 heads = remote.heads()
1289 1286
1290 1287 if self.changelog.tip() == nullid:
1291 1288 base[nullid] = 1
1292 1289 if heads != [nullid]:
1293 1290 return [nullid]
1294 1291 return []
1295 1292
1296 1293 # assume we're closer to the tip than the root
1297 1294 # and start by examining the heads
1298 1295 self.ui.status(_("searching for changes\n"))
1299 1296
1300 1297 unknown = []
1301 1298 for h in heads:
1302 1299 if h not in m:
1303 1300 unknown.append(h)
1304 1301 else:
1305 1302 base[h] = 1
1306 1303
1307 1304 if not unknown:
1308 1305 return []
1309 1306
1310 1307 req = dict.fromkeys(unknown)
1311 1308 reqcnt = 0
1312 1309
1313 1310 # search through remote branches
1314 1311 # a 'branch' here is a linear segment of history, with four parts:
1315 1312 # head, root, first parent, second parent
1316 1313 # (a branch always has two parents (or none) by definition)
1317 1314 unknown = remote.branches(unknown)
1318 1315 while unknown:
1319 1316 r = []
1320 1317 while unknown:
1321 1318 n = unknown.pop(0)
1322 1319 if n[0] in seen:
1323 1320 continue
1324 1321
1325 1322 self.ui.debug(_("examining %s:%s\n")
1326 1323 % (short(n[0]), short(n[1])))
1327 1324 if n[0] == nullid: # found the end of the branch
1328 1325 pass
1329 1326 elif n in seenbranch:
1330 1327 self.ui.debug(_("branch already found\n"))
1331 1328 continue
1332 1329 elif n[1] and n[1] in m: # do we know the base?
1333 1330 self.ui.debug(_("found incomplete branch %s:%s\n")
1334 1331 % (short(n[0]), short(n[1])))
1335 1332 search.append(n) # schedule branch range for scanning
1336 1333 seenbranch[n] = 1
1337 1334 else:
1338 1335 if n[1] not in seen and n[1] not in fetch:
1339 1336 if n[2] in m and n[3] in m:
1340 1337 self.ui.debug(_("found new changeset %s\n") %
1341 1338 short(n[1]))
1342 1339 fetch[n[1]] = 1 # earliest unknown
1343 1340 for p in n[2:4]:
1344 1341 if p in m:
1345 1342 base[p] = 1 # latest known
1346 1343
1347 1344 for p in n[2:4]:
1348 1345 if p not in req and p not in m:
1349 1346 r.append(p)
1350 1347 req[p] = 1
1351 1348 seen[n[0]] = 1
1352 1349
1353 1350 if r:
1354 1351 reqcnt += 1
1355 1352 self.ui.debug(_("request %d: %s\n") %
1356 1353 (reqcnt, " ".join(map(short, r))))
1357 1354 for p in xrange(0, len(r), 10):
1358 1355 for b in remote.branches(r[p:p+10]):
1359 1356 self.ui.debug(_("received %s:%s\n") %
1360 1357 (short(b[0]), short(b[1])))
1361 1358 unknown.append(b)
1362 1359
1363 1360 # do binary search on the branches we found
1364 1361 while search:
1365 1362 n = search.pop(0)
1366 1363 reqcnt += 1
1367 1364 l = remote.between([(n[0], n[1])])[0]
1368 1365 l.append(n[1])
1369 1366 p = n[0]
1370 1367 f = 1
1371 1368 for i in l:
1372 1369 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1373 1370 if i in m:
1374 1371 if f <= 2:
1375 1372 self.ui.debug(_("found new branch changeset %s\n") %
1376 1373 short(p))
1377 1374 fetch[p] = 1
1378 1375 base[i] = 1
1379 1376 else:
1380 1377 self.ui.debug(_("narrowed branch search to %s:%s\n")
1381 1378 % (short(p), short(i)))
1382 1379 search.append((p, i))
1383 1380 break
1384 1381 p, f = i, f * 2
1385 1382
1386 1383 # sanity check our fetch list
1387 1384 for f in fetch.keys():
1388 1385 if f in m:
1389 1386 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1390 1387
1391 1388 if base.keys() == [nullid]:
1392 1389 if force:
1393 1390 self.ui.warn(_("warning: repository is unrelated\n"))
1394 1391 else:
1395 1392 raise util.Abort(_("repository is unrelated"))
1396 1393
1397 1394 self.ui.debug(_("found new changesets starting at ") +
1398 1395 " ".join([short(f) for f in fetch]) + "\n")
1399 1396
1400 1397 self.ui.debug(_("%d total queries\n") % reqcnt)
1401 1398
1402 1399 return fetch.keys()
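
The narrowing loop above is a boundary search along one linear branch segment: remote.between() samples nodes at exponentially spaced offsets (f doubles each round), so a segment of n changesets is resolved in O(log n) wire requests. A rough standalone sketch of the same boundary search, assuming the locally known nodes form a suffix of the segment (all names here are hypothetical):

def first_known(segment, known):
    # segment[0] is an unknown head, segment[-1] a known node;
    # return the earliest node in the segment that we already have
    lo, hi = 0, len(segment) - 1
    while hi - lo > 1:
        mid = (lo + hi) // 2
        if segment[mid] in known:
            hi = mid  # boundary is at mid or earlier
        else:
            lo = mid  # mid is still unknown, boundary is later
    return segment[hi]  # segment[lo] is then the latest unknown node

# first_known(['d', 'c', 'b', 'a'], set(['b', 'a'])) == 'b'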
1403 1400
1404 1401 def findoutgoing(self, remote, base=None, heads=None, force=False):
1405 1402 """Return list of nodes that are roots of subsets not in remote
1406 1403
1407 1404 If base dict is specified, assume that these nodes and their parents
1408 1405 exist on the remote side.
1409 1406 If a list of heads is specified, return only nodes which are heads
1410 1407 or ancestors of these heads, and return a second element which
1411 1408 contains all remote heads which get new children.
1412 1409 """
1413 1410 if base is None:
1414 1411 base = {}
1415 1412 self.findincoming(remote, base, heads, force=force)
1416 1413
1417 1414 self.ui.debug(_("common changesets up to ")
1418 1415 + " ".join(map(short, base.keys())) + "\n")
1419 1416
1420 1417 remain = dict.fromkeys(self.changelog.nodemap)
1421 1418
1422 1419 # prune everything remote has from the tree
1423 1420 del remain[nullid]
1424 1421 remove = base.keys()
1425 1422 while remove:
1426 1423 n = remove.pop(0)
1427 1424 if n in remain:
1428 1425 del remain[n]
1429 1426 for p in self.changelog.parents(n):
1430 1427 remove.append(p)
1431 1428
1432 1429 # find every node whose parents have been pruned
1433 1430 subset = []
1434 1431 # find every remote head that will get new children
1435 1432 updated_heads = {}
1436 1433 for n in remain:
1437 1434 p1, p2 = self.changelog.parents(n)
1438 1435 if p1 not in remain and p2 not in remain:
1439 1436 subset.append(n)
1440 1437 if heads:
1441 1438 if p1 in heads:
1442 1439 updated_heads[p1] = True
1443 1440 if p2 in heads:
1444 1441 updated_heads[p2] = True
1445 1442
1446 1443 # this is the set of all roots we have to push
1447 1444 if heads:
1448 1445 return subset, updated_heads.keys()
1449 1446 else:
1450 1447 return subset
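
The pruning walk in findoutgoing boils down to: remove every node reachable from the remote-known bases, then report the survivors whose parents were both removed. A minimal sketch over a plain parents map (a hypothetical stand-in for the changelog, not the real API):

def outgoing_roots(parents, base):
    # parents: {node: tuple of parent nodes}, with roots mapping to ()
    # base: nodes the remote already has (and hence all their ancestors)
    remain = set(parents)
    stack = list(base)
    while stack:
        n = stack.pop()
        if n in remain:
            remain.remove(n)
            stack.extend(parents[n])
    # roots of the missing subsets: no parent survived the pruning
    return [n for n in remain
            if not any(p in remain for p in parents[n])]

# linear history a <- b <- c, remote is at b:
# outgoing_roots({'a': (), 'b': ('a',), 'c': ('b',)}, ['b']) == ['c']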
1451 1448
1452 1449 def pull(self, remote, heads=None, force=False):
1453 1450 lock = self.lock()
1454 1451 try:
1455 1452 fetch = self.findincoming(remote, heads=heads, force=force)
1456 1453 if fetch == [nullid]:
1457 1454 self.ui.status(_("requesting all changes\n"))
1458 1455
1459 1456 if not fetch:
1460 1457 self.ui.status(_("no changes found\n"))
1461 1458 return 0
1462 1459
1463 1460 if heads is None:
1464 1461 cg = remote.changegroup(fetch, 'pull')
1465 1462 else:
1466 1463 if 'changegroupsubset' not in remote.capabilities:
1467 1464 raise util.Abort(_("Partial pull cannot be done because the other repository doesn't support changegroupsubset."))
1468 1465 cg = remote.changegroupsubset(fetch, heads, 'pull')
1469 1466 return self.addchangegroup(cg, 'pull', remote.url())
1470 1467 finally:
1471 1468 del lock
1472 1469
1473 1470 def push(self, remote, force=False, revs=None):
1474 1471 # there are two ways to push to remote repo:
1475 1472 #
1476 1473 # addchangegroup assumes local user can lock remote
1477 1474 # repo (local filesystem, old ssh servers).
1478 1475 #
1479 1476 # unbundle assumes local user cannot lock remote repo (new ssh
1480 1477 # servers, http servers).
1481 1478
1482 1479 if remote.capable('unbundle'):
1483 1480 return self.push_unbundle(remote, force, revs)
1484 1481 return self.push_addchangegroup(remote, force, revs)
1485 1482
1486 1483 def prepush(self, remote, force, revs):
1487 1484 base = {}
1488 1485 remote_heads = remote.heads()
1489 1486 inc = self.findincoming(remote, base, remote_heads, force=force)
1490 1487
1491 1488 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1492 1489 if revs is not None:
1493 1490 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1494 1491 else:
1495 1492 bases, heads = update, self.changelog.heads()
1496 1493
1497 1494 if not bases:
1498 1495 self.ui.status(_("no changes found\n"))
1499 1496 return None, 1
1500 1497 elif not force:
1501 1498 # check if we're creating new remote heads
1502 1499 # to be a remote head after push, node must be either
1503 1500 # - unknown locally
1504 1501 # - a local outgoing head descended from update
1505 1502 # - a remote head that's known locally and not
1506 1503 # ancestral to an outgoing head
1507 1504
1508 1505 warn = 0
1509 1506
1510 1507 if remote_heads == [nullid]:
1511 1508 warn = 0
1512 1509 elif not revs and len(heads) > len(remote_heads):
1513 1510 warn = 1
1514 1511 else:
1515 1512 newheads = list(heads)
1516 1513 for r in remote_heads:
1517 1514 if r in self.changelog.nodemap:
1518 1515 desc = self.changelog.heads(r, heads)
1519 1516 l = [h for h in heads if h in desc]
1520 1517 if not l:
1521 1518 newheads.append(r)
1522 1519 else:
1523 1520 newheads.append(r)
1524 1521 if len(newheads) > len(remote_heads):
1525 1522 warn = 1
1526 1523
1527 1524 if warn:
1528 1525 self.ui.warn(_("abort: push creates new remote heads!\n"))
1529 1526 self.ui.status(_("(did you forget to merge?"
1530 1527 " use push -f to force)\n"))
1531 1528 return None, 0
1532 1529 elif inc:
1533 1530 self.ui.warn(_("note: unsynced remote changes!\n"))
1534 1531
1535 1532
1536 1533 if revs is None:
1537 1534 cg = self.changegroup(update, 'push')
1538 1535 else:
1539 1536 cg = self.changegroupsubset(update, revs, 'push')
1540 1537 return cg, remote_heads
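
The head-count check in prepush can be restated as: keep each current remote head that no pushed head descends from, add the heads being pushed, and warn if the total grows. A toy sketch of just that counting rule, where descends() is a hypothetical stand-in for the changelog ancestry query:

def creates_new_heads(push_heads, remote_heads, descends):
    # descends(h, r): True if h is a descendant of r (hypothetical)
    newheads = list(push_heads)
    for r in remote_heads:
        if not any(descends(h, r) for h in push_heads):
            newheads.append(r)  # r remains an independent head
    return len(newheads) > len(remote_heads)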
1541 1538
1542 1539 def push_addchangegroup(self, remote, force, revs):
1543 1540 lock = remote.lock()
1544 1541 try:
1545 1542 ret = self.prepush(remote, force, revs)
1546 1543 if ret[0] is not None:
1547 1544 cg, remote_heads = ret
1548 1545 return remote.addchangegroup(cg, 'push', self.url())
1549 1546 return ret[1]
1550 1547 finally:
1551 1548 del lock
1552 1549
1553 1550 def push_unbundle(self, remote, force, revs):
1554 1551 # local repo finds heads on server, finds out what revs it
1555 1552 # must push. once revs transferred, if server finds it has
1556 1553 # different heads (someone else won commit/push race), server
1557 1554 # aborts.
1558 1555
1559 1556 ret = self.prepush(remote, force, revs)
1560 1557 if ret[0] is not None:
1561 1558 cg, remote_heads = ret
1562 1559 if force: remote_heads = ['force']
1563 1560 return remote.unbundle(cg, remote_heads, 'push')
1564 1561 return ret[1]
1565 1562
1566 1563 def changegroupinfo(self, nodes, source):
1567 1564 if self.ui.verbose or source == 'bundle':
1568 1565 self.ui.status(_("%d changesets found\n") % len(nodes))
1569 1566 if self.ui.debugflag:
1570 1567 self.ui.debug(_("List of changesets:\n"))
1571 1568 for node in nodes:
1572 1569 self.ui.debug("%s\n" % hex(node))
1573 1570
1574 1571 def changegroupsubset(self, bases, heads, source, extranodes=None):
1575 1572 """This function generates a changegroup consisting of all the nodes
1576 1573 that are descendants of any of the bases, and ancestors of any of
1577 1574 the heads.
1578 1575
1579 1576 It is fairly complex as determining which filenodes and which
1580 1577 manifest nodes need to be included for the changeset to be complete
1581 1578 is non-trivial.
1582 1579
1583 1580 Another wrinkle is doing the reverse, figuring out which changeset in
1584 1581 the changegroup a particular filenode or manifestnode belongs to.
1585 1582
1586 1583 The caller can specify some nodes that must be included in the
1587 1584 changegroup using the extranodes argument. It should be a dict
1588 1585 where the keys are the filenames (or 1 for the manifest), and the
1589 1586 values are lists of (node, linknode) tuples, where node is a wanted
1590 1587 node and linknode is the changelog node that should be transmitted as
1591 1588 the linkrev.
1592 1589 """
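# Illustration only (hypothetical 20-byte binary nodes): an extranodes
# argument forcing one extra manifest node and one extra filenode for
# the file 'foo' into the stream would look like
#   extranodes = {1: [(mnode, linknode)],
#                 'foo': [(fnode, linknode)]}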
1593 1590
1594 1591 self.hook('preoutgoing', throw=True, source=source)
1595 1592
1596 1593 # Set up some initial variables
1597 1594 # Make it easy to refer to self.changelog
1598 1595 cl = self.changelog
1599 1596 # msng is short for missing - compute the list of changesets in this
1600 1597 # changegroup.
1601 1598 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1602 1599 self.changegroupinfo(msng_cl_lst, source)
1603 1600 # Some bases may turn out to be superfluous, and some heads may be
1604 1601 # too. nodesbetween will return the minimal set of bases and heads
1605 1602 # necessary to re-create the changegroup.
1606 1603
1607 1604 # Known heads are the list of heads that it is assumed the recipient
1608 1605 # of this changegroup will know about.
1609 1606 knownheads = {}
1610 1607 # We assume that all parents of bases are known heads.
1611 1608 for n in bases:
1612 1609 for p in cl.parents(n):
1613 1610 if p != nullid:
1614 1611 knownheads[p] = 1
1615 1612 knownheads = knownheads.keys()
1616 1613 if knownheads:
1617 1614 # Now that we know what heads are known, we can compute which
1618 1615 # changesets are known. The recipient must know about all
1619 1616 # changesets required to reach the known heads from the null
1620 1617 # changeset.
1621 1618 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1622 1619 junk = None
1623 1620 # Transform the list into an ersatz set.
1624 1621 has_cl_set = dict.fromkeys(has_cl_set)
1625 1622 else:
1626 1623 # If there were no known heads, the recipient cannot be assumed to
1627 1624 # know about any changesets.
1628 1625 has_cl_set = {}
1629 1626
1630 1627 # Make it easy to refer to self.manifest
1631 1628 mnfst = self.manifest
1632 1629 # We don't know which manifests are missing yet
1633 1630 msng_mnfst_set = {}
1634 1631 # Nor do we know which filenodes are missing.
1635 1632 msng_filenode_set = {}
1636 1633
1637 1634 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1638 1635 junk = None
1639 1636
1640 1637 # A changeset always belongs to itself, so the changenode lookup
1641 1638 # function for a changenode is identity.
1642 1639 def identity(x):
1643 1640 return x
1644 1641
1645 1642 # A function generating function. Sets up an environment for the
1646 1643 # inner function.
1647 1644 def cmp_by_rev_func(revlog):
1648 1645 # Compare two nodes by their revision number in the environment's
1649 1646 # revision history. Since the revision number both represents the
1650 1647 # most efficient order to read the nodes in, and represents a
1651 1648 # topological sorting of the nodes, this function is often useful.
1652 1649 def cmp_by_rev(a, b):
1653 1650 return cmp(revlog.rev(a), revlog.rev(b))
1654 1651 return cmp_by_rev
1655 1652
1656 1653 # If we determine that a particular file or manifest node must be a
1657 1654 # node that the recipient of the changegroup will already have, we can
1658 1655 # also assume the recipient will have all the parents. This function
1659 1656 # prunes them from the set of missing nodes.
1660 1657 def prune_parents(revlog, hasset, msngset):
1661 1658 haslst = hasset.keys()
1662 1659 haslst.sort(cmp_by_rev_func(revlog))
1663 1660 for node in haslst:
1664 1661 parentlst = [p for p in revlog.parents(node) if p != nullid]
1665 1662 while parentlst:
1666 1663 n = parentlst.pop()
1667 1664 if n not in hasset:
1668 1665 hasset[n] = 1
1669 1666 p = [p for p in revlog.parents(n) if p != nullid]
1670 1667 parentlst.extend(p)
1671 1668 for n in hasset:
1672 1669 msngset.pop(n, None)
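# Toy walk-through (hypothetical nodes): for a chain a <- b <- c with
# hasset = {b: 1} and msngset = {a: 1, b: 1, c: 1}, the loop above
# adds a to hasset through b's parents, then pops both a and b from
# msngset, leaving only {c: 1} to transmit.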
1673 1670
1674 1671 # This is a function generating function used to set up an environment
1675 1672 # for the inner function to execute in.
1676 1673 def manifest_and_file_collector(changedfileset):
1677 1674 # This is an information gathering function that gathers
1678 1675 # information from each changeset node that goes out as part of
1679 1676 # the changegroup. The information gathered is a list of which
1680 1677 # manifest nodes are potentially required (the recipient may
1681 1678 # already have them) and total list of all files which were
1682 1679 # changed in any changeset in the changegroup.
1683 1680 #
1684 1681 # We also remember the first changenode we saw any manifest
1685 1682 # referenced by so we can later determine which changenode 'owns'
1686 1683 # the manifest.
1687 1684 def collect_manifests_and_files(clnode):
1688 1685 c = cl.read(clnode)
1689 1686 for f in c[3]:
1690 1687 # This is to make sure we only have one instance of each
1691 1688 # filename string for each filename.
1692 1689 changedfileset.setdefault(f, f)
1693 1690 msng_mnfst_set.setdefault(c[0], clnode)
1694 1691 return collect_manifests_and_files
1695 1692
1696 1693 # Figure out which manifest nodes (of the ones we think might be part
1697 1694 # of the changegroup) the recipient must know about and remove them
1698 1695 # from the changegroup.
1699 1696 def prune_manifests():
1700 1697 has_mnfst_set = {}
1701 1698 for n in msng_mnfst_set:
1702 1699 # If a 'missing' manifest thinks it belongs to a changenode
1703 1700 # the recipient is assumed to have, obviously the recipient
1704 1701 # must have that manifest.
1705 1702 linknode = cl.node(mnfst.linkrev(n))
1706 1703 if linknode in has_cl_set:
1707 1704 has_mnfst_set[n] = 1
1708 1705 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1709 1706
1710 1707 # Use the information collected in collect_manifests_and_files to say
1711 1708 # which changenode any manifestnode belongs to.
1712 1709 def lookup_manifest_link(mnfstnode):
1713 1710 return msng_mnfst_set[mnfstnode]
1714 1711
1715 1712 # A function generating function that sets up the initial environment
1716 1713 # for the inner function.
1717 1714 def filenode_collector(changedfiles):
1718 1715 next_rev = [0]
1719 1716 # This gathers information from each manifestnode included in the
1720 1717 # changegroup about which filenodes the manifest node references
1721 1718 # so we can include those in the changegroup too.
1722 1719 #
1723 1720 # It also remembers which changenode each filenode belongs to. It
1724 1721 # does this by assuming that a filenode belongs to the changenode
1725 1722 # that the first manifest referencing it belongs to.
1726 1723 def collect_msng_filenodes(mnfstnode):
1727 1724 r = mnfst.rev(mnfstnode)
1728 1725 if r == next_rev[0]:
1729 1726 # If the last rev we looked at was the one just previous,
1730 1727 # we only need to see a diff.
1731 1728 deltamf = mnfst.readdelta(mnfstnode)
1732 1729 # For each line in the delta
1733 1730 for f, fnode in deltamf.items():
1734 1731 f = changedfiles.get(f, None)
1735 1732 # And if the file is in the list of files we care
1736 1733 # about.
1737 1734 if f is not None:
1738 1735 # Get the changenode this manifest belongs to
1739 1736 clnode = msng_mnfst_set[mnfstnode]
1740 1737 # Create the set of filenodes for the file if
1741 1738 # there isn't one already.
1742 1739 ndset = msng_filenode_set.setdefault(f, {})
1743 1740 # And set the filenode's changelog node to the
1744 1741 # manifest's if it hasn't been set already.
1745 1742 ndset.setdefault(fnode, clnode)
1746 1743 else:
1747 1744 # Otherwise we need a full manifest.
1748 1745 m = mnfst.read(mnfstnode)
1749 1746 # For every file we care about.
1750 1747 for f in changedfiles:
1751 1748 fnode = m.get(f, None)
1752 1749 # If it's in the manifest
1753 1750 if fnode is not None:
1754 1751 # See comments above.
1755 1752 clnode = msng_mnfst_set[mnfstnode]
1756 1753 ndset = msng_filenode_set.setdefault(f, {})
1757 1754 ndset.setdefault(fnode, clnode)
1758 1755 # Remember the revision we hope to see next.
1759 1756 next_rev[0] = r + 1
1760 1757 return collect_msng_filenodes
1761 1758
1762 1759 # We have a list of filenodes we think we need for a file; let's remove
1763 1760 # all those we know the recipient must have.
1764 1761 def prune_filenodes(f, filerevlog):
1765 1762 msngset = msng_filenode_set[f]
1766 1763 hasset = {}
1767 1764 # If a 'missing' filenode thinks it belongs to a changenode we
1768 1765 # assume the recipient must have, then the recipient must have
1769 1766 # that filenode.
1770 1767 for n in msngset:
1771 1768 clnode = cl.node(filerevlog.linkrev(n))
1772 1769 if clnode in has_cl_set:
1773 1770 hasset[n] = 1
1774 1771 prune_parents(filerevlog, hasset, msngset)
1775 1772
1776 1773 # A function generating function that sets up a context for the
1777 1774 # inner function.
1778 1775 def lookup_filenode_link_func(fname):
1779 1776 msngset = msng_filenode_set[fname]
1780 1777 # Lookup the changenode the filenode belongs to.
1781 1778 def lookup_filenode_link(fnode):
1782 1779 return msngset[fnode]
1783 1780 return lookup_filenode_link
1784 1781
1785 1782 # Add the nodes that were explicitly requested.
1786 1783 def add_extra_nodes(name, nodes):
1787 1784 if not extranodes or name not in extranodes:
1788 1785 return
1789 1786
1790 1787 for node, linknode in extranodes[name]:
1791 1788 if node not in nodes:
1792 1789 nodes[node] = linknode
1793 1790
1794 1791 # Now that we have all these utility functions to help out and
1795 1792 # logically divide up the task, generate the group.
1796 1793 def gengroup():
1797 1794 # The set of changed files starts empty.
1798 1795 changedfiles = {}
1799 1796 # Create a changenode group generator that will call our functions
1800 1797 # back to lookup the owning changenode and collect information.
1801 1798 group = cl.group(msng_cl_lst, identity,
1802 1799 manifest_and_file_collector(changedfiles))
1803 1800 for chnk in group:
1804 1801 yield chnk
1805 1802
1806 1803 # The list of manifests has been collected by the generator
1807 1804 # calling our functions back.
1808 1805 prune_manifests()
1809 1806 add_extra_nodes(1, msng_mnfst_set)
1810 1807 msng_mnfst_lst = msng_mnfst_set.keys()
1811 1808 # Sort the manifestnodes by revision number.
1812 1809 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1813 1810 # Create a generator for the manifestnodes that calls our lookup
1814 1811 # and data collection functions back.
1815 1812 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1816 1813 filenode_collector(changedfiles))
1817 1814 for chnk in group:
1818 1815 yield chnk
1819 1816
1820 1817 # These are no longer needed, dereference and toss the memory for
1821 1818 # them.
1822 1819 msng_mnfst_lst = None
1823 1820 msng_mnfst_set.clear()
1824 1821
1825 1822 if extranodes:
1826 1823 for fname in extranodes:
1827 1824 if isinstance(fname, int):
1828 1825 continue
1829 1826 add_extra_nodes(fname,
1830 1827 msng_filenode_set.setdefault(fname, {}))
1831 1828 changedfiles[fname] = 1
1832 1829 changedfiles = changedfiles.keys()
1833 1830 changedfiles.sort()
1834 1831 # Go through all our files in order sorted by name.
1835 1832 for fname in changedfiles:
1836 1833 filerevlog = self.file(fname)
1837 1834 if filerevlog.count() == 0:
1838 1835 raise util.Abort(_("empty or missing revlog for %s") % fname)
1839 1836 # Toss out the filenodes that the recipient isn't really
1840 1837 # missing.
1841 1838 if fname in msng_filenode_set:
1842 1839 prune_filenodes(fname, filerevlog)
1843 1840 msng_filenode_lst = msng_filenode_set[fname].keys()
1844 1841 else:
1845 1842 msng_filenode_lst = []
1846 1843 # If any filenodes are left, generate the group for them,
1847 1844 # otherwise don't bother.
1848 1845 if len(msng_filenode_lst) > 0:
1849 1846 yield changegroup.chunkheader(len(fname))
1850 1847 yield fname
1851 1848 # Sort the filenodes by their revision #
1852 1849 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1853 1850 # Create a group generator and only pass in a changenode
1854 1851 # lookup function as we need to collect no information
1855 1852 # from filenodes.
1856 1853 group = filerevlog.group(msng_filenode_lst,
1857 1854 lookup_filenode_link_func(fname))
1858 1855 for chnk in group:
1859 1856 yield chnk
1860 1857 if fname in msng_filenode_set:
1861 1858 # Don't need this anymore, toss it to free memory.
1862 1859 del msng_filenode_set[fname]
1863 1860 # Signal that no more groups are left.
1864 1861 yield changegroup.closechunk()
1865 1862
1866 1863 if msng_cl_lst:
1867 1864 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1868 1865
1869 1866 return util.chunkbuffer(gengroup())
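
Taken end to end, gengroup() emits the changegroup stream in a fixed order. Schematically (a simplification; the exact chunk framing lives in changegroup.py):

    changelog group   (delta chunks, ended by an empty chunk)
    manifest group    (delta chunks, ended by an empty chunk)
    for each changed file, sorted by name:
        chunk carrying the file name
        filelog group (delta chunks, ended by an empty chunk)
    empty chunk       (end of stream)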
1870 1867
1871 1868 def changegroup(self, basenodes, source):
1872 1869 """Generate a changegroup of all nodes that we have that a recipient
1873 1870 doesn't.
1874 1871
1875 1872 This is much easier than the previous function as we can assume that
1876 1873 the recipient has any changenode we aren't sending them."""
1877 1874
1878 1875 self.hook('preoutgoing', throw=True, source=source)
1879 1876
1880 1877 cl = self.changelog
1881 1878 nodes = cl.nodesbetween(basenodes, None)[0]
1882 1879 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1883 1880 self.changegroupinfo(nodes, source)
1884 1881
1885 1882 def identity(x):
1886 1883 return x
1887 1884
1888 1885 def gennodelst(revlog):
1889 1886 for r in xrange(0, revlog.count()):
1890 1887 n = revlog.node(r)
1891 1888 if revlog.linkrev(n) in revset:
1892 1889 yield n
1893 1890
1894 1891 def changed_file_collector(changedfileset):
1895 1892 def collect_changed_files(clnode):
1896 1893 c = cl.read(clnode)
1897 1894 for fname in c[3]:
1898 1895 changedfileset[fname] = 1
1899 1896 return collect_changed_files
1900 1897
1901 1898 def lookuprevlink_func(revlog):
1902 1899 def lookuprevlink(n):
1903 1900 return cl.node(revlog.linkrev(n))
1904 1901 return lookuprevlink
1905 1902
1906 1903 def gengroup():
1907 1904 # construct a list of all changed files
1908 1905 changedfiles = {}
1909 1906
1910 1907 for chnk in cl.group(nodes, identity,
1911 1908 changed_file_collector(changedfiles)):
1912 1909 yield chnk
1913 1910 changedfiles = changedfiles.keys()
1914 1911 changedfiles.sort()
1915 1912
1916 1913 mnfst = self.manifest
1917 1914 nodeiter = gennodelst(mnfst)
1918 1915 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1919 1916 yield chnk
1920 1917
1921 1918 for fname in changedfiles:
1922 1919 filerevlog = self.file(fname)
1923 1920 if filerevlog.count() == 0:
1924 1921 raise util.Abort(_("empty or missing revlog for %s") % fname)
1925 1922 nodeiter = gennodelst(filerevlog)
1926 1923 nodeiter = list(nodeiter)
1927 1924 if nodeiter:
1928 1925 yield changegroup.chunkheader(len(fname))
1929 1926 yield fname
1930 1927 lookup = lookuprevlink_func(filerevlog)
1931 1928 for chnk in filerevlog.group(nodeiter, lookup):
1932 1929 yield chnk
1933 1930
1934 1931 yield changegroup.closechunk()
1935 1932
1936 1933 if nodes:
1937 1934 self.hook('outgoing', node=hex(nodes[0]), source=source)
1938 1935
1939 1936 return util.chunkbuffer(gengroup())
1940 1937
1941 1938 def addchangegroup(self, source, srctype, url, emptyok=False):
1942 1939 """add changegroup to repo.
1943 1940
1944 1941 return values:
1945 1942 - nothing changed or no source: 0
1946 1943 - more heads than before: 1+added heads (2..n)
1947 1944 - fewer heads than before: -1-removed heads (-2..-n)
1948 1945 - number of heads stays the same: 1
1949 1946 """
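# Worked examples of the return-value rule (hypothetical counts):
#   1 head  -> 3 heads : returns  3  (newheads - oldheads + 1)
#   1 head  -> 1 head  : returns  1
#   3 heads -> 2 heads : returns -2  (newheads - oldheads - 1)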
1950 1947 def csmap(x):
1951 1948 self.ui.debug(_("add changeset %s\n") % short(x))
1952 1949 return cl.count()
1953 1950
1954 1951 def revmap(x):
1955 1952 return cl.rev(x)
1956 1953
1957 1954 if not source:
1958 1955 return 0
1959 1956
1960 1957 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1961 1958
1962 1959 changesets = files = revisions = 0
1963 1960
1964 1961 # write changelog data to temp files so concurrent readers will not see
1965 1962 # inconsistent view
1966 1963 cl = self.changelog
1967 1964 cl.delayupdate()
1968 1965 oldheads = len(cl.heads())
1969 1966
1970 1967 tr = self.transaction()
1971 1968 try:
1972 1969 trp = weakref.proxy(tr)
1973 1970 # pull off the changeset group
1974 1971 self.ui.status(_("adding changesets\n"))
1975 1972 cor = cl.count() - 1
1976 1973 chunkiter = changegroup.chunkiter(source)
1977 1974 if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
1978 1975 raise util.Abort(_("received changelog group is empty"))
1979 1976 cnr = cl.count() - 1
1980 1977 changesets = cnr - cor
1981 1978
1982 1979 # pull off the manifest group
1983 1980 self.ui.status(_("adding manifests\n"))
1984 1981 chunkiter = changegroup.chunkiter(source)
1985 1982 # no need to check for empty manifest group here:
1986 1983 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1987 1984 # no new manifest will be created and the manifest group will
1988 1985 # be empty during the pull
1989 1986 self.manifest.addgroup(chunkiter, revmap, trp)
1990 1987
1991 1988 # process the files
1992 1989 self.ui.status(_("adding file changes\n"))
1993 1990 while 1:
1994 1991 f = changegroup.getchunk(source)
1995 1992 if not f:
1996 1993 break
1997 1994 self.ui.debug(_("adding %s revisions\n") % f)
1998 1995 fl = self.file(f)
1999 1996 o = fl.count()
2000 1997 chunkiter = changegroup.chunkiter(source)
2001 1998 if fl.addgroup(chunkiter, revmap, trp) is None:
2002 1999 raise util.Abort(_("received file revlog group is empty"))
2003 2000 revisions += fl.count() - o
2004 2001 files += 1
2005 2002
2006 2003 # make changelog see real files again
2007 2004 cl.finalize(trp)
2008 2005
2009 2006 newheads = len(self.changelog.heads())
2010 2007 heads = ""
2011 2008 if oldheads and newheads != oldheads:
2012 2009 heads = _(" (%+d heads)") % (newheads - oldheads)
2013 2010
2014 2011 self.ui.status(_("added %d changesets"
2015 2012 " with %d changes to %d files%s\n")
2016 2013 % (changesets, revisions, files, heads))
2017 2014
2018 2015 if changesets > 0:
2019 2016 self.hook('pretxnchangegroup', throw=True,
2020 2017 node=hex(self.changelog.node(cor+1)), source=srctype,
2021 2018 url=url)
2022 2019
2023 2020 tr.close()
2024 2021 finally:
2025 2022 del tr
2026 2023
2027 2024 if changesets > 0:
2028 2025 # forcefully update the on-disk branch cache
2029 2026 self.ui.debug(_("updating the branch cache\n"))
2030 2027 self.branchtags()
2031 2028 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2032 2029 source=srctype, url=url)
2033 2030
2034 2031 for i in xrange(cor + 1, cnr + 1):
2035 2032 self.hook("incoming", node=hex(self.changelog.node(i)),
2036 2033 source=srctype, url=url)
2037 2034
2038 2035 # never return 0 here:
2039 2036 if newheads < oldheads:
2040 2037 return newheads - oldheads - 1
2041 2038 else:
2042 2039 return newheads - oldheads + 1
2043 2040
2044 2041
2045 2042 def stream_in(self, remote):
2046 2043 fp = remote.stream_out()
2047 2044 l = fp.readline()
2048 2045 try:
2049 2046 resp = int(l)
2050 2047 except ValueError:
2051 2048 raise util.UnexpectedOutput(
2052 2049 _('Unexpected response from remote server:'), l)
2053 2050 if resp == 1:
2054 2051 raise util.Abort(_('operation forbidden by server'))
2055 2052 elif resp == 2:
2056 2053 raise util.Abort(_('locking the remote repository failed'))
2057 2054 elif resp != 0:
2058 2055 raise util.Abort(_('the server sent an unknown error code'))
2059 2056 self.ui.status(_('streaming all changes\n'))
2060 2057 l = fp.readline()
2061 2058 try:
2062 2059 total_files, total_bytes = map(int, l.split(' ', 1))
2063 2060 except (ValueError, TypeError):
2064 2061 raise util.UnexpectedOutput(
2065 2062 _('Unexpected response from remote server:'), l)
2066 2063 self.ui.status(_('%d files to transfer, %s of data\n') %
2067 2064 (total_files, util.bytecount(total_bytes)))
2068 2065 start = time.time()
2069 2066 for i in xrange(total_files):
2070 2067 # XXX doesn't support '\n' or '\r' in filenames
2071 2068 l = fp.readline()
2072 2069 try:
2073 2070 name, size = l.split('\0', 1)
2074 2071 size = int(size)
2075 2072 except (ValueError, TypeError):
2076 2073 raise util.UnexpectedOutput(
2077 2074 _('Unexpected response from remote server:'), l)
2078 2075 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2079 2076 ofp = self.sopener(name, 'w')
2080 2077 for chunk in util.filechunkiter(fp, limit=size):
2081 2078 ofp.write(chunk)
2082 2079 ofp.close()
2083 2080 elapsed = time.time() - start
2084 2081 if elapsed <= 0:
2085 2082 elapsed = 0.001
2086 2083 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2087 2084 (util.bytecount(total_bytes), elapsed,
2088 2085 util.bytecount(total_bytes / elapsed)))
2089 2086 self.invalidate()
2090 2087 return len(self.heads()) + 1
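
The stream format parsed above is line-oriented: one status line, one totals line, then each file as a NUL-separated name/size header followed by exactly that many raw bytes. A hypothetical trace for a two-file transfer:

    0                        status: OK
    2 8192                   total_files total_bytes
    00changelog.i<NUL>4096   file name, NUL byte, size in bytes
    ... 4096 bytes of raw revlog data ...
    00manifest.i<NUL>4096
    ... 4096 bytes of raw revlog data ...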
2091 2088
2092 2089 def clone(self, remote, heads=[], stream=False):
2093 2090 '''clone remote repository.
2094 2091
2095 2092 keyword arguments:
2096 2093 heads: list of revs to clone (forces use of pull)
2097 2094 stream: use streaming clone if possible'''
2098 2095
2099 2096 # now, all clients that can request uncompressed clones can
2100 2097 # read repo formats supported by all servers that can serve
2101 2098 # them.
2102 2099
2103 2100 # if revlog format changes, client will have to check version
2104 2101 # and format flags on "stream" capability, and use
2105 2102 # uncompressed only if compatible.
2106 2103
2107 2104 if stream and not heads and remote.capable('stream'):
2108 2105 return self.stream_in(remote)
2109 2106 return self.pull(remote, heads)
2110 2107
2111 2108 # used to avoid circular references so destructors work
2112 2109 def aftertrans(files):
2113 2110 renamefiles = [tuple(t) for t in files]
2114 2111 def a():
2115 2112 for src, dest in renamefiles:
2116 2113 util.rename(src, dest)
2117 2114 return a
2118 2115
2119 2116 def instance(ui, path, create):
2120 2117 return localrepository(ui, util.drop_scheme('file', path), create)
2121 2118
2122 2119 def islocal(path):
2123 2120 return True
@@ -1,25 +1,21
1 1 # remoterepo - remote repository proxy classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import repo
9 9
10 10 class remoterepository(repo.repository):
11 def dev(self):
12 return -1
13
14 def local(self):
15 return False
11 pass
16 12
17 13 class remotelock(object):
18 14 def __init__(self, repo):
19 15 self.repo = repo
20 16 def release(self):
21 17 self.repo.unlock()
22 18 self.repo = None
23 19 def __del__(self):
24 20 if self.repo:
25 21 self.release()
@@ -1,43 +1,42
1 1 # repo.py - repository base classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 from i18n import _
10 10
11 11 class RepoError(Exception):
12 12 pass
13 13
14 14 class NoCapability(RepoError):
15 15 pass
16 16
17 17 class repository(object):
18 18 def capable(self, name):
19 19 '''tell whether repo supports named capability.
20 20 return False if not supported.
21 21 if boolean capability, return True.
22 22 if string capability, return string.'''
23 23 if name in self.capabilities:
24 24 return True
25 25 name_eq = name + '='
26 26 for cap in self.capabilities:
27 27 if cap.startswith(name_eq):
28 28 return cap[len(name_eq):]
29 29 return False
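# For example, with a hypothetical self.capabilities of
# ['unbundle', 'stream=revlogv1']:
#   capable('unbundle') -> True        (boolean capability)
#   capable('stream')   -> 'revlogv1'  (string capability)
#   capable('lookup')   -> False       (not advertised)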
30 30
31 31 def requirecap(self, name, purpose):
32 32 '''raise an exception if the given capability is not present'''
33 33 if not self.capable(name):
34 34 raise NoCapability(_('cannot %s; remote repository does not '
35 35 'support the %r capability') %
36 36 (purpose, name))
37 37
38 38 def local(self):
39 39 return False
40 40
41 41 def cancopy(self):
42 42 return self.local()
43
@@ -1,86 +1,83
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms
8 8 # of the GNU General Public License, incorporated herein by reference.
9 9
10 10 from i18n import _
11 11 import changelog, httprangereader
12 12 import repo, localrepo, manifest, util
13 13 import urllib, urllib2, errno
14 14
15 15 class rangereader(httprangereader.httprangereader):
16 16 def read(self, size=None):
17 17 try:
18 18 return httprangereader.httprangereader.read(self, size)
19 19 except urllib2.HTTPError, inst:
20 20 num = inst.code == 404 and errno.ENOENT or None
21 21 raise IOError(num, inst)
22 22 except urllib2.URLError, inst:
23 23 raise IOError(None, inst.reason[1])
24 24
25 25 def opener(base):
26 26 """return a function that opens files over http"""
27 27 p = base
28 28 def o(path, mode="r"):
29 29 f = "/".join((p, urllib.quote(path)))
30 30 return rangereader(f)
31 31 return o
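# Usage sketch (hypothetical URL):
#   o = opener('http://example.com/repo/.hg')
#   f = o('00changelog.i')
#   # f is a rangereader fetching
#   # http://example.com/repo/.hg/00changelog.i via HTTP range requests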
32 32
33 33 class statichttprepository(localrepo.localrepository):
34 34 def __init__(self, ui, path):
35 35 self._url = path
36 36 self.ui = ui
37 37
38 38 self.path = path.rstrip('/') + "/.hg"
39 39 self.opener = opener(self.path)
40 40
41 41 # find requirements
42 42 try:
43 43 requirements = self.opener("requires").read().splitlines()
44 44 except IOError, inst:
45 45 if inst.errno == errno.ENOENT:
46 46 msg = _("'%s' does not appear to be an hg repository") % path
47 47 raise repo.RepoError(msg)
48 48 else:
49 49 requirements = []
50 50
51 51 # check them
52 52 for r in requirements:
53 53 if r not in self.supported:
54 54 raise repo.RepoError(_("requirement '%s' not supported") % r)
55 55
56 56 # setup store
57 57 if "store" in requirements:
58 58 self.encodefn = util.encodefilename
59 59 self.decodefn = util.decodefilename
60 60 self.spath = self.path + "/store"
61 61 else:
62 62 self.encodefn = lambda x: x
63 63 self.decodefn = lambda x: x
64 64 self.spath = self.path
65 65 self.sopener = util.encodedopener(opener(self.spath), self.encodefn)
66 66
67 67 self.manifest = manifest.manifest(self.sopener)
68 68 self.changelog = changelog.changelog(self.sopener)
69 69 self.tagscache = None
70 70 self.nodetagscache = None
71 71 self.encodepats = None
72 72 self.decodepats = None
73 73
74 74 def url(self):
75 75 return 'static-' + self._url
76 76
77 def dev(self):
78 return -1
79
80 77 def local(self):
81 78 return False
82 79
83 80 def instance(ui, path, create):
84 81 if create:
85 82 raise util.Abort(_('cannot create new static-http repository'))
86 83 return statichttprepository(ui, path[7:])