Coverage for oc_meta/run/meta/check_results.py: 27%
373 statements
« prev ^ index » next — coverage.py v7.13.4, created at 2026-03-03 17:25 +0000
1import argparse
2import csv
3import os
4import re
5import time
6import zipfile
7from datetime import datetime
8from typing import Callable, Dict, List, Set, TypeVar
10import yaml
11from rich.progress import (BarColumn, Progress, TaskProgressColumn, TextColumn,
12 TimeRemainingColumn)
13from rich_argparse import RichHelpFormatter
14from sparqlite import SPARQLClient
15from sparqlite.exceptions import EndpointError
17from oc_meta.lib.cleaner import Cleaner
18from oc_meta.lib.master_of_regex import name_and_ids, semicolon_in_people_field
20T = TypeVar('T')
22BATCH_SIZE = 10
23MAX_RETRIES = 10
24RETRY_BACKOFF = 2
def retry_on_error(func: Callable[[], T]) -> T:
    """Retry a function on EndpointError (including 4xx errors from Virtuoso).

    Retries up to MAX_RETRIES times with exponential backoff
    (RETRY_BACKOFF ** attempt seconds); the last failure is re-raised.
    """
    attempt = 0
    while attempt < MAX_RETRIES:
        try:
            return func()
        except EndpointError:
            # Out of attempts: surface the original error to the caller.
            if attempt == MAX_RETRIES - 1:
                raise
            time.sleep(RETRY_BACKOFF ** attempt)
        attempt += 1
    raise RuntimeError("Unreachable")
def parse_identifiers(id_string: str) -> List[Dict[str, str]]:
    """
    Parse space-separated identifiers in the format schema:value.

    Each token is split only at its first colon, so values that themselves
    contain colons stay intact. Hyphens in values are normalised via Cleaner.

    :param id_string: whitespace-separated ``schema:value`` tokens
    :return: list of dicts with 'schema' (lower-cased) and 'value' keys
    """
    if not id_string or id_string.isspace():
        return []

    parsed: List[Dict[str, str]] = []
    for token in id_string.strip().split():
        schema, sep, raw_value = token.partition(':')
        # Tokens without a colon are silently skipped.
        if sep:
            parsed.append({
                'schema': schema.lower(),
                'value': Cleaner(raw_value).normalize_hyphens()
            })
    return parsed
def check_provenance_existence(omids: List[str], prov_endpoint_url: str) -> Dict[str, bool]:
    """
    Query the provenance SPARQL endpoint to check if provenance exists for the given OMIDs.

    Issues one ASK query per OMID against its first snapshot URI
    (``<omid>/prov/se/1``), which is cheaper than SELECTing the triples.

    :param omids: OMID URIs to check
    :param prov_endpoint_url: SPARQL endpoint of the provenance triplestore
    :return: dict mapping each OMID to True if provenance was found
    """
    if not omids:
        return {}

    results: Dict[str, bool] = dict.fromkeys(omids, False)
    with SPARQLClient(prov_endpoint_url, max_retries=10, backoff_factor=2, timeout=3600) as client:
        for omid in omids:
            snapshot_uri = f"{omid}/prov/se/1"
            ask_query = f"ASK {{ <{snapshot_uri}> <http://www.w3.org/ns/prov#specializationOf> ?o }}"
            # Default argument binds the query now, avoiding the
            # late-binding-closure pitfall.
            results[omid] = retry_on_error(lambda q=ask_query: client.ask(q))
    return results
def check_omids_existence(identifiers: List[Dict[str, str]], endpoint_url: str) -> Dict[str, Set[str]]:
    """
    Query SPARQL endpoint to find OMIDs for each given identifier.

    Each identifier's literal value is matched both as an explicitly
    ``^^xsd:string``-typed literal and as a plain literal, since the store
    may hold either form.

    :param identifiers: dicts with 'schema' and 'value' keys
        (as produced by parse_identifiers)
    :param endpoint_url: SPARQL endpoint of the data triplestore
    :return: dict mapping "schema:value" keys to sets of found OMID URIs
    """
    if not identifiers:
        return {}

    found_omids = {}

    with SPARQLClient(endpoint_url, max_retries=10, backoff_factor=2, timeout=3600) as client:
        for identifier in identifiers:
            id_key = f"{identifier['schema']}:{identifier['value']}"

            # NOTE(review): the value is interpolated into the query without
            # escaping quotes/backslashes — assumes identifier values never
            # contain '"'. Confirm upstream sanitisation.
            query = f"""
            PREFIX datacite: <http://purl.org/spar/datacite/>
            PREFIX literal: <http://www.essepuntato.it/2010/06/literalreification/>
            PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>

            SELECT DISTINCT ?omid
            WHERE {{
                {{
                    ?omid literal:hasLiteralValue "{identifier['value']}"^^xsd:string ;
                          datacite:usesIdentifierScheme datacite:{identifier['schema']} .
                }}
                UNION
                {{
                    ?omid literal:hasLiteralValue "{identifier['value']}" ;
                          datacite:usesIdentifierScheme datacite:{identifier['schema']} .
                }}
            }}
            """

            # Default argument binds the current query (late-binding closure).
            results = retry_on_error(lambda q=query: client.query(q))
            omids = set()

            for result in results["results"]["bindings"]:
                omid = result["omid"]["value"]
                omids.add(omid)

            found_omids[id_key] = omids

    return found_omids
123def find_file(rdf_dir: str, dir_split_number: int, items_per_file: int, uri: str, zip_output_rdf: bool) -> str|None:
124 """Find the ZIP file containing the entity data"""
125 entity_regex: str = r'^(https:\/\/w3id\.org\/oc\/meta)\/([a-z][a-z])\/(0[1-9]+0)?([1-9][0-9]*)$'
126 entity_match = re.match(entity_regex, uri)
127 if entity_match:
128 cur_number = int(entity_match.group(4))
129 cur_file_split: int = 0
130 while True:
131 if cur_number > cur_file_split:
132 cur_file_split += items_per_file
133 else:
134 break
135 cur_split: int = 0
136 while True:
137 if cur_number > cur_split:
138 cur_split += dir_split_number
139 else:
140 break
141 short_name = entity_match.group(2)
142 sub_folder = entity_match.group(3)
143 cur_dir_path = os.path.join(rdf_dir, short_name, sub_folder, str(cur_split))
144 extension = '.zip' if zip_output_rdf else '.json'
145 cur_file_path = os.path.join(cur_dir_path, str(cur_file_split)) + extension
146 return cur_file_path
147 return None
149def find_prov_file(data_zip_path: str) -> str|None:
150 """Find the provenance ZIP file associated with the data file"""
151 try:
152 base_dir = os.path.dirname(data_zip_path)
153 file_name = os.path.splitext(os.path.basename(data_zip_path))[0]
154 prov_dir = os.path.join(base_dir, file_name, 'prov')
155 prov_file = os.path.join(prov_dir, 'se.zip')
156 return prov_file if os.path.exists(prov_file) else None
158 except Exception as e:
159 print(f"Error finding provenance file for {data_zip_path}: {str(e)}")
160 return None
def process_csv_file(args: tuple, progress=None, task_id=None):
    """
    Process a single CSV file and check its identifiers.

    Runs five phases:
      1. read the CSV and collect identifiers from the id/people columns;
      2. query the data triplestore for OMIDs of each unique non-omid identifier;
      3. map the found OMIDs to their RDF files on disk;
      4. check data and provenance graphs inside those files;
      5. ask the provenance triplestore for each collected OMID.

    :param args: tuple of (csv_file, endpoint_url, prov_endpoint_url, rdf_dir,
        dir_split_number, items_per_file, zip_output_rdf, generate_rdf_files)
    :param progress: optional rich Progress instance for phase reporting
    :param task_id: task id within *progress* to update
    :return: statistics dict about processed rows and found/missing OMIDs
    """
    csv_file, endpoint_url, prov_endpoint_url, rdf_dir, dir_split_number, items_per_file, zip_output_rdf, generate_rdf_files = args

    def update_phase(phase: str):
        # Show the current phase on the progress bar, if one was supplied.
        if progress and task_id is not None:
            progress.update(task_id, detail=phase)

    stats = {
        'total_rows': 0,
        'rows_with_ids': 0,
        'total_identifiers': 0,
        'identifiers_with_omids': 0,
        'identifiers_without_omids': 0,
        'omid_schema_identifiers': 0,
        'data_graphs_found': 0,
        'data_graphs_missing': 0,
        'prov_graphs_found': 0,
        'prov_graphs_missing': 0,
        'omids_with_provenance': 0,
        'omids_without_provenance': 0,
        'identifiers_details': [],
        'processed_omids': {}
    }

    identifier_cache = {}  # key: "schema:value", value: set of OMIDs
    omid_results_cache = {}  # key: omid, value: (data_found, prov_found)

    unique_identifiers = set()
    row_identifiers = []  # list of (row_num, col, identifier) tuples

    update_phase("Phase 1/5: Reading CSV")
    with open(csv_file, 'r', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for row_num, row in enumerate(reader, 1):
            stats['total_rows'] += 1
            row_has_ids = False

            # Columns that can carry identifiers. NOTE(review): row[col]
            # assumes every one of these columns exists in the CSV header —
            # a missing column would raise KeyError; confirm input schema.
            id_columns = ['id', 'author', 'editor', 'publisher', 'venue']

            for col in id_columns:
                identifiers = []

                if col == 'id':
                    # The id column holds bare "schema:value" tokens.
                    if row[col]:
                        identifiers = parse_identifiers(row[col])
                else:
                    # People/venue columns hold "Name [id1 id2]" elements
                    # separated by semicolons; ids are in regex group 2.
                    if row[col]:
                        elements = re.split(semicolon_in_people_field, row[col])
                        for element in elements:
                            match = re.search(name_and_ids, element)
                            if match and match.group(2):
                                ids_str = match.group(2)
                                identifiers.extend(parse_identifiers(ids_str))

                if identifiers:
                    row_has_ids = True
                    stats['total_identifiers'] += len(identifiers)
                    for identifier in identifiers:
                        id_key = f"{identifier['schema']}:{identifier['value']}"
                        # omid-schema identifiers are counted but never
                        # looked up in the triplestore.
                        if identifier['schema'].lower() != 'omid':
                            unique_identifiers.add(id_key)
                        else:
                            stats['omid_schema_identifiers'] += 1
                        row_identifiers.append((row_num, col, identifier))

            if row_has_ids:
                stats['rows_with_ids'] += 1

    # Rebuild identifier dicts from the deduplicated keys.
    all_identifiers = []
    for id_key in unique_identifiers:
        parts = id_key.split(':', 1)
        if len(parts) == 2:
            all_identifiers.append({
                'schema': parts[0].lower(),
                'value': parts[1]
            })

    update_phase(f"Phase 2/5: Querying DB for {len(all_identifiers)} identifiers")
    for i in range(0, len(all_identifiers), BATCH_SIZE):
        batch = all_identifiers[i:i + BATCH_SIZE]
        batch_results = check_omids_existence(batch, endpoint_url)
        identifier_cache.update(batch_results)

    # Phase three: map each found OMID to the ZIP file that should contain it.
    update_phase("Phase 3/5: Mapping OMIDs to files")
    omids_by_file = {}  # key: zip_path, value: set of OMIDs
    all_omids = set()

    for row_num, col, identifier in row_identifiers:
        id_key = f"{identifier['schema']}:{identifier['value']}"

        omids: set = set()
        if identifier['schema'].lower() == 'omid':
            # omid-schema identifiers are handled separately before Phase 5.
            pass
        else:
            omids = identifier_cache.get(id_key, set())

        if omids:
            stats['identifiers_with_omids'] += 1
            all_omids.add(next(iter(omids)))  # Only one OMID per identifier for provenance check

            # Group OMIDs by file
            for omid in omids:
                if omid not in omid_results_cache:  # Skip if already checked
                    if generate_rdf_files:
                        zip_path = find_file(rdf_dir, dir_split_number, items_per_file, omid, zip_output_rdf)
                        if zip_path and os.path.exists(zip_path):
                            if zip_path not in omids_by_file:
                                omids_by_file[zip_path] = set()
                            omids_by_file[zip_path].add(omid)
        else:
            # NOTE(review): omid-schema identifiers also reach this branch
            # (their omids set is always empty), so they inflate
            # 'identifiers_without_omids' — confirm this is intended.
            stats['identifiers_without_omids'] += 1

        stats['identifiers_details'].append({
            'schema': identifier['schema'],
            'value': identifier['value'],
            'column': col,
            'has_omid': bool(omids),
            'row_number': row_num,
            'file': csv_file
        })

    # Phase four: check data/provenance graphs file by file.
    update_phase(f"Phase 4/5: Checking {len(omids_by_file)} RDF files")
    for zip_path, omids in omids_by_file.items():
        # Read the first JSON entry of the data archive once per file.
        data_content = None
        with zipfile.ZipFile(zip_path, 'r') as z:
            json_files = [f for f in z.namelist() if f.endswith('.json')]
            if json_files:
                with z.open(json_files[0]) as f:
                    data_content = f.read().decode('utf-8')

        # Same for the matching provenance archive, if it exists.
        prov_content = None
        prov_path = find_prov_file(zip_path)
        if prov_path and os.path.exists(prov_path):
            with zipfile.ZipFile(prov_path, 'r') as z:
                json_files = [f for f in z.namelist() if f.endswith('.json')]
                if json_files:
                    with z.open(json_files[0]) as f:
                        prov_content = f.read().decode('utf-8')

        for omid in omids:
            if omid in omid_results_cache:
                data_found, prov_found = omid_results_cache[omid]
            else:
                # Presence is a plain substring test on the raw JSON text.
                data_found = data_content is not None and omid in data_content

                if prov_content is not None:
                    prov_found = omid in prov_content
                else:
                    prov_found = False

                omid_results_cache[omid] = (data_found, prov_found)

            if data_found:
                stats['data_graphs_found'] += 1
            else:
                stats['data_graphs_missing'] += 1

            if prov_found:
                stats['prov_graphs_found'] += 1
            else:
                stats['prov_graphs_missing'] += 1

            # Record the first CSV context (row/column/identifier) that
            # produced this OMID.
            for row_num, col, identifier in row_identifiers:
                id_key = f"{identifier['schema']}:{identifier['value']}"
                if omid in identifier_cache.get(id_key, set()):
                    stats['processed_omids'][omid] = {
                        'row': row_num,
                        'column': col,
                        'identifier': id_key,
                        'file': csv_file,
                        'data_found': data_found,
                        'prov_found': prov_found
                    }
                    break

    # Add OMID-schema identifiers (whose value is already a URI) to the
    # provenance check set.
    for row_num, col, identifier in row_identifiers:
        if identifier['schema'].lower() == 'omid':
            omid = identifier['value']
            if omid.startswith('http'):
                all_omids.add(omid)

    update_phase(f"Phase 5/5: Checking provenance for {len(all_omids)} OMIDs")
    prov_results = check_provenance_existence(list(all_omids), prov_endpoint_url)

    for omid, has_prov in prov_results.items():
        if has_prov:
            stats['omids_with_provenance'] += 1
        else:
            stats['omids_without_provenance'] += 1

        if omid in stats['processed_omids']:
            stats['processed_omids'][omid]['triplestore_prov_found'] = has_prov

    return stats
def write_header(f):
    """Write the report banner and generation timestamp to *f*."""
    separator = "=" * 80
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    f.write(separator + "\n")
    f.write("CHECK RESULTS REPORT\n")
    f.write(f"Generated: {timestamp}\n")
    f.write(separator + "\n\n")
    f.flush()
def write_file_report(f, csv_file: str, stats: dict, generate_rdf_files: bool):
    """
    Write the report section for a single CSV file, including any problems found.

    :param f: open text stream the report is appended to
    :param csv_file: path of the CSV file the statistics refer to
    :param stats: statistics dict produced by process_csv_file
    :param generate_rdf_files: True if RDF file checks ran (adds graph counters)
    """
    filename = os.path.basename(csv_file)
    # Fix: write the computed file name (it was assigned but a literal
    # placeholder ended up in the f-string instead).
    f.write(f"--- File: {filename} ---\n")

    # Basic stats
    non_omid = stats['total_identifiers'] - stats['omid_schema_identifiers']
    f.write(f"Rows: {stats['total_rows']}, With IDs: {stats['rows_with_ids']}, ")
    f.write(f"Identifiers: {stats['total_identifiers']} (omid schema: {stats['omid_schema_identifiers']})\n")

    if non_omid > 0:
        f.write(f"With OMID: {stats['identifiers_with_omids']}, Without OMID: {stats['identifiers_without_omids']}\n")

    if generate_rdf_files:
        f.write(f"Data graphs - Found: {stats['data_graphs_found']}, Missing: {stats['data_graphs_missing']}\n")
        f.write(f"Prov graphs - Found: {stats['prov_graphs_found']}, Missing: {stats['prov_graphs_missing']}\n")

    f.write(f"Provenance in DB - With: {stats['omids_with_provenance']}, Without: {stats['omids_without_provenance']}\n")

    # Problems for this file
    problems_found = False

    # Identifiers without OMID (omid-schema identifiers are excluded: they
    # are never looked up, so has_omid is always False for them)
    missing_omids = [d for d in stats['identifiers_details']
                     if not d['has_omid'] and d['schema'].lower() != 'omid']
    if missing_omids:
        if not problems_found:
            f.write("\nProblems in this file:\n")
            problems_found = True
        for detail in missing_omids:
            f.write(f"  - Identifier {detail['schema']}:{detail['value']} has no OMID ")
            f.write(f"(row {detail['row_number']}, column {detail['column']})\n")

    # OMIDs without provenance (only those that were actually checked)
    omids_no_prov = [(omid, details) for omid, details in stats['processed_omids'].items()
                     if 'triplestore_prov_found' in details and not details['triplestore_prov_found']]
    if omids_no_prov:
        if not problems_found:
            f.write("\nProblems in this file:\n")
            problems_found = True
        for omid, details in omids_no_prov:
            f.write(f"  - OMID {omid} has no provenance ")
            f.write(f"(row {details['row']}, identifier {details['identifier']})\n")

    f.write("\n")
    f.flush()
def write_aggregated_summary(
    f,
    total_rows: int,
    total_rows_with_ids: int,
    total_identifiers: int,
    total_omid_schema: int,
    total_with_omids: int,
    total_without_omids: int,
    total_data_graphs_found: int,
    total_data_graphs_missing: int,
    total_prov_graphs_found: int,
    total_prov_graphs_missing: int,
    total_omids_with_provenance: int,
    total_omids_without_provenance: int,
    problematic_identifiers: dict,
    generate_rdf_files: bool
):
    """Write the aggregated summary section at the end of the report."""
    banner = "=" * 80
    f.write(banner + "\n")
    f.write("AGGREGATED SUMMARY\n")
    f.write(banner + "\n\n")

    f.write(f"Total rows processed: {total_rows}\n")
    f.write(f"Rows containing identifiers: {total_rows_with_ids}\n")
    f.write(f"Total identifiers found: {total_identifiers}\n")
    f.write(f"Identifiers with 'omid' schema (skipped checking): {total_omid_schema}\n")

    non_omid_identifiers = total_identifiers - total_omid_schema
    if non_omid_identifiers > 0:
        # Percentages are relative to the non-omid identifiers only.
        with_pct = total_with_omids / non_omid_identifiers * 100
        without_pct = total_without_omids / non_omid_identifiers * 100
        f.write(f"Identifiers with associated OMIDs: {total_with_omids} ({with_pct:.2f}%)\n")
        f.write(f"Identifiers without OMIDs: {total_without_omids} ({without_pct:.2f}%)\n")
    else:
        f.write("No non-omid identifiers found to check for OMID associations.\n")

    if generate_rdf_files:
        f.write(f"\nData Graphs - Found: {total_data_graphs_found}, Missing: {total_data_graphs_missing}\n")
        f.write(f"Provenance Graphs - Found: {total_prov_graphs_found}, Missing: {total_prov_graphs_missing}\n")
    else:
        f.write("\nRDF file generation is disabled. File checks were skipped.\n")

    total_omids_checked = total_omids_with_provenance + total_omids_without_provenance
    if total_omids_checked > 0:
        prov_pct = total_omids_with_provenance / total_omids_checked * 100
        no_prov_pct = total_omids_without_provenance / total_omids_checked * 100
        f.write("\nProvenance in Triplestore:\n")
        f.write(f"  OMIDs with provenance: {total_omids_with_provenance} ({prov_pct:.2f}%)\n")
        f.write(f"  OMIDs without provenance: {total_omids_without_provenance} ({no_prov_pct:.2f}%)\n")
    else:
        f.write("\nNo OMIDs found to check for provenance.\n")

    # Cross-file problems: identifiers mapped to more than one OMID.
    if problematic_identifiers:
        f.write("\n" + banner + "\n")
        f.write("WARNING: Found identifiers with multiple OMIDs (cross-file issue):\n")
        f.write(banner + "\n")
        for id_key, details in problematic_identifiers.items():
            f.write(f"\nIdentifier {id_key} is associated with {len(details['omids'])} different OMIDs:\n")
            f.write(f"  OMIDs: {', '.join(sorted(details['omids']))}\n")
            f.write("  Occurrences:\n")
            for occ in details['occurrences']:
                f.write(f"    - Row {occ['row']} in {occ['file']}, column {occ['column']}\n")

    f.flush()
def main():
    """
    CLI entry point: read a meta_config.yaml, check every input CSV against
    the triplestores (and optionally the on-disk RDF files), and write a
    per-file plus aggregated report to the given output path.
    """
    parser = argparse.ArgumentParser(
        description="Check MetaProcess results by verifying input CSV identifiers",
        formatter_class=RichHelpFormatter,
    )
    parser.add_argument("meta_config", help="Path to meta_config.yaml file")
    parser.add_argument("output", help="Output file path for results")
    args = parser.parse_args()

    with open(args.meta_config, 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)

    # Required config keys; KeyError here means a malformed config file.
    input_csv_dir = config['input_csv_dir']
    base_output_dir = config['output_rdf_dir']
    output_rdf_dir = os.path.join(base_output_dir, 'rdf')
    endpoint_url = config['triplestore_url']
    prov_endpoint_url = config['provenance_triplestore_url']
    generate_rdf_files = config.get('generate_rdf_files', True)

    # File checks are pointless without the RDF directory; bail out early.
    if generate_rdf_files and not os.path.exists(output_rdf_dir):
        print(f"RDF directory not found at {output_rdf_dir}")
        return

    csv_files = []
    for root, _, files in os.walk(input_csv_dir):
        csv_files.extend(
            os.path.join(root, f) for f in files if f.endswith('.csv')
        )

    if not csv_files:
        print(f"No CSV files found in {input_csv_dir}")
        return

    print(f"Found {len(csv_files)} CSV files to process")

    # Prepare output file
    output_dir = os.path.dirname(args.output) or '.'
    os.makedirs(output_dir, exist_ok=True)

    # Aggregation variables
    total_rows = 0
    total_rows_with_ids = 0
    total_identifiers = 0
    total_with_omids = 0
    total_without_omids = 0
    total_omid_schema = 0
    total_data_graphs_found = 0
    total_data_graphs_missing = 0
    total_prov_graphs_found = 0
    total_prov_graphs_missing = 0
    total_omids_with_provenance = 0
    total_omids_without_provenance = 0
    id_key_to_omids = {}  # key: "schema:value", value: set of OMIDs across all files
    all_results = []

    with open(args.output, 'w', encoding='utf-8') as output_file:
        write_header(output_file)

        process_args = [(f, endpoint_url, prov_endpoint_url, output_rdf_dir, config['dir_split_number'], config['items_per_file'], config['zip_output_rdf'], generate_rdf_files) for f in csv_files]

        with Progress(
            TextColumn("[progress.description]{task.description}"),
            BarColumn(),
            TaskProgressColumn(),
            TimeRemainingColumn(),
            TextColumn("[cyan]{task.fields[current_file]}"),
            TextColumn("[yellow]{task.fields[detail]}"),
        ) as progress:
            task = progress.add_task("Processing CSV files", total=len(csv_files), current_file="", detail="")
            for idx, proc_args in enumerate(process_args):
                current_file = os.path.basename(csv_files[idx])
                progress.update(task, current_file=f"[{idx+1}/{len(csv_files)}] {current_file}")

                result = process_csv_file(proc_args, progress=progress, task_id=task)

                # Write file report immediately
                write_file_report(output_file, csv_files[idx], result, generate_rdf_files)

                # Accumulate for aggregation
                total_rows += result['total_rows']
                total_rows_with_ids += result['rows_with_ids']
                total_identifiers += result['total_identifiers']
                total_with_omids += result['identifiers_with_omids']
                total_without_omids += result['identifiers_without_omids']
                total_omid_schema += result['omid_schema_identifiers']
                total_data_graphs_found += result['data_graphs_found']
                total_data_graphs_missing += result['data_graphs_missing']
                total_prov_graphs_found += result['prov_graphs_found']
                total_prov_graphs_missing += result['prov_graphs_missing']
                total_omids_with_provenance += result['omids_with_provenance']
                total_omids_without_provenance += result['omids_without_provenance']

                # Build cross-file lookup
                for omid, omid_details in result['processed_omids'].items():
                    id_key = omid_details['identifier']
                    if id_key not in id_key_to_omids:
                        id_key_to_omids[id_key] = set()
                    id_key_to_omids[id_key].add(omid)

                all_results.append(result)
                progress.advance(task)

        # Find cross-file problematic identifiers (same identifier with multiple OMIDs)
        problematic_identifiers = {}
        with Progress(
            TextColumn("[progress.description]{task.description}"),
            BarColumn(),
            TaskProgressColumn(),
            TimeRemainingColumn(),
            TextColumn("[cyan]{task.fields[detail]}"),
        ) as progress:
            task = progress.add_task("Checking cross-file issues", total=len(all_results), detail="")
            for result in all_results:
                for detail in result['identifiers_details']:
                    id_key = f"{detail['schema']}:{detail['value']}"
                    omids = id_key_to_omids.get(id_key, set())

                    if len(omids) > 1:
                        if id_key not in problematic_identifiers:
                            problematic_identifiers[id_key] = {
                                'omids': omids,
                                'occurrences': []
                            }
                        problematic_identifiers[id_key]['occurrences'].append({
                            'file': detail['file'],
                            'row': detail['row_number'],
                            'column': detail['column']
                        })
                progress.advance(task)
            progress.update(task, detail=f"Found {len(problematic_identifiers)} identifiers with multiple OMIDs")

        # Write aggregated summary
        write_aggregated_summary(
            output_file,
            total_rows=total_rows,
            total_rows_with_ids=total_rows_with_ids,
            total_identifiers=total_identifiers,
            total_omid_schema=total_omid_schema,
            total_with_omids=total_with_omids,
            total_without_omids=total_without_omids,
            total_data_graphs_found=total_data_graphs_found,
            total_data_graphs_missing=total_data_graphs_missing,
            total_prov_graphs_found=total_prov_graphs_found,
            total_prov_graphs_missing=total_prov_graphs_missing,
            total_omids_with_provenance=total_omids_with_provenance,
            total_omids_without_provenance=total_omids_without_provenance,
            problematic_identifiers=problematic_identifiers,
            generate_rdf_files=generate_rdf_files
        )

    print(f"Results written to: {args.output}")
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()