Add Appraiser Review Letter template and XML merge data
This commit is contained in:
parent
63b1bfd758
commit
1e532029fa
|
|
@ -0,0 +1 @@
|
||||||
|
<EFBFBD>覆骒陡<EFBFBD>'<27>噏
|
||||||
Binary file not shown.
|
|
@ -112,20 +112,25 @@ public class CLMDocGenCallout {
|
||||||
private static String buildDataXml(Map<String, Object> payload) {
|
private static String buildDataXml(Map<String, Object> payload) {
|
||||||
String xml = '<TemplateFieldData>';
|
String xml = '<TemplateFieldData>';
|
||||||
|
|
||||||
|
// Emit flat fields first
|
||||||
for (String key : payload.keySet()) {
|
for (String key : payload.keySet()) {
|
||||||
if (key == 'DeficiencyList') continue;
|
if (key == 'DeficiencyList') continue;
|
||||||
xml += '<' + key + '>' + escapeXml(String.valueOf(payload.get(key))) + '</' + key + '>';
|
xml += '<' + key + '>' + escapeXml(String.valueOf(payload.get(key))) + '</' + key + '>';
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Emit DeficiencyList as a nested list so templates can iterate dynamically
|
||||||
List<Object> deficiencies = (List<Object>) payload.get('DeficiencyList');
|
List<Object> deficiencies = (List<Object>) payload.get('DeficiencyList');
|
||||||
if (deficiencies != null) {
|
if (deficiencies != null && !deficiencies.isEmpty()) {
|
||||||
|
xml += '<DeficiencyList>';
|
||||||
for (Integer i = 0; i < deficiencies.size(); i++) {
|
for (Integer i = 0; i < deficiencies.size(); i++) {
|
||||||
Map<String, Object> d = (Map<String, Object>) deficiencies[i];
|
Map<String, Object> d = (Map<String, Object>) deficiencies[i];
|
||||||
String p = 'Deficiency_' + (i + 1) + '_';
|
xml += '<Deficiency>';
|
||||||
xml += '<' + p + 'Number>' + escapeXml(String.valueOf(d.get('deficiencyNumber'))) + '</' + p + 'Number>';
|
xml += '<Number>' + escapeXml(String.valueOf(d.get('deficiencyNumber'))) + '</Number>';
|
||||||
xml += '<' + p + 'Description>' + escapeXml(String.valueOf(d.get('description'))) + '</' + p + 'Description>';
|
xml += '<Description>' + escapeXml(String.valueOf(d.get('description'))) + '</Description>';
|
||||||
xml += '<' + p + 'Resolution>' + escapeXml(String.valueOf(d.get('resolution'))) + '</' + p + 'Resolution>';
|
xml += '<Resolution>' + escapeXml(String.valueOf(d.get('resolution'))) + '</Resolution>';
|
||||||
|
xml += '</Deficiency>';
|
||||||
}
|
}
|
||||||
|
xml += '</DeficiencyList>';
|
||||||
xml += '<DeficiencyCount>' + deficiencies.size() + '</DeficiencyCount>';
|
xml += '<DeficiencyCount>' + deficiencies.size() + '</DeficiencyCount>';
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,19 @@
|
||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<NamedCredential xmlns="http://soap.sforce.com/2006/04/metadata">
|
||||||
|
<allowMergeFieldsInBody>false</allowMergeFieldsInBody>
|
||||||
|
<allowMergeFieldsInHeader>false</allowMergeFieldsInHeader>
|
||||||
|
<calloutStatus>Enabled</calloutStatus>
|
||||||
|
<generateAuthorizationHeader>true</generateAuthorizationHeader>
|
||||||
|
<label>CLMuatDownload</label>
|
||||||
|
<namedCredentialParameters>
|
||||||
|
<parameterName>Url</parameterName>
|
||||||
|
<parameterType>Url</parameterType>
|
||||||
|
<parameterValue>https://apidownloaduatna11.springcm.com</parameterValue>
|
||||||
|
</namedCredentialParameters>
|
||||||
|
<namedCredentialParameters>
|
||||||
|
<externalCredential>DocusignJWT</externalCredential>
|
||||||
|
<parameterName>ExternalCredential</parameterName>
|
||||||
|
<parameterType>Authentication</parameterType>
|
||||||
|
</namedCredentialParameters>
|
||||||
|
<namedCredentialType>SecuredEndpoint</namedCredentialType>
|
||||||
|
</NamedCredential>
|
||||||
|
|
@ -0,0 +1,19 @@
|
||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<NamedCredential xmlns="http://soap.sforce.com/2006/04/metadata">
|
||||||
|
<allowMergeFieldsInBody>false</allowMergeFieldsInBody>
|
||||||
|
<allowMergeFieldsInHeader>false</allowMergeFieldsInHeader>
|
||||||
|
<calloutStatus>Enabled</calloutStatus>
|
||||||
|
<generateAuthorizationHeader>true</generateAuthorizationHeader>
|
||||||
|
<label>CLMuatDownloadNamedCreds</label>
|
||||||
|
<namedCredentialParameters>
|
||||||
|
<parameterName>Url</parameterName>
|
||||||
|
<parameterType>Url</parameterType>
|
||||||
|
<parameterValue>https://apidownloaduatna11.springcm.com</parameterValue>
|
||||||
|
</namedCredentialParameters>
|
||||||
|
<namedCredentialParameters>
|
||||||
|
<externalCredential>DocusignJWT</externalCredential>
|
||||||
|
<parameterName>ExternalCredential</parameterName>
|
||||||
|
<parameterType>Authentication</parameterType>
|
||||||
|
</namedCredentialParameters>
|
||||||
|
<namedCredentialType>SecuredEndpoint</namedCredentialType>
|
||||||
|
</NamedCredential>
|
||||||
|
|
@ -0,0 +1,99 @@
|
||||||
|
#!/usr/bin/env python3
"""Rebuild the Appraiser Review Letter merge payload the way the Apex
AppraiserCasePayloadBuilder does, then compare the generated request
against the known-good Postman capture."""
import json, difflib

# Verbatim JSON body of the working Postman request (SpringCM merge call).
postman_json_text = '''{
"TemplateDocument": {
"Href": "https://apiuatna11.springcm.com/v2/bccae332-c7db-4892-ab85-257df0f70fea/documents/a0cbc0e6-d87d-459e-8d63-66baa47878f3" },
"DataXML": "<TemplateFieldData><AppraiserCaseNumber>AC-00001</AppraiserCaseNumber><AppraiserFieldReviewDate>02/04/2026</AppraiserFieldReviewDate><PropertyAddress>123 Main St, Denver, CO 80202</PropertyAddress><DeficiencyList><Deficiency><Number>1</Number><Description>Missing comparable sale adjustment detail.</Description><Resolution>Added adjustment rationale and supporting calculations.</Resolution></Deficiency><Deficiency><Number>2</Number><Description>Neighborhood trend explanation insufficient.</Description><Resolution>Expanded market trend narrative with MLS evidence.</Resolution></Deficiency><Deficiency><Number>3</Number><Description>Photo date stamps were not included.</Description><Resolution>Re-uploaded photos with date metadata and captions.</Resolution></Deficiency></DeficiencyList><DeficiencyCount>3</DeficiencyCount></TemplateFieldData>",
"DestinationDocumentName": "Review_AC-00001.docx",
"DestinationFolder": {
"Href": "https://apiuatna11.springcm.com/v2/bccae332-c7db-4892-ab85-257df0f70fea/folders/12220442-b12e-f111-84fc-88e9a4bd0d9c"
}
}
'''

# Parse the captured Postman body.
postman = json.loads(postman_json_text)

# Simulated AppraiserCasePayloadBuilder output: three flat merge fields
# plus the deficiency rows the template iterates over.
payload = {
    'AppraiserCaseNumber': 'AC-00001',
    'AppraiserFieldReviewDate': '02/04/2026',
    'PropertyAddress': '123 Main St, Denver, CO 80202',
    'DeficiencyList': [
        {'deficiencyNumber': num, 'description': desc, 'resolution': res}
        for num, desc, res in (
            ('1', 'Missing comparable sale adjustment detail.',
             'Added adjustment rationale and supporting calculations.'),
            ('2', 'Neighborhood trend explanation insufficient.',
             'Expanded market trend narrative with MLS evidence.'),
            ('3', 'Photo date stamps were not included.',
             'Re-uploaded photos with date metadata and captions.'),
        )
    ],
}
|
||||||
|
|
||||||
|
# Function to mimic buildDataXml from Apex implementation
|
||||||
|
def escape_xml(s):
    """Escape the five XML special characters in *s*; ``None`` becomes ``''``.

    Mirrors the Apex ``escapeXml`` helper used when building DataXML.

    BUGFIX: the previous version chained ``.replace()`` calls that replaced
    each character with itself (``replace('&', '&')`` etc. — the entity
    references had been lost), so '<', '&', '"' and "'" passed through
    unescaped and could yield malformed TemplateFieldData XML.  Ampersand
    is escaped first so the other entities are not double-escaped.
    """
    if s is None:
        return ''
    return (s.replace('&', '&amp;')
             .replace('<', '&lt;')
             .replace('>', '&gt;')
             .replace('"', '&quot;')
             .replace("'", '&apos;'))
|
||||||
|
|
||||||
|
def build_data_xml(payload):
    """Serialise the merge payload into SpringCM TemplateFieldData XML.

    Python mirror of the Apex ``buildDataXml``: flat fields first (in the
    same order the Postman request uses), then a nested ``DeficiencyList``
    with one ``Deficiency`` element per row, followed by ``DeficiencyCount``.
    """
    parts = ['<TemplateFieldData>']

    # Flat merge fields, fixed ordering to match the Postman capture.
    for field in ('AppraiserCaseNumber', 'AppraiserFieldReviewDate', 'PropertyAddress'):
        raw = payload.get(field)
        text = escape_xml('' if raw is None else str(raw))
        parts.append(f'<{field}>{text}</{field}>')

    # Nested deficiency rows (skipped entirely when absent or empty).
    rows = payload.get('DeficiencyList')
    if rows:
        parts.append('<DeficiencyList>')
        for row in rows:
            parts.append('<Deficiency>')
            for tag, key in (('Number', 'deficiencyNumber'),
                             ('Description', 'description'),
                             ('Resolution', 'resolution')):
                parts.append(f'<{tag}>{escape_xml(str(row.get(key)))}</{tag}>')
            parts.append('</Deficiency>')
        parts.append('</DeficiencyList>')
        parts.append(f'<DeficiencyCount>{len(rows)}</DeficiencyCount>')

    parts.append('</TemplateFieldData>')
    return ''.join(parts)
|
||||||
|
|
||||||
|
# Rebuild the request exactly as the Apex callout would and diff it
# against the captured Postman request.
apex_data_xml = build_data_xml(payload)

apex_request = {
    'TemplateDocument': {'Href': postman['TemplateDocument']['Href']},
    'DataXML': apex_data_xml,
    'DestinationDocumentName': postman['DestinationDocumentName'],
    'DestinationFolder': {'Href': postman['DestinationFolder']['Href']},
}

# Field-by-field comparison of everything except DataXML.
print('Comparing top-level fields:')
for key in ('TemplateDocument', 'DestinationDocumentName', 'DestinationFolder'):
    verdict = 'MATCH' if postman.get(key) == apex_request.get(key) else 'DIFFER'
    print(f' - {key}:', verdict)

# DataXML gets an exact comparison plus a readable diff on mismatch.
postman_xml = postman['DataXML']
apex_xml = apex_request['DataXML']

if postman_xml == apex_xml:
    print('\nDataXML: EXACT MATCH')
else:
    print('\nDataXML: DIFFER')
    # Split on tag boundaries so the unified diff lines up element-wise.
    delta = difflib.unified_diff(postman_xml.split('><'),
                                 apex_xml.split('><'),
                                 fromfile='postman_xml',
                                 tofile='apex_xml',
                                 lineterm='')
    print('\n'.join(delta))
    # Short head-of-string previews for quick eyeballing.
    print('\n-- Postman head 200 chars --\n', postman_xml[:200])
    print('\n-- Apex head 200 chars --\n', apex_xml[:200])

# Spot-check the two Href values independently of the dict comparison.
print('\nTemplateDocument.Href match:', postman['TemplateDocument']['Href'] == apex_request['TemplateDocument']['Href'])
print('DestinationFolder.Href match:', postman['DestinationFolder']['Href'] == apex_request['DestinationFolder']['Href'])

# Dump the generated DataXML for manual inspection.
print('\n--- Apex DataXML ---\n')
print(apex_data_xml)
|
||||||
|
|
@ -0,0 +1,100 @@
|
||||||
|
#!/usr/bin/env python3
"""Decode a base64-encoded .docx captured between BASE64_BEGIN/BASE64_END
markers in a debug log, unzip it, and inspect word/document.xml for the
merged deficiency-table content.

Exit codes: 2 no base64 found, 3 decode failed, 4 unzip failed,
5 document.xml missing, 6 XML parse failed.
"""
import os, re, base64, zipfile, xml.etree.ElementTree as ET, sys

# Captured debug log containing the base64 payload (machine-specific path).
log_path = '/home/paulh/.vscode-server/data/User/workspaceStorage/79b924110cb5ff6de49811d445e59969-1/GitHub.copilot-chat/chat-session-resources/90e6dae0-2184-412b-af0b-eac258be98c5/call_gY8uUyzuGvFiN46d4ZPVFtjz__vscode-1775271381281/content.txt'
out_dir = 'artifacts/doc_inspect'
os.makedirs(out_dir, exist_ok=True)

# Collect the base64 payload between the BEGIN/END sentinel lines.
start = False
chunks = []
with open(log_path, 'r', errors='replace') as f:
    for line in f:
        if 'BASE64_BEGIN' in line:
            start = True
            continue
        if 'BASE64_END' in line:
            break
        if start:
            if 'BASE64_CHUNK:' in line:
                parts = line.split('BASE64_CHUNK:', 1)[1].strip()
                chunks.append(parts)
            else:
                # fallback: if line looks like base64 (long and only base64 chars + =), take it
                s = line.strip()
                if len(s) > 100 and re.fullmatch(r'[A-Za-z0-9+/=\n\r]+', s):
                    chunks.append(s)

if not chunks:
    print('ERROR: no base64 chunks found in log at', log_path)
    sys.exit(2)

b64 = ''.join(chunks)
# sanitize (remove any DEBUG prefixes that snuck in)
b64 = re.sub(r'\s+', '', b64)
try:
    data = base64.b64decode(b64)
except Exception as e:
    print('ERROR decoding base64:', e)
    sys.exit(3)

# Persist the decoded bytes as a .docx for downstream inspection.
docx_path = os.path.join(out_dir, 'downloaded.docx')
with open(docx_path, 'wb') as f:
    f.write(data)
print('WROTE_DOCX:', docx_path, 'size=', os.path.getsize(docx_path))

# A .docx is a zip archive; extract it so we can read word/document.xml.
unzip_dir = os.path.join(out_dir, 'unzipped')
os.makedirs(unzip_dir, exist_ok=True)
try:
    with zipfile.ZipFile(docx_path, 'r') as z:
        z.extractall(unzip_dir)
except Exception as e:
    print('ERROR unzipping docx:', e)
    sys.exit(4)

doc_xml = os.path.join(unzip_dir, 'word', 'document.xml')
if not os.path.exists(doc_xml):
    print('ERROR: word/document.xml not found in the docx')
    sys.exit(5)

# Parse the WordprocessingML body and locate tables.
ns = {'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'}
ET.register_namespace('w', ns['w'])
try:
    tree = ET.parse(doc_xml)
    root = tree.getroot()
except Exception as e:
    print('ERROR parsing document.xml:', e)
    sys.exit(6)

tables = root.findall('.//w:tbl', ns)
print('TABLE_COUNT:', len(tables))

# For each table, collect row texts (limit output to first 5 tables and 20 rows each).
found_def_texts = []
for ti, tbl in enumerate(tables[:5], start=1):
    rows = tbl.findall('.//w:tr', ns)
    print('\n--- TABLE', ti, 'rows=', len(rows), '---')
    for ri, tr in enumerate(rows[:20], start=1):
        texts = [t.text for t in tr.findall('.//w:t', ns) if t.text]
        joined = ' | '.join(texts).strip()
        if joined:
            print('ROW %d:' % ri, repr(joined))
            # heuristic: keyword match means the merge likely populated this row
            if any(k.lower() in joined.lower() for k in ('deficiency', 'description', 'defect', 'ac-', 'AC-', 'DeficiencyList')):
                found_def_texts.append(joined)
        else:
            print('ROW %d: <empty>' % ri)

# Also search the whole document.xml for certain keywords.
# BUGFIX: read via a context manager — the original `open(...).read()`
# leaked the file handle.
with open(doc_xml, 'r', encoding='utf-8', errors='replace') as fh:
    full_xml = fh.read()
keywords = ['Deficiency', 'DeficiencyList', 'Description', '<TableRow', 'AC-']
hits = {k: (full_xml.count(k)) for k in keywords}
print('\nKEYWORD_COUNTS:')
for k, v in hits.items():
    print(k + ':', v)

print('\nFOUND_DEFICIENCY_TEXTS_COUNT:', len(found_def_texts))
for i, txt in enumerate(found_def_texts[:20], start=1):
    print('FOUND_%d:' % i, txt)

# Exit with success
print('\nSUMMARY: docx_size=%d tables=%d deficiency_text_found=%s' % (os.path.getsize(docx_path), len(tables), bool(found_def_texts)))
print('OUTPUT_DIR:', os.path.abspath(out_dir))
|
||||||
Loading…
Reference in New Issue